szehon-ho commented on code in PR #54571:
URL: https://github.com/apache/spark/pull/54571#discussion_r2893788007
########## dev/generate_srs_registry.py: ##########
@@ -0,0 +1,401 @@
+#!/usr/bin/env python3
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Generate the Spatial Reference System (SRS) registry for Apache Spark.
+
+Downloads CRS definitions from the PROJ (Cartographic Projections and
+Coordinate Transformations Library) GitHub repository and generates a CSV
+file used by Spark for geospatial types on both the JVM and Python sides.
+
+PROJ is a C/C++ library (https://proj.org/) that maintains the authoritative
+EPSG and ESRI CRS databases. This script extracts SRID metadata from PROJ's
+SQL source files, which contain easily parseable plain-text SRS information.
+
+The script produces entries from the following PROJ SQL files:
+  - geodetic_crs.sql (EPSG geodetic CRS: geographic, geocentric, etc.)
+  - projected_crs.sql (EPSG projected CRS)
+  - compound_crs.sql (EPSG compound CRS)
+  - vertical_crs.sql (EPSG vertical CRS)
+  - engineering_crs.sql (EPSG engineering CRS)
+  - esri.sql (ESRI geodetic, projected, compound, vertical, engineering CRS)
+
+Additionally, the following special entries are added:
+  - SRID 0 -> SRID:0 (Spark convention: Cartesian, no defined SRS)
+  - SRID 4267 -> OGC:CRS27 (OGC standardization of NAD27)
+  - SRID 4269 -> OGC:CRS83 (OGC standardization of NAD83)
+  - SRID 4326 -> OGC:CRS84 (OGC standardization of WGS 84)
+
+Prerequisites:
+  Python 3.9+ (no third-party packages required)
+
+Usage:
+  # Generate from the default PROJ version:
+  python dev/generate_srs_registry.py
+
+  # Generate from a specific PROJ version:
+  python dev/generate_srs_registry.py --proj-version 9.7.1
+
+  # Verify the generated files:
+  wc -l sql/api/src/main/resources/org/apache/spark/sql/srs_registry.csv
+  wc -l python/pyspark/sql/srs_registry.csv
+
+Upgrade workflow:
+  1. Update `DEFAULT_PROJ_VERSION` to the new PROJ release tag.
+  2. Run this script using `python dev/generate_srs_registry.py`.
+  3. Review the diff to see which SRIDs were added or removed.
+"""
+
+import argparse
+import os
+import re
+import sys
+import urllib.request
+
+# Default PROJ version to download SQL files from.
+DEFAULT_PROJ_VERSION = "9.7.1"
+# PLEASE ENSURE THIS IS UPDATED TO A VALID PROJ VERSION TAG WHEN UPGRADING!
+
+# Default timeout (in seconds) for downloading SQL files from GitHub.
+DEFAULT_DOWNLOAD_TIMEOUT_SECS = 30
+
+# URL template for raw SQL files from the PROJ GitHub repository.
+PROJ_RAW_URL = "https://raw.githubusercontent.com/OSGeo/PROJ/{version}/data/sql/{filename}"
+
+# PROJ SQL files to download. EPSG CRS definitions are spread across
+# dedicated files, while ESRI definitions are all in a single file.
+PROJ_SQL_FILES = [
+    "geodetic_crs.sql",
+    "projected_crs.sql",
+    "compound_crs.sql",
+    "vertical_crs.sql",
+    "engineering_crs.sql",
+    "esri.sql",
+]
+
+# OGC special cases: these SRIDs are standardized under OGC rather than EPSG.
+# The OGC string IDs override the EPSG ones for these SRIDs.
+OGC_SPECIAL_CASES = {
+    4267: "OGC:CRS27",  # NAD27
+    4269: "OGC:CRS83",  # NAD83
+    4326: "OGC:CRS84",  # WGS 84
+}
+
+# Output paths for the generated CSV, relative to the Spark repo root.
+JAVA_RESOURCE_PATH = os.path.join(
+    "sql", "api", "src", "main", "resources", "org", "apache", "spark", "sql", "srs_registry.csv"
+)
+PYTHON_RESOURCE_PATH = os.path.join("python", "pyspark", "sql", "srs_registry.csv")
+
+
+def download_sql(version, filename, timeout=DEFAULT_DOWNLOAD_TIMEOUT_SECS):
+    """Download a SQL file from the PROJ GitHub repository at a pinned version tag."""
+    url = PROJ_RAW_URL.format(version=version, filename=filename)
+    print(f" Downloading {url}")
+    try:
+        with urllib.request.urlopen(url, timeout=timeout) as response:
+            return response.read().decode("utf-8")
+    except urllib.error.URLError as e:
+        print(f"ERROR: Failed to download {url}: {e}", file=sys.stderr)
+        if "CERTIFICATE_VERIFY_FAILED" in str(e):
+            print(
+                "Hint: Run 'Install Certificates.command' from your Python "
+                "installation, or set the SSL_CERT_FILE environment variable.",
+                file=sys.stderr,
+            )
+        print(f"Check that PROJ version '{version}' exists as a GitHub tag.", file=sys.stderr)
+        sys.exit(1)
+
+
+def parse_sql_values(values_str):
+    """
+    Parse the comma-separated fields inside a SQL VALUES(...) clause.
+
+    Handles SQL-quoted strings (single quotes with '' escape for literal
+    apostrophes) and unquoted NULL / integer literals.
+
+    Returns a list of string values, with NULL represented as None.
+    """
+    fields = []
+    i = 0
+    n = len(values_str)
+    while i < n:
+        if values_str[i] in (" ", "\t"):
+            i += 1
+            continue
+        if values_str[i] == "'":
+            # Quoted string: scan until closing quote ('' is an escaped quote).
+            i += 1
+            buf = []
+            while i < n:
+                if values_str[i] == "'" and i + 1 < n and values_str[i + 1] == "'":
+                    buf.append("'")
+                    i += 2
+                elif values_str[i] == "'":
+                    i += 1
+                    break
+                else:
+                    buf.append(values_str[i])
+                    i += 1
+            fields.append("".join(buf))
+        elif values_str[i : i + 4].upper() == "NULL":
+            fields.append(None)
+            i += 4
+        else:
+            # Unquoted literal (integer, etc.)
+            j = i
+            while j < n and values_str[j] not in (",", ")"):
+                j += 1
+            fields.append(values_str[i:j].strip())
+            i = j
+        # Skip comma separator.
+        while i < n and values_str[i] in (",", " ", "\t"):
+            if values_str[i] == ",":
+                i += 1
+                break
+            i += 1
+    return fields
+
+
+def parse_geodetic_crs(sql_content):
+    """
+    Parse geodetic_crs INSERT statements from SQL content.
+
+    The `type` field (position 4) determines whether the CRS is geographic:
+      'geographic 2D', 'geographic 3D' -> geographic
+      'geocentric', 'other' -> non-geographic
+
+    Deprecated entries are included (to match Databricks Runtime behavior).
+    Entries with non-numeric codes are skipped.
+
+    Returns a list of (srid, string_id, is_geographic) tuples.
+    """
+    results = []
+    pattern = re.compile(r'INSERT INTO "geodetic_crs" VALUES\((.+)\);', re.IGNORECASE)
+    for line in sql_content.splitlines():
+        match = pattern.search(line)
+        if not match:
+            continue
+        fields = parse_sql_values(match.group(1))
+        if len(fields) < 5:
+            continue
+        auth_name = fields[0]
+        code = fields[1]
+        crs_type = fields[4]
+        try:
+            srid = int(code)
+        except (ValueError, TypeError):
+            continue
+        is_geographic = crs_type is not None and crs_type.startswith("geographic")
+        string_id = f"{auth_name}:{code}"
+        results.append((srid, string_id, is_geographic))
+    return results
+
+
+def parse_simple_crs(sql_content, table_name):
+    """
+    Parse INSERT statements for CRS tables that are always non-geographic.
+
+    Works for projected_crs, compound_crs, vertical_crs, and engineering_crs tables.
+    Fields used: auth_name (0), code (1).
+
+    Deprecated entries are included (to match Databricks Runtime behavior).
+    Entries with non-numeric codes are skipped.
+
+    Returns a list of (srid, string_id, is_geographic=False) tuples.
+    """
+    results = []
+    pattern = re.compile(rf'INSERT INTO "{table_name}" VALUES\((.+)\);', re.IGNORECASE)
+    for line in sql_content.splitlines():
+        match = pattern.search(line)
+        if not match:
+            continue
+        fields = parse_sql_values(match.group(1))
+        if len(fields) < 2:
+            continue
+        auth_name = fields[0]
+        code = fields[1]
+        try:
+            srid = int(code)
+        except (ValueError, TypeError):
+            continue
+        string_id = f"{auth_name}:{code}"
+        results.append((srid, string_id, False))
+    return results
+
+
+def parse_all_crs_from_sql(sql_content):
+    """
+    Parse all CRS types (geodetic, projected, compound, vertical, engineering)
+    from a single SQL file. Used for multi-table files like esri.sql.
+    """
+    entries = []
+    entries.extend(parse_geodetic_crs(sql_content))
+    for table in ["projected_crs", "compound_crs", "vertical_crs", "engineering_crs"]:
+        entries.extend(parse_simple_crs(sql_content, table))
+    return entries
+
+
+def write_csv(entries, proj_version, output_path):
+    """Write SRS entries to a CSV file with a metadata header."""
+    os.makedirs(os.path.dirname(output_path), exist_ok=True)
+    with open(output_path, "w") as f:
+        f.write(
+            f"# Generated by dev/generate_srs_registry.py from PROJ {proj_version}\n"
+            f"# Source: https://github.com/OSGeo/PROJ/tree/{proj_version}/data/sql\n"
+            f"# Do not edit manually. Re-run the script to regenerate.\n"
+        )
+        f.write("srid,string_id,is_geographic\n")
+        for srid, string_id, is_geographic in sorted(entries):
+            f.write(f"{srid},{string_id},{str(is_geographic).lower()}\n")

Review Comment:
   Writing the row with manual `f.write` means a `string_id` (or other field) containing a comma or newline would break the CSV. Consider using `csv.writer` for the data rows so that fields are properly quoted when needed; the comment lines and header can stay as plain `f.write`.
########## dev/generate_srs_registry.py: ##########
@@ -0,0 +1,401 @@
+def write_csv(entries, proj_version, output_path):
+    """Write SRS entries to a CSV file with a metadata header."""
+    os.makedirs(os.path.dirname(output_path), exist_ok=True)
+    with open(output_path, "w") as f:

Review Comment:
   Consider opening the file with `encoding="utf-8"` so that the output encoding is consistent across environments (the PROJ source data is UTF-8).
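   A hypothetical standalone illustration (the file name is made up; in the PR this would be the `open` call inside `write_csv`):

   ```python
   # Explicit encoding writes UTF-8 regardless of the platform's locale
   # default (see PEP 597 on the pitfalls of locale-dependent encoding).
   # newline="" is also worth adding if the handle is passed to csv.writer.
   with open("srs_registry.csv", "w", encoding="utf-8", newline="") as f:
       f.write("srid,string_id,is_geographic\n")
   ```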
-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
