This is an automated email from the ASF dual-hosted git repository.

jiayu pushed a commit to branch branch-1.7.0
in repository https://gitbox.apache.org/repos/asf/sedona.git

commit eff60c5593b8f4a60a42b6d32d891b9d3d603662
Author: John Bampton <[email protected]>
AuthorDate: Tue Feb 11 13:07:44 2025 +1000

    [DOCS] Run Python black on Markdown code blocks (#1797)
---
 README.md                                       | 38 ++++++++---
 docs/api/sql/Raster-visualizer.md               |  7 ++-
 docs/api/sql/Spider.md                          | 84 +++++++++++++++++--------
 docs/setup/azure-synapse-analytics.md           | 23 ++++---
 docs/setup/databricks.md                        |  1 +
 docs/setup/install-python.md                    | 41 +++++++-----
 docs/setup/release-notes.md                     | 18 ++++--
 docs/tutorial/concepts/clustering-algorithms.md | 33 +++++-----
 docs/tutorial/raster.md                         | 37 +++++++----
 9 files changed, 191 insertions(+), 91 deletions(-)

diff --git a/README.md b/README.md
index efb3b39dd6..9b25c8685c 100644
--- a/README.md
+++ b/README.md
@@ -74,25 +74,43 @@ This example loads NYC taxi trip records and taxi zone 
information stored as .CS
 #### Load NYC taxi trips and taxi zones data from CSV Files Stored on AWS S3
 
 ```python
-taxidf = sedona.read.format('csv').option("header","true").option("delimiter", 
",").load("s3a://your-directory/data/nyc-taxi-data.csv")
-taxidf = taxidf.selectExpr('ST_Point(CAST(Start_Lon AS Decimal(24,20)), 
CAST(Start_Lat AS Decimal(24,20))) AS pickup', 'Trip_Pickup_DateTime', 
'Payment_Type', 'Fare_Amt')
+taxidf = (
+    sedona.read.format("csv")
+    .option("header", "true")
+    .option("delimiter", ",")
+    .load("s3a://your-directory/data/nyc-taxi-data.csv")
+)
+taxidf = taxidf.selectExpr(
+    "ST_Point(CAST(Start_Lon AS Decimal(24,20)), CAST(Start_Lat AS 
Decimal(24,20))) AS pickup",
+    "Trip_Pickup_DateTime",
+    "Payment_Type",
+    "Fare_Amt",
+)
 ```
 
 ```python
-zoneDf = sedona.read.format('csv').option("delimiter", 
",").load("s3a://your-directory/data/TIGER2018_ZCTA5.csv")
-zoneDf = zoneDf.selectExpr('ST_GeomFromWKT(_c0) as zone', '_c1 as zipcode')
+zoneDf = (
+    sedona.read.format("csv")
+    .option("delimiter", ",")
+    .load("s3a://your-directory/data/TIGER2018_ZCTA5.csv")
+)
+zoneDf = zoneDf.selectExpr("ST_GeomFromWKT(_c0) as zone", "_c1 as zipcode")
 ```
 
 #### Spatial SQL query to only return Taxi trips in Manhattan
 
 ```python
-taxidf_mhtn = 
taxidf.where('ST_Contains(ST_PolygonFromEnvelope(-74.01,40.73,-73.93,40.79), 
pickup)')
+taxidf_mhtn = taxidf.where(
+    "ST_Contains(ST_PolygonFromEnvelope(-74.01,40.73,-73.93,40.79), pickup)"
+)
 ```
 
 #### Spatial Join between Taxi Dataframe and Zone Dataframe to Find taxis in 
each zone
 
 ```python
-taxiVsZone = sedona.sql('SELECT zone, zipcode, pickup, Fare_Amt FROM zoneDf, 
taxiDf WHERE ST_Contains(zone, pickup)')
+taxiVsZone = sedona.sql(
+    "SELECT zone, zipcode, pickup, Fare_Amt FROM zoneDf, taxiDf WHERE 
ST_Contains(zone, pickup)"
+)
 ```
 
 #### Show a map of the loaded Spatial Dataframes using GeoPandas
@@ -101,14 +119,14 @@ taxiVsZone = sedona.sql('SELECT zone, zipcode, pickup, 
Fare_Amt FROM zoneDf, tax
 zoneGpd = gpd.GeoDataFrame(zoneDf.toPandas(), geometry="zone")
 taxiGpd = gpd.GeoDataFrame(taxidf.toPandas(), geometry="pickup")
 
-zone = zoneGpd.plot(color='yellow', edgecolor='black', zorder=1)
-zone.set_xlabel('Longitude (degrees)')
-zone.set_ylabel('Latitude (degrees)')
+zone = zoneGpd.plot(color="yellow", edgecolor="black", zorder=1)
+zone.set_xlabel("Longitude (degrees)")
+zone.set_ylabel("Latitude (degrees)")
 
 zone.set_xlim(-74.1, -73.8)
 zone.set_ylim(40.65, 40.9)
 
-taxi = taxiGpd.plot(ax=zone, alpha=0.01, color='red', zorder=3)
+taxi = taxiGpd.plot(ax=zone, alpha=0.01, color="red", zorder=3)
 ```
 
 ## Docker image
diff --git a/docs/api/sql/Raster-visualizer.md 
b/docs/api/sql/Raster-visualizer.md
index 7dc20a94ff..7ffba49bc1 100644
--- a/docs/api/sql/Raster-visualizer.md
+++ b/docs/api/sql/Raster-visualizer.md
@@ -59,9 +59,14 @@ Example:
 
 ```python
 from sedona.raster_utils.SedonaUtils import SedonaUtils
+
 # Or from sedona.spark import *
 
-df = sedona.read.format('binaryFile').load(DATA_DIR + 
'raster.tiff').selectExpr("RS_FromGeoTiff(content) as raster")
+df = (
+    sedona.read.format("binaryFile")
+    .load(DATA_DIR + "raster.tiff")
+    .selectExpr("RS_FromGeoTiff(content) as raster")
+)
 htmlDF = df.selectExpr("RS_AsImage(raster, 500) as raster_image")
 SedonaUtils.display_image(htmlDF)
 ```
diff --git a/docs/api/sql/Spider.md b/docs/api/sql/Spider.md
index 0b313ff6fb..d77e50aac6 100644
--- a/docs/api/sql/Spider.md
+++ b/docs/api/sql/Spider.md
@@ -5,9 +5,18 @@ Sedona offers a spatial data generator called Spider. It is a 
data source that g
 Once you have your [`SedonaContext` object created](../Overview#quick-start), 
you can create a DataFrame with the `spider` data source.
 
 ```python
-df_random_points = sedona.read.format("spider").load(n=1000, 
distribution='uniform')
-df_random_boxes = sedona.read.format("spider").load(n=1000, 
distribution='gaussian', geometryType='box', maxWidth=0.05, maxHeight=0.05)
-df_random_polygons = sedona.read.format("spider").load(n=1000, 
distribution='bit', geometryType='polygon', minSegment=3, maxSegment=5, 
maxSize=0.1)
+df_random_points = sedona.read.format("spider").load(n=1000, 
distribution="uniform")
+df_random_boxes = sedona.read.format("spider").load(
+    n=1000, distribution="gaussian", geometryType="box", maxWidth=0.05, 
maxHeight=0.05
+)
+df_random_polygons = sedona.read.format("spider").load(
+    n=1000,
+    distribution="bit",
+    geometryType="polygon",
+    minSegment=3,
+    maxSegment=5,
+    maxSize=0.1,
+)
 ```
 
 Now we have three DataFrames with random spatial data. We can show the first 
three rows of the `df_random_points` DataFrame to verify the data is generated 
correctly.
@@ -38,22 +47,24 @@ import matplotlib.pyplot as plt
 import geopandas as gpd
 
 # Convert DataFrames to GeoDataFrames
-gdf_random_points = gpd.GeoDataFrame(df_random_points.toPandas(), 
geometry='geometry')
-gdf_random_boxes = gpd.GeoDataFrame(df_random_boxes.toPandas(), 
geometry='geometry')
-gdf_random_polygons = gpd.GeoDataFrame(df_random_polygons.toPandas(), 
geometry='geometry')
+gdf_random_points = gpd.GeoDataFrame(df_random_points.toPandas(), 
geometry="geometry")
+gdf_random_boxes = gpd.GeoDataFrame(df_random_boxes.toPandas(), 
geometry="geometry")
+gdf_random_polygons = gpd.GeoDataFrame(
+    df_random_polygons.toPandas(), geometry="geometry"
+)
 
 # Create a figure and a set of subplots
 fig, axes = plt.subplots(1, 3, figsize=(15, 5))
 
 # Plot each GeoDataFrame on a different subplot
-gdf_random_points.plot(ax=axes[0], color='blue', markersize=5)
-axes[0].set_title('Random Points')
+gdf_random_points.plot(ax=axes[0], color="blue", markersize=5)
+axes[0].set_title("Random Points")
 
-gdf_random_boxes.boundary.plot(ax=axes[1], color='red')
-axes[1].set_title('Random Boxes')
+gdf_random_boxes.boundary.plot(ax=axes[1], color="red")
+axes[1].set_title("Random Boxes")
 
-gdf_random_polygons.boundary.plot(ax=axes[2], color='green')
-axes[2].set_title('Random Polygons')
+gdf_random_polygons.boundary.plot(ax=axes[2], color="green")
+axes[2].set_title("Random Polygons")
 
 # Adjust the layout
 plt.tight_layout()
@@ -103,8 +114,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df = sedona.read.format("spider").load(n=300, distribution='uniform', 
geometryType='box', maxWidth=0.05, maxHeight=0.05)
-gpd.GeoDataFrame(df.toPandas(), geometry='geometry').boundary.plot()
+
+df = sedona.read.format("spider").load(
+    n=300, distribution="uniform", geometryType="box", maxWidth=0.05, 
maxHeight=0.05
+)
+gpd.GeoDataFrame(df.toPandas(), geometry="geometry").boundary.plot()
 ```
 
 ![Uniform Distribution](../../image/spider/spider-uniform.png)
@@ -126,8 +140,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df = sedona.read.format("spider").load(n=300, distribution='gaussian', 
geometryType='polygon', maxSize=0.05)
-gpd.GeoDataFrame(df.toPandas(), geometry='geometry').boundary.plot()
+
+df = sedona.read.format("spider").load(
+    n=300, distribution="gaussian", geometryType="polygon", maxSize=0.05
+)
+gpd.GeoDataFrame(df.toPandas(), geometry="geometry").boundary.plot()
 ```
 
 ![Gaussian Distribution](../../image/spider/spider-gaussian.png)
@@ -151,8 +168,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df = sedona.read.format("spider").load(n=300, distribution='bit', 
geometryType='point', probability=0.2, digits=10)
-gpd.GeoDataFrame(df.toPandas(), geometry='geometry').plot(markersize=1)
+
+df = sedona.read.format("spider").load(
+    n=300, distribution="bit", geometryType="point", probability=0.2, digits=10
+)
+gpd.GeoDataFrame(df.toPandas(), geometry="geometry").plot(markersize=1)
 ```
 
 ![Bit Distribution](../../image/spider/spider-bit.png)
@@ -176,8 +196,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df = sedona.read.format("spider").load(n=300, distribution='diagonal', 
geometryType='point', percentage=0.5, buffer=0.5)
-gpd.GeoDataFrame(df.toPandas(), geometry='geometry').plot(markersize=1)
+
+df = sedona.read.format("spider").load(
+    n=300, distribution="diagonal", geometryType="point", percentage=0.5, 
buffer=0.5
+)
+gpd.GeoDataFrame(df.toPandas(), geometry="geometry").plot(markersize=1)
 ```
 
 ![Diagonal Distribution](../../image/spider/spider-diagonal.png)
@@ -199,8 +222,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df = sedona.read.format("spider").load(n=2000, distribution='sierpinski', 
geometryType='point')
-gpd.GeoDataFrame(df.toPandas(), geometry='geometry').plot(markersize=1)
+
+df = sedona.read.format("spider").load(
+    n=2000, distribution="sierpinski", geometryType="point"
+)
+gpd.GeoDataFrame(df.toPandas(), geometry="geometry").plot(markersize=1)
 ```
 
 ![Sierpinski Distribution](../../image/spider/spider-sierpinski.png)
@@ -218,8 +244,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df = sedona.read.format("spider").load(n=300, distribution='parcel', 
dither=0.5, splitRange=0.5)
-gpd.GeoDataFrame(df.toPandas(), geometry='geometry').boundary.plot()
+
+df = sedona.read.format("spider").load(
+    n=300, distribution="parcel", dither=0.5, splitRange=0.5
+)
+gpd.GeoDataFrame(df.toPandas(), geometry="geometry").boundary.plot()
 ```
 
 ![Parcel Distribution](../../image/spider/spider-parcel.png)
@@ -255,8 +284,11 @@ Example:
 
 ```python
 import geopandas as gpd
-df_random_points = sedona.read.format("spider").load(n=1000, 
distribution='uniform', translateX=0.5, translateY=0.5, scaleX=2, scaleY=2)
-gpd.GeoDataFrame(df_random_points.toPandas(), 
geometry='geometry').plot(markersize=1)
+
+df_random_points = sedona.read.format("spider").load(
+    n=1000, distribution="uniform", translateX=0.5, translateY=0.5, scaleX=2, 
scaleY=2
+)
+gpd.GeoDataFrame(df_random_points.toPandas(), 
geometry="geometry").plot(markersize=1)
 ```
 
 The data is now in the region `[0.5, 2.5] x [0.5, 2.5]`.
diff --git a/docs/setup/azure-synapse-analytics.md 
b/docs/setup/azure-synapse-analytics.md
index b502fde456..98a47bb8e9 100644
--- a/docs/setup/azure-synapse-analytics.md
+++ b/docs/setup/azure-synapse-analytics.md
@@ -56,14 +56,23 @@ Start your notebook with:
 ```python
 from sedona.spark import SedonaContext
 
-config = SedonaContext.builder() \
-    .config('spark.jars.packages',
-            'org.apache.sedona:sedona-spark-shaded-3.4_2.12-1.6.1,'
-            'org.datasyslab:geotools-wrapper-1.6.1-28.2') \
-    .config("spark.serializer","org.apache.spark.serializer.KryoSerializer") \
-    .config("spark.kryo.registrator", 
"org.apache.sedona.core.serde.SedonaKryoRegistrator") \
-    .config("spark.sql.extensions", 
"org.apache.sedona.viz.sql.SedonaVizExtensions,org.apache.sedona.sql.SedonaSqlExtensions")
 \
+config = (
+    SedonaContext.builder()
+    .config(
+        "spark.jars.packages",
+        "org.apache.sedona:sedona-spark-shaded-3.4_2.12-1.6.1,"
+        "org.datasyslab:geotools-wrapper-1.6.1-28.2",
+    )
+    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
+    .config(
+        "spark.kryo.registrator", 
"org.apache.sedona.core.serde.SedonaKryoRegistrator"
+    )
+    .config(
+        "spark.sql.extensions",
+        
"org.apache.sedona.viz.sql.SedonaVizExtensions,org.apache.sedona.sql.SedonaSqlExtensions",
+    )
     .getOrCreate()
+)
 
 sedona = SedonaContext.create(config)
 ```
diff --git a/docs/setup/databricks.md b/docs/setup/databricks.md
index a9f3abf602..a6f3d8bdd6 100644
--- a/docs/setup/databricks.md
+++ b/docs/setup/databricks.md
@@ -37,6 +37,7 @@ SedonaSQLRegistrator.registerAll(spark)
 
 ```python
 from sedona.register.geo_registrator import SedonaRegistrator
+
 SedonaRegistrator.registerAll(spark)
 ```
 
diff --git a/docs/setup/install-python.md b/docs/setup/install-python.md
index 21f9636aab..c90d313e5e 100644
--- a/docs/setup/install-python.md
+++ b/docs/setup/install-python.md
@@ -45,12 +45,20 @@ You can get it using one of the following methods:
 
 ```python
 from sedona.spark import *
-config = SedonaContext.builder(). \
-    config('spark.jars.packages',
-           'org.apache.sedona:sedona-spark-3.3_2.12:{{ sedona.current_version 
}},'
-           'org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}'). \
-    config('spark.jars.repositories', 
'https://artifacts.unidata.ucar.edu/repository/unidata-all'). \
-    getOrCreate()
+
+config = (
+    SedonaContext.builder()
+    .config(
+        "spark.jars.packages",
+        "org.apache.sedona:sedona-spark-3.3_2.12:{{ sedona.current_version }},"
+        "org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}",
+    )
+    .config(
+        "spark.jars.repositories",
+        "https://artifacts.unidata.ucar.edu/repository/unidata-all",
+    )
+    .getOrCreate()
+)
 sedona = SedonaContext.create(config)
 ```
 
@@ -62,15 +70,18 @@ SedonaRegistrator is deprecated in Sedona 1.4.1 and later 
versions. Please use t
 from pyspark.sql import SparkSession
 from sedona.register import SedonaRegistrator
 from sedona.utils import SedonaKryoRegistrator, KryoSerializer
-spark = SparkSession. \
-    builder. \
-    appName('appName'). \
-    config("spark.serializer", KryoSerializer.getName). \
-    config("spark.kryo.registrator", SedonaKryoRegistrator.getName). \
-    config('spark.jars.packages',
-           'org.apache.sedona:sedona-spark-shaded-3.3_2.12:{{ 
sedona.current_version }},'
-           'org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}'). \
-    getOrCreate()
+
+spark = (
+    SparkSession.builder.appName("appName")
+    .config("spark.serializer", KryoSerializer.getName)
+    .config("spark.kryo.registrator", SedonaKryoRegistrator.getName)
+    .config(
+        "spark.jars.packages",
+        "org.apache.sedona:sedona-spark-shaded-3.3_2.12:{{ 
sedona.current_version }},"
+        "org.datasyslab:geotools-wrapper:{{ sedona.current_geotools }}",
+    )
+    .getOrCreate()
+)
 SedonaRegistrator.registerAll(spark)
 ```
 
diff --git a/docs/setup/release-notes.md b/docs/setup/release-notes.md
index 1bf7f76584..20b6086995 100644
--- a/docs/setup/release-notes.md
+++ b/docs/setup/release-notes.md
@@ -373,8 +373,10 @@ Sedona 1.6.0 is compiled against Spark 3.3 / Spark 3.4 / 
Spark 3.5, Flink 1.19,
 ```python
 from pyspark.sql.types import DoubleType
 
+
 def mean_udf(raster):
-       return float(raster.as_numpy().mean())
+    return float(raster.as_numpy().mean())
+
 
 sedona.udf.register("mean_udf", mean_udf, DoubleType())
 df_raster.withColumn("mean", expr("mean_udf(rast)")).show()
@@ -1045,11 +1047,15 @@ Sedona 1.4.1 is compiled against Spark 3.3 / Spark 3.4 
/ Flink 1.12, Java 8.
         ```python
         from sedona.spark import *
 
-        config = SedonaContext.builder().\
-           config('spark.jars.packages',
-               'org.apache.sedona:sedona-spark-shaded-3.3_2.12:1.4.1,'
-               'org.datasyslab:geotools-wrapper:1.4.0-28.2'). \
-           getOrCreate()
+        config = (
+            SedonaContext.builder()
+            .config(
+                "spark.jars.packages",
+                "org.apache.sedona:sedona-spark-shaded-3.3_2.12:1.4.1,"
+                "org.datasyslab:geotools-wrapper:1.4.0-28.2",
+            )
+            .getOrCreate()
+        )
         sedona = SedonaContext.create(config)
         sedona.sql("SELECT ST_GeomFromWKT(XXX) FROM")
         ```
diff --git a/docs/tutorial/concepts/clustering-algorithms.md 
b/docs/tutorial/concepts/clustering-algorithms.md
index 830b0667ee..79cb72cd8f 100644
--- a/docs/tutorial/concepts/clustering-algorithms.md
+++ b/docs/tutorial/concepts/clustering-algorithms.md
@@ -50,21 +50,24 @@ Let’s create a Spark DataFrame with this data and then run 
the clustering with
 
 ```python
 df = (
-    sedona.createDataFrame([
-        (1, 8.0, 2.0),
-        (2, 2.6, 4.0),
-        (3, 2.5, 4.0),
-        (4, 8.5, 2.5),
-        (5, 2.8, 4.3),
-        (6, 12.8, 4.5),
-        (7, 2.5, 4.2),
-        (8, 8.2, 2.5),
-        (9, 8.0, 3.0),
-        (10, 1.0, 5.0),
-        (11, 8.0, 2.5),
-        (12, 5.0, 6.0),
-        (13, 4.0, 3.0),
-    ], ["id", "x", "y"])
+    sedona.createDataFrame(
+        [
+            (1, 8.0, 2.0),
+            (2, 2.6, 4.0),
+            (3, 2.5, 4.0),
+            (4, 8.5, 2.5),
+            (5, 2.8, 4.3),
+            (6, 12.8, 4.5),
+            (7, 2.5, 4.2),
+            (8, 8.2, 2.5),
+            (9, 8.0, 3.0),
+            (10, 1.0, 5.0),
+            (11, 8.0, 2.5),
+            (12, 5.0, 6.0),
+            (13, 4.0, 3.0),
+        ],
+        ["id", "x", "y"],
+    )
 ).withColumn("point", ST_Point(col("x"), col("y")))
 ```
 
diff --git a/docs/tutorial/raster.md b/docs/tutorial/raster.md
index 7d7df586eb..d7a05a4391 100644
--- a/docs/tutorial/raster.md
+++ b/docs/tutorial/raster.md
@@ -260,7 +260,12 @@ For multiple raster data files use the following code to 
load the data [from pat
 
 === "Python"
     ```python
-    rawDf = sedona.read.format("binaryFile").option("recursiveFileLookup", 
"true").option("pathGlobFilter", "*.tif*").load(path_to_raster_data_folder)
+    rawDf = (
+        sedona.read.format("binaryFile")
+        .option("recursiveFileLookup", "true")
+        .option("pathGlobFilter", "*.tif*")
+        .load(path_to_raster_data_folder)
+    )
     rawDf.createOrReplaceTempView("rawdf")
     rawDf.show()
     ```
@@ -589,7 +594,11 @@ Sedona allows collecting Dataframes with raster columns 
and working with them lo
 The raster objects are represented as `SedonaRaster` objects in Python, which 
can be used to perform raster operations.
 
 ```python
-df_raster = 
sedona.read.format("binaryFile").load("/path/to/raster.tif").selectExpr("RS_FromGeoTiff(content)
 as rast")
+df_raster = (
+    sedona.read.format("binaryFile")
+    .load("/path/to/raster.tif")
+    .selectExpr("RS_FromGeoTiff(content) as rast")
+)
 rows = df_raster.collect()
 raster = rows[0].rast
 raster  # <sedona.raster.sedona_raster.InDbSedonaRaster at 0x1618fb1f0>
@@ -598,18 +607,18 @@ raster  # <sedona.raster.sedona_raster.InDbSedonaRaster 
at 0x1618fb1f0>
 You can retrieve the metadata of the raster by accessing the properties of the 
`SedonaRaster` object.
 
 ```python
-raster.width        # width of the raster
-raster.height       # height of the raster
-raster.affine_trans # affine transformation matrix
-raster.crs_wkt      # coordinate reference system as WKT
+raster.width  # width of the raster
+raster.height  # height of the raster
+raster.affine_trans  # affine transformation matrix
+raster.crs_wkt  # coordinate reference system as WKT
 ```
 
 You can get a numpy array containing the band data of the raster using the 
`as_numpy` or `as_numpy_masked` method. The
 band data is organized in CHW order.
 
 ```python
-raster.as_numpy()        # numpy array of the raster
-raster.as_numpy_masked() # numpy array with nodata values masked as nan
+raster.as_numpy()  # numpy array of the raster
+raster.as_numpy_masked()  # numpy array with nodata values masked as nan
 ```
 
 If you want to work with the raster data using `rasterio`, you can retrieve a 
`rasterio.DatasetReader` object using the
@@ -621,7 +630,7 @@ If you want to work with the raster data using `rasterio`, 
you can retrieve a `r
 ```python
 ds = raster.as_rasterio()  # rasterio.DatasetReader object
 # Work with the raster using rasterio
-band1 = ds.read(1)         # read the first band
+band1 = ds.read(1)  # read the first band
 ```
 
 ## Writing Python UDF to work with raster data
@@ -632,9 +641,11 @@ return any Spark data type as output. This is an example 
of a Python UDF that ca
 ```python
 from pyspark.sql.types import DoubleType
 
+
 def mean_udf(raster):
     return float(raster.as_numpy().mean())
 
+
 sedona.udf.register("mean_udf", mean_udf, DoubleType())
 df_raster.withColumn("mean", expr("mean_udf(rast)")).show()
 ```
@@ -655,13 +666,17 @@ objects yet. However, you can write a UDF that returns 
the band data as an array
 from pyspark.sql.types import ArrayType, DoubleType
 import numpy as np
 
+
 def mask_udf(raster):
-    band1 = raster.as_numpy()[0,:,:]
+    band1 = raster.as_numpy()[0, :, :]
     mask = (band1 < 1400).astype(np.float64)
     return mask.flatten().tolist()
 
+
 sedona.udf.register("mask_udf", mask_udf, ArrayType(DoubleType()))
-df_raster.withColumn("mask", expr("mask_udf(rast)")).withColumn("mask_rast", 
expr("RS_MakeRaster(rast, 'I', mask)")).show()
+df_raster.withColumn("mask", expr("mask_udf(rast)")).withColumn(
+    "mask_rast", expr("RS_MakeRaster(rast, 'I', mask)")
+).show()
 ```
 
 ```

Reply via email to