zhengruifeng commented on code in PR #49338:
URL: https://github.com/apache/spark/pull/49338#discussion_r1900491893
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15341,13 +15341,32 @@ def regexp_count(str: "ColumnOrName", regexp: "ColumnOrName") -> Column:
 
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("1a 2b 14m", r"\d+")], ["str", "regexp"])
-    >>> df.select(regexp_count('str', lit(r'\d+')).alias('d')).collect()
-    [Row(d=3)]
-    >>> df.select(regexp_count('str', lit(r'mmm')).alias('d')).collect()
-    [Row(d=0)]
-    >>> df.select(regexp_count("str", col("regexp")).alias('d')).collect()
-    [Row(d=3)]
+    >>> df.select('*', sf.regexp_count('str', sf.lit(r'\d+'))).show()
+    +---------+------+----------------------+
+    |      str|regexp|regexp_count(str, \d+)|
+    +---------+------+----------------------+
+    |1a 2b 14m|   \d+|                     3|
+    +---------+------+----------------------+
+    >>> df.select('*', sf.regexp_count('str', sf.lit(r'mmm'))).show()
+    +---------+------+----------------------+
+    |      str|regexp|regexp_count(str, mmm)|
+    +---------+------+----------------------+
+    |1a 2b 14m|   \d+|                     0|
+    +---------+------+----------------------+

Review Comment:
```suggestion
    +---------+------+----------------------+
```
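For readers skimming the archive: the hunk above swaps `.collect()` doctests for `.show()` tables. Below is a minimal, self-contained sketch of the same `regexp_count` calls. The `local[1]` session setup and app name are my additions for illustration; the doctests themselves rely on the docs' shared `spark` fixture instead.

```python
from pyspark.sql import SparkSession
from pyspark.sql import functions as sf

# Local session for illustration only; the doctests use a preconfigured `spark`.
spark = SparkSession.builder.master("local[1]").appName("regexp_count_demo").getOrCreate()

df = spark.createDataFrame([("1a 2b 14m", r"\d+")], ["str", "regexp"])

# Literal pattern: three digit runs ("1", "2", "14") -> 3.
df.select("*", sf.regexp_count("str", sf.lit(r"\d+"))).show()

# A non-matching pattern returns 0, not null.
df.select("*", sf.regexp_count("str", sf.lit("mmm"))).show()

# The pattern may also come from another column.
df.select("*", sf.regexp_count("str", sf.col("regexp"))).show()

spark.stop()
```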
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15406,29 +15442,57 @@ def regexp_extract_all(
     Parameters
     ----------
-    str : :class:`~pyspark.sql.Column` or str
+    str : :class:`~pyspark.sql.Column` or column name
         target column to work on.
-    regexp : :class:`~pyspark.sql.Column` or str
+    regexp : :class:`~pyspark.sql.Column` or column name
         regex pattern to apply.
-    idx : int, optional
+    idx : :class:`~pyspark.sql.Column` or int, optional
         matched group id.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
         all strings in the `str` that match a Java regex and corresponding to the regex group index.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract`
+
     Examples
    --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("100-200, 300-400", r"(\d+)-(\d+)")], ["str", "regexp"])
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)')).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 1).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 2).alias('d')).collect()
-    [Row(d=['200', '400'])]
-    >>> df.select(regexp_extract_all('str', col("regexp")).alias('d')).collect()
-    [Row(d=['100', '300'])]
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), sf.lit(1))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+

Review Comment:
```suggestion
    +----------------+-----------+---------------------------------------+
```

##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15341,13 +15341,32 @@ def regexp_count(str: "ColumnOrName", regexp: "ColumnOrName") -> Column:
 
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("1a 2b 14m", r"\d+")], ["str", "regexp"])
-    >>> df.select(regexp_count('str', lit(r'\d+')).alias('d')).collect()
-    [Row(d=3)]
-    >>> df.select(regexp_count('str', lit(r'mmm')).alias('d')).collect()
-    [Row(d=0)]
-    >>> df.select(regexp_count("str", col("regexp")).alias('d')).collect()
-    [Row(d=3)]
+    >>> df.select('*', sf.regexp_count('str', sf.lit(r'\d+'))).show()
+    +---------+------+----------------------+
+    |      str|regexp|regexp_count(str, \d+)|
+    +---------+------+----------------------+
+    |1a 2b 14m|   \d+|                     3|
+    +---------+------+----------------------+
+    >>> df.select('*', sf.regexp_count('str', sf.lit(r'mmm'))).show()
+    +---------+------+----------------------+
+    |      str|regexp|regexp_count(str, mmm)|
+    +---------+------+----------------------+
+    |1a 2b 14m|   \d+|                     0|
+    +---------+------+----------------------+
+    >>> df.select('*', sf.regexp_count("str", sf.col("regexp"))).show()
+    +---------+------+-------------------------+
+    |      str|regexp|regexp_count(str, regexp)|
+    +---------+------+-------------------------+
+    |1a 2b 14m|   \d+|                        3|
+    +---------+------+-------------------------+

Review Comment:
```suggestion
    +---------+------+-------------------------+
```

##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15341,13 +15341,32 @@ def regexp_count(str: "ColumnOrName", regexp: "ColumnOrName") -> Column:
 
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("1a 2b 14m", r"\d+")], ["str", "regexp"])
-    >>> df.select(regexp_count('str', lit(r'\d+')).alias('d')).collect()
-    [Row(d=3)]
-    >>> df.select(regexp_count('str', lit(r'mmm')).alias('d')).collect()
-    [Row(d=0)]
-    >>> df.select(regexp_count("str", col("regexp")).alias('d')).collect()
-    [Row(d=3)]
+    >>> df.select('*', sf.regexp_count('str', sf.lit(r'\d+'))).show()
+    +---------+------+----------------------+
+    |      str|regexp|regexp_count(str, \d+)|
+    +---------+------+----------------------+
+    |1a 2b 14m|   \d+|                     3|
+    +---------+------+----------------------+

Review Comment:
```suggestion
    +---------+------+----------------------+
```

##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15376,17 +15395,34 @@ def regexp_extract(str: "ColumnOrName", pattern: str, idx: int) -> Column:
     :class:`~pyspark.sql.Column`
         matched value specified by `idx` group id.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract_all`
+
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([('100-200',)], ['str'])
-    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
-    [Row(d='100')]
+    >>> df.select('*', sf.regexp_extract('str', r'(\d+)-(\d+)', 1)).show()
+    +-------+-----------------------------------+
+    |    str|regexp_extract(str, (\d+)-(\d+), 1)|
+    +-------+-----------------------------------+
+    |100-200|                                100|
+    +-------+-----------------------------------+

Review Comment:
```suggestion
    +-------+-----------------------------------+
```
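A similar standalone sketch for `regexp_extract`, which takes the pattern and group index as plain Python values; the non-matching case (reviewed again further down) yields an empty string rather than null. The local session setup is my assumption, not part of the diff.

```python
from pyspark.sql import SparkSession
from pyspark.sql import functions as sf

spark = SparkSession.builder.master("local[1]").getOrCreate()

# Group 1 of (\d+)-(\d+) captures the first number of the pair.
df = spark.createDataFrame([("100-200",)], ["str"])
df.select("*", sf.regexp_extract("str", r"(\d+)-(\d+)", 1)).show()

# No match: regexp_extract returns '' (empty string), not null.
df = spark.createDataFrame([("foo",)], ["str"])
df.select("*", sf.regexp_extract("str", r"(\d+)", 1)).show()

spark.stop()
```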
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15406,29 +15442,57 @@ def regexp_extract_all(
     Parameters
     ----------
-    str : :class:`~pyspark.sql.Column` or str
+    str : :class:`~pyspark.sql.Column` or column name
         target column to work on.
-    regexp : :class:`~pyspark.sql.Column` or str
+    regexp : :class:`~pyspark.sql.Column` or column name
         regex pattern to apply.
-    idx : int, optional
+    idx : :class:`~pyspark.sql.Column` or int, optional
         matched group id.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
         all strings in the `str` that match a Java regex and corresponding to the regex group index.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract`
+
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("100-200, 300-400", r"(\d+)-(\d+)")], ["str", "regexp"])
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)')).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 1).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 2).alias('d')).collect()
-    [Row(d=['200', '400'])]
-    >>> df.select(regexp_extract_all('str', col("regexp")).alias('d')).collect()
-    [Row(d=['100', '300'])]
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), sf.lit(1))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), 2)).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 2)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [200, 400]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.col("regexp"))).show()
+    +----------------+-----------+----------------------------------+
+    |             str|     regexp|regexp_extract_all(str, regexp, 1)|
+    +----------------+-----------+----------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                        [100, 300]|
+    +----------------+-----------+----------------------------------+

Review Comment:
```suggestion
    +----------------+-----------+----------------------------------+
```
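And for `regexp_extract_all`, whose updated Parameters section above documents `idx` as a Column or int: a sketch covering the default group, an explicit group, and a pattern read from another column (again assuming a local session, which is not part of the diff).

```python
from pyspark.sql import SparkSession
from pyspark.sql import functions as sf

spark = SparkSession.builder.master("local[1]").getOrCreate()

df = spark.createDataFrame([("100-200, 300-400", r"(\d+)-(\d+)")], ["str", "regexp"])

# Default group index is 1: the first capture of every match -> ['100', '300'].
df.select("*", sf.regexp_extract_all("str", sf.lit(r"(\d+)-(\d+)"))).show()

# Per the updated docs, idx can be an int or a Column such as sf.lit(2)
# -> the second captures: ['200', '400'].
df.select("*", sf.regexp_extract_all("str", sf.lit(r"(\d+)-(\d+)"), sf.lit(2))).show()

# The pattern itself can be read from another column.
df.select("*", sf.regexp_extract_all("str", sf.col("regexp"))).show()

spark.stop()
```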
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15406,29 +15442,57 @@ def regexp_extract_all(
     Parameters
     ----------
-    str : :class:`~pyspark.sql.Column` or str
+    str : :class:`~pyspark.sql.Column` or column name
         target column to work on.
-    regexp : :class:`~pyspark.sql.Column` or str
+    regexp : :class:`~pyspark.sql.Column` or column name
         regex pattern to apply.
-    idx : int, optional
+    idx : :class:`~pyspark.sql.Column` or int, optional
         matched group id.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
         all strings in the `str` that match a Java regex and corresponding to the regex group index.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract`
+
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("100-200, 300-400", r"(\d+)-(\d+)")], ["str", "regexp"])
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)')).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 1).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 2).alias('d')).collect()
-    [Row(d=['200', '400'])]
-    >>> df.select(regexp_extract_all('str', col("regexp")).alias('d')).collect()
-    [Row(d=['100', '300'])]
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), sf.lit(1))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), 2)).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 2)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [200, 400]|
+    +----------------+-----------+---------------------------------------+

Review Comment:
```suggestion
    +----------------+-----------+---------------------------------------+
```

##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15376,17 +15395,34 @@ def regexp_extract(str: "ColumnOrName", pattern: str, idx: int) -> Column:
     :class:`~pyspark.sql.Column`
         matched value specified by `idx` group id.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract_all`
+
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([('100-200',)], ['str'])
-    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
-    [Row(d='100')]
+    >>> df.select('*', sf.regexp_extract('str', r'(\d+)-(\d+)', 1)).show()
+    +-------+-----------------------------------+
+    |    str|regexp_extract(str, (\d+)-(\d+), 1)|
+    +-------+-----------------------------------+
+    |100-200|                                100|
+    +-------+-----------------------------------+
     >>> df = spark.createDataFrame([('foo',)], ['str'])
-    >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
-    [Row(d='')]
+    >>> df.select('*', sf.regexp_extract('str', r'(\d+)', 1)).show()
+    +---+-----------------------------+
+    |str|regexp_extract(str, (\d+), 1)|
+    +---+-----------------------------+
+    |foo|                             |
+    +---+-----------------------------+

Review Comment:
```suggestion
    +---+-----------------------------+
```
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15406,29 +15442,57 @@ def regexp_extract_all(
     Parameters
     ----------
-    str : :class:`~pyspark.sql.Column` or str
+    str : :class:`~pyspark.sql.Column` or column name
         target column to work on.
-    regexp : :class:`~pyspark.sql.Column` or str
+    regexp : :class:`~pyspark.sql.Column` or column name
         regex pattern to apply.
-    idx : int, optional
+    idx : :class:`~pyspark.sql.Column` or int, optional
         matched group id.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
         all strings in the `str` that match a Java regex and corresponding to the regex group index.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract`
+
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("100-200, 300-400", r"(\d+)-(\d+)")], ["str", "regexp"])
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)')).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 1).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 2).alias('d')).collect()
-    [Row(d=['200', '400'])]
-    >>> df.select(regexp_extract_all('str', col("regexp")).alias('d')).collect()
-    [Row(d=['100', '300'])]
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), sf.lit(1))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'), 2)).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 2)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [200, 400]|
+    +----------------+-----------+---------------------------------------+
+    >>> df.select('*', sf.regexp_extract_all('str', sf.col("regexp"))).show()
+    +----------------+-----------+----------------------------------+
+    |             str|     regexp|regexp_extract_all(str, regexp, 1)|
+    +----------------+-----------+----------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                        [100, 300]|
+    +----------------+-----------+----------------------------------+
+    >>> df.select('*', sf.regexp_extract_all(sf.col('str'), "regexp")).show()
+    +----------------+-----------+----------------------------------+
+    |             str|     regexp|regexp_extract_all(str, regexp, 1)|
+    +----------------+-----------+----------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                        [100, 300]|
+    +----------------+-----------+----------------------------------+
+

Review Comment:
```suggestion
```
##########
python/pyspark/sql/functions/builtin.py:
##########
@@ -15406,29 +15442,57 @@ def regexp_extract_all(
     Parameters
     ----------
-    str : :class:`~pyspark.sql.Column` or str
+    str : :class:`~pyspark.sql.Column` or column name
         target column to work on.
-    regexp : :class:`~pyspark.sql.Column` or str
+    regexp : :class:`~pyspark.sql.Column` or column name
         regex pattern to apply.
-    idx : int, optional
+    idx : :class:`~pyspark.sql.Column` or int, optional
         matched group id.
 
     Returns
     -------
     :class:`~pyspark.sql.Column`
         all strings in the `str` that match a Java regex and corresponding to the regex group index.
 
+    See Also
+    --------
+    :meth:`pyspark.sql.functions.regexp_extract`
+
     Examples
     --------
+    >>> from pyspark.sql import functions as sf
     >>> df = spark.createDataFrame([("100-200, 300-400", r"(\d+)-(\d+)")], ["str", "regexp"])
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)')).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 1).alias('d')).collect()
-    [Row(d=['100', '300'])]
-    >>> df.select(regexp_extract_all('str', lit(r'(\d+)-(\d+)'), 2).alias('d')).collect()
-    [Row(d=['200', '400'])]
-    >>> df.select(regexp_extract_all('str', col("regexp")).alias('d')).collect()
-    [Row(d=['100', '300'])]
+    >>> df.select('*', sf.regexp_extract_all('str', sf.lit(r'(\d+)-(\d+)'))).show()
+    +----------------+-----------+---------------------------------------+
+    |             str|     regexp|regexp_extract_all(str, (\d+)-(\d+), 1)|
+    +----------------+-----------+---------------------------------------+
+    |100-200, 300-400|(\d+)-(\d+)|                              [100, 300]|
+    +----------------+-----------+---------------------------------------+

Review Comment:
```suggestion
    +----------------+-----------+---------------------------------------+
```

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org