ericm-db commented on code in PR #50123:
URL: https://github.com/apache/spark/pull/50123#discussion_r2001497587


##########
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala:
##########
@@ -2236,31 +2236,35 @@ object SQLConf {
       .booleanConf
       .createWithDefault(true)
 
-  val STATE_STORE_COORDINATOR_MIN_SNAPSHOT_VERSION_DELTA_TO_LOG =
-    buildConf("spark.sql.streaming.stateStore.minSnapshotVersionDeltaToLog")
+  val STATE_STORE_COORDINATOR_SNAPSHOT_DELTA_MULTIPLIER_FOR_MIN_VERSION_DELTA_TO_LOG =
+    buildConf("spark.sql.streaming.stateStore.minSnapshotDeltaMultiplierForMinVersionDeltaToLog")
       .internal()
       .doc(
-        "Minimum number of versions between the most recent uploaded snapshot 
version of a " +
-        "single state store instance and the most recent version across all 
state store " +
-        "instances to log a warning message."
+        "This multiplier determines the minimum version threshold for logging 
warnings when a " +
+        "state store instance falls behind. The coordinator logs a warning if 
a state store's " +
+        "last uploaded snapshot's version lags behind the most recent snapshot 
version by this " +
+        "threshold. The threshold is calculated as the configured minimum 
number of deltas " +
+        "needed to create a snapshot, multiplied by this multiplier."
       )
-      .version("4.0.0")
+      .version("4.1.0")
       .intConf
       .checkValue(k => k >= 1, "Must be greater than or equal to 1")
-      .createWithDefault(30)
+      .createWithDefault(5)
 
-  val STATE_STORE_COORDINATOR_MIN_SNAPSHOT_TIME_DELTA_TO_LOG =
-    buildConf("spark.sql.streaming.stateStore.minSnapshotTimeDeltaToLog")
+  val STATE_STORE_COORDINATOR_MAINTENANCE_MULTIPLIER_FOR_MIN_TIME_DELTA_TO_LOG =

Review Comment:
   Duplicate with above?
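   For reference, here is a minimal sketch of the calculation the new doc string describes. All names are illustrative, not taken from this PR; `minDeltasForSnapshot` stands in for the value of the existing `spark.sql.streaming.stateStore.minDeltasForSnapshot` conf:
   ```scala
   // Illustrative only: mirrors the doc string's formula
   //   threshold = minDeltasForSnapshot * multiplier
   object SnapshotLagWarning {
     def versionThreshold(minDeltasForSnapshot: Int, multiplier: Int): Long =
       minDeltasForSnapshot.toLong * multiplier
   
     // Warn when an instance's last uploaded snapshot lags the most recent
     // snapshot version across all instances by at least the threshold.
     def shouldLogWarning(
         mostRecentVersion: Long,
         lastUploadedVersion: Long,
         minDeltasForSnapshot: Int,
         multiplier: Int): Boolean =
       mostRecentVersion - lastUploadedVersion >=
         versionThreshold(minDeltasForSnapshot, multiplier)
   }
   ```
   With the new default of 5 and a `minDeltasForSnapshot` of 10, for example, the warning would fire once an instance is 50 or more versions behind.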



##########
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala:
##########
@@ -2236,31 +2236,35 @@ object SQLConf {
       .booleanConf
       .createWithDefault(true)
 
-  val STATE_STORE_COORDINATOR_MIN_SNAPSHOT_VERSION_DELTA_TO_LOG =
-    buildConf("spark.sql.streaming.stateStore.minSnapshotVersionDeltaToLog")
+  val STATE_STORE_COORDINATOR_SNAPSHOT_DELTA_MULTIPLIER_FOR_MIN_VERSION_DELTA_TO_LOG =
+    buildConf("spark.sql.streaming.stateStore.minSnapshotDeltaMultiplierForMinVersionDeltaToLog")
       .internal()
       .doc(
-        "Minimum number of versions between the most recent uploaded snapshot 
version of a " +
-        "single state store instance and the most recent version across all 
state store " +
-        "instances to log a warning message."
+        "This multiplier determines the minimum version threshold for logging 
warnings when a " +
+        "state store instance falls behind. The coordinator logs a warning if 
a state store's " +
+        "last uploaded snapshot's version lags behind the most recent snapshot 
version by this " +
+        "threshold. The threshold is calculated as the configured minimum 
number of deltas " +
+        "needed to create a snapshot, multiplied by this multiplier."
       )
-      .version("4.0.0")
+      .version("4.1.0")
       .intConf
       .checkValue(k => k >= 1, "Must be greater than or equal to 1")
-      .createWithDefault(30)
+      .createWithDefault(5)
 
-  val STATE_STORE_COORDINATOR_MIN_SNAPSHOT_TIME_DELTA_TO_LOG =
-    buildConf("spark.sql.streaming.stateStore.minSnapshotTimeDeltaToLog")
+  val STATE_STORE_COORDINATOR_MAINTENANCE_MULTIPLIER_FOR_MIN_TIME_DELTA_TO_LOG =

Review Comment:
   We also don't want to remove that conf


