ericm-db commented on code in PR #50742:
URL: https://github.com/apache/spark/pull/50742#discussion_r2070607523


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreProvider.scala:
##########
@@ -446,17 +459,48 @@ private[sql] class RocksDBStateStoreProvider
 
   override def stateStoreId: StateStoreId = stateStoreId_
 
-  override def getStore(version: Long, uniqueId: Option[String] = None): 
StateStore = {
+  /**
+   * Creates and returns a state store with the specified parameters.
+   *
+   * @param version The version of the state store to load
+   * @param uniqueId Optional unique identifier for checkpoint
+   * @param readOnly Whether to open the store in read-only mode
+   * @param existingStore Optional existing store to reuse instead of creating 
a new one
+   * @return The loaded state store
+   */
+  private def loadStateStore(
+      version: Long,
+      uniqueId: Option[String],
+      readOnly: Boolean,
+      existingStore: Option[ReadStateStore] = None): StateStore = {
     try {
       if (version < 0) {
         throw QueryExecutionErrors.unexpectedStateStoreVersion(version)
       }
-      rocksDB.load(
-        version,
-        stateStoreCkptId = if (storeConf.enableStateStoreCheckpointIds) 
uniqueId else None)
-      new RocksDBStateStore(version)
-    }
-    catch {
+      try {
+        // Load RocksDB store
+        rocksDB.load(
+          version,
+          stateStoreCkptId = if (storeConf.enableStateStoreCheckpointIds) 
uniqueId else None,
+          readOnly = readOnly)
+
+        // Return appropriate store instance
+        existingStore match {
+          case Some(stateStore: RocksDBStateStore) =>
+            // Reuse existing store for getWriteStore case
+            StateStoreThreadLocalTracker.setUsedForWriteStore(true)

Review Comment:
   Yeah, I wanted to — but I think there would be some abstraction leak, since we don't 'reuse' stores for HDFS; we just open a new one.
   The reason for this is that HDFSBackedStateStore and HDFSBackedReadStateStore are different classes, so we can't directly return the store like we do [here](https://github.com/apache/spark/pull/50742/files#diff-76a55d7a6f0b91a19366e01c919c615a4d13afc1c1c37b017afa9df1c1705218R492)



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to