Ngone51 commented on code in PR #50122:
URL: https://github.com/apache/spark/pull/50122#discussion_r1990890014


##########
core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala:
##########
@@ -862,31 +862,50 @@ class BlockManagerMasterEndpoint(
   private def getLocationsAndStatus(
       blockId: BlockId,
       requesterHost: String): Option[BlockLocationsAndStatus] = {
-    val locations = 
Option(blockLocations.get(blockId)).map(_.toSeq).getOrElse(Seq.empty)
-    val status = locations.headOption.flatMap { bmId =>
-      if (externalShuffleServiceRddFetchEnabled && bmId.port == 
externalShuffleServicePort) {
-        blockStatusByShuffleService.get(bmId).flatMap(m => m.get(blockId))
-      } else {
-        blockManagerInfo.get(bmId).flatMap(_.getStatus(blockId))
+    val allLocations = 
Option(blockLocations.get(blockId)).map(_.toSeq).getOrElse(Seq.empty)
+    val hostLocalLocations = allLocations.filter(bmId => bmId.host == 
requesterHost)
+
+    val blockStatusWithBlockManagerId: Option[(BlockStatus, BlockManagerId)] =
+      (if (externalShuffleServiceRddFetchEnabled) {
+         // if fetching RDD is enabled from the external shuffle service then 
first try to find
+         // the block in the external shuffle service of the same host
+         val location = hostLocalLocations.find(_.port == 
externalShuffleServicePort)
+         location
+           .flatMap(blockStatusByShuffleService.get(_).flatMap(_.get(blockId)))
+           .zip(location)
+       } else {
+         None
+       })
+        .orElse {
+          // if the block is not found via the external shuffle service trying 
to find it in the
+          // executors running on the same host and persisted on the disk
+          // using flatMap on iterators makes the transformation lazy
+          hostLocalLocations.iterator
+            .flatMap { bmId =>
+              blockManagerInfo.get(bmId).flatMap { blockInfo =>
+                blockInfo.getStatus(blockId).map((_, bmId))
+              }
+            }
+            .find(_._1.storageLevel.useDisk)

Review Comment:
   Do we still enforce the disk-first preference over in-memory locations after 
this change? This looks like a behaviour change.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to