reswqa commented on code in PR #19960:
URL: https://github.com/apache/flink/pull/19960#discussion_r910812157
##########
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/hybrid/HsResultPartitionReadScheduler.java:
##########

@@ -0,0 +1,424 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.runtime.io.network.partition.hybrid;
+
+import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.configuration.TaskManagerOptions;
+import org.apache.flink.core.memory.MemorySegment;
+import org.apache.flink.runtime.io.disk.BatchShuffleReadBufferPool;
+import org.apache.flink.runtime.io.network.buffer.BufferRecycler;
+import org.apache.flink.util.FatalExitExceptionHandler;
+import org.apache.flink.util.IOUtils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.concurrent.GuardedBy;
+
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.time.Duration;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.flink.util.Preconditions.checkNotNull;
+import static org.apache.flink.util.Preconditions.checkState;
+
+/**
+ * Data reader for HsResultPartition, which schedules {@link HsSubpartitionFileReader}s and reads
+ * data for all downstream tasks.
+ */
+public class HsResultPartitionReadScheduler implements Runnable, BufferRecycler {
+    private static final Logger LOG =
+            LoggerFactory.getLogger(HsResultPartitionReadScheduler.class);
+
+    /**
+     * Default maximum time (5min) to wait when requesting read buffers from the buffer pool
+     * before throwing an exception.
+     */
+    private static final Duration DEFAULT_BUFFER_REQUEST_TIMEOUT = Duration.ofMinutes(5);
+
+    /** Executor to run the shuffle data reading task. */
+    private final Executor ioExecutor;
+
+    /** Maximum number of buffers that can be allocated by this partition reader. */
+    private final int maxRequestedBuffers;
+
+    /**
+     * Maximum time to wait when requesting read buffers from the buffer pool before throwing an
+     * exception.
+     */
+    private final Duration bufferRequestTimeout;
+
+    /** Lock used to synchronize multi-thread access to thread-unsafe fields. */
+    private final Object lock;
+
+    /**
+     * A {@link CompletableFuture} to be completed when this read scheduler, including all of its
+     * resources, is released.
+     */
+    private final CompletableFuture<?> releaseFuture = new CompletableFuture<>();
+
+    /** Buffer pool from which to allocate buffers for shuffle data reading. */
+    private final BatchShuffleReadBufferPool bufferPool;
+
+    private final Path dataFilePath;
+
+    private final HsFileDataIndex dataIndex;
+
+    /** All failed subpartition readers to be released. */
+    @GuardedBy("lock")
+    private final Set<HsSubpartitionFileReader> failedReaders = new HashSet<>();
+
+    /** All readers waiting to read data of different subpartitions. */
+    @GuardedBy("lock")
+    private final Set<HsSubpartitionFileReader> allReaders = new HashSet<>();
+
+    /**
+     * Whether the data reading task is currently running or not. This flag is used when trying
+     * to submit the data reading task.
+     */
+    @GuardedBy("lock")
+    private boolean isRunning;
+
+    /** Number of buffers already allocated and still not recycled by this partition reader. */
+    @GuardedBy("lock")
+    private volatile int numRequestedBuffers;
+
+    /** Whether this reader has been released or not. */
+    @GuardedBy("lock")
+    private volatile boolean isReleased;
+
+    private FileChannel dataFileChannel;
+
+    public HsResultPartitionReadScheduler(
+            int numSubpartitions,
+            BatchShuffleReadBufferPool bufferPool,
+            Executor ioExecutor,
+            Path dataFilePath,
+            HsFileDataIndex dataIndex,
+            Object lock) {
+        this(
+                numSubpartitions,
+                bufferPool,
+                ioExecutor,
+                lock,
+                dataIndex,
+                dataFilePath,
+                DEFAULT_BUFFER_REQUEST_TIMEOUT);
+    }
+
+    public HsResultPartitionReadScheduler(
+            int numSubpartitions,
+            BatchShuffleReadBufferPool bufferPool,
+            Executor ioExecutor,
+            Object lock,
+            HsFileDataIndex dataIndex,
+            Path dataFilePath,
+            Duration bufferRequestTimeout) {
+        this.lock = checkNotNull(lock);
+        this.dataIndex = checkNotNull(dataIndex);
+        this.dataFilePath = checkNotNull(dataFilePath);
+        this.bufferPool = checkNotNull(bufferPool);
+        this.ioExecutor = checkNotNull(ioExecutor);
+        // one partition reader can consume at most Math.max(16M, numSubpartitions) buffers for
+        // data reading (the expected amount per buffer request is 8M), which means larger
+        // parallelism needs more buffers. Currently, this is only an empirical strategy which
+        // cannot be configured.
+        this.maxRequestedBuffers =
+                Math.max(2 * bufferPool.getNumBuffersPerRequest(), numSubpartitions);
+        this.bufferRequestTimeout = checkNotNull(bufferRequestTimeout);
+    }
+
+    @Override
+    public synchronized void run() {
+        Queue<HsSubpartitionFileReader> availableReaders = getAvailableReaders();
+        Queue<MemorySegment> buffers = allocateBuffers(availableReaders);
+        int numBuffersAllocated = buffers.size();
+
+        Set<HsSubpartitionFileReader> finishedReaders = readData(availableReaders, buffers);
+
+        int numBuffersRead = numBuffersAllocated - buffers.size();
+        releaseBuffers(buffers);
+
+        removeFinishedAndFailedReaders(numBuffersRead, finishedReaders);
+    }
+
+    /**
+     * This method is only called by the result partition to create a new subpartition file
+     * reader.
+     */
+    public HsSubpartitionFileReader registerNewSubpartition(
+            int subpartitionId, HsSubpartitionViewNotifier notifier) throws IOException {
+        synchronized (lock) {
+            try {
+                if (allReaders.isEmpty()) {
+                    dataFileChannel = openFileChannel(dataFilePath);
+                }
+            } catch (Throwable throwable) {
+                if (allReaders.isEmpty()) {
+                    closeFileChannel();
+                }
+                throw throwable;
+            }
+
+            HsSubpartitionFileReader subpartitionReader =
+                    new HsSubpartitionFileReader(
+                            subpartitionId, dataFileChannel, notifier, dataIndex);
+            if (allReaders.isEmpty()) {
+                bufferPool.registerRequester(this);
+            }
+            allReaders.add(subpartitionReader);
+
+            mayTriggerReading();
+            return subpartitionReader;
+        }
+    }
+
+    /**
+     * Releases this read scheduler and returns a {@link CompletableFuture} which will be
+     * completed when all resources are released.
+     */
+    public synchronized CompletableFuture<?> release() {
+        List<HsSubpartitionFileReader> pendingReaders;
+        synchronized (lock) {
+            if (isReleased) {
+                return releaseFuture;
+            }
+            isReleased = true;
+
+            failedReaders.addAll(allReaders);
+            pendingReaders = new ArrayList<>(allReaders);
+            mayNotifyReleased();
+        }
+
+        failSubpartitionReaders(
+                pendingReaders,
+                new IllegalStateException("Result partition has already been released."));
+        return releaseFuture;
+    }
+
+    // ------------------------------------------------------------------------
+    //  Internal Methods
+    // ------------------------------------------------------------------------
+
+    private Queue<MemorySegment> allocateBuffers(
+            Queue<HsSubpartitionFileReader> availableReaders) {
+        if (availableReaders.isEmpty()) {
+            return new ArrayDeque<>();
+        }
+
+        try {
+            long timeoutTime = getBufferRequestTimeoutTime();
+            do {
+                List<MemorySegment> buffers = bufferPool.requestBuffers();
+                if (!buffers.isEmpty()) {
+                    return new ArrayDeque<>(buffers);
+                }
+                checkState(!isReleased, "Result partition has already been released.");
+            } while (System.nanoTime() < timeoutTime
+                    || System.nanoTime() < (timeoutTime = getBufferRequestTimeoutTime()));
+
+            if (numRequestedBuffers <= 0) {
+                throw new TimeoutException(
+                        String.format(
+                                "Buffer request timeout, this means there is a fierce contention"
+                                        + " of the batch shuffle read memory, please increase"
+                                        + " '%s'.",
+                                TaskManagerOptions.NETWORK_BATCH_SHUFFLE_READ_MEMORY.key()));
+            }
+        } catch (Throwable throwable) {
+            // fail all pending subpartition readers immediately if any exception occurs
+            failSubpartitionReaders(availableReaders, throwable);
+            LOG.error("Failed to request buffers for data reading.", throwable);
+        }
+        return new ArrayDeque<>();
+    }
+
+    private void mayTriggerReading() {
+        assert Thread.holdsLock(lock);
+
+        if (!isRunning
+                && !allReaders.isEmpty()
+                && numRequestedBuffers + bufferPool.getNumBuffersPerRequest()
+                        <= maxRequestedBuffers
+                && numRequestedBuffers < bufferPool.getAverageBuffersPerRequester()) {
+            isRunning = true;
+            ioExecutor.execute(this);
+        }
+    }
+
+    private void mayNotifyReleased() {
+        assert Thread.holdsLock(lock);
+
+        if (isReleased && allReaders.isEmpty()) {
+            releaseFuture.complete(null);
+        }
+    }
+
+    private long getBufferRequestTimeoutTime() {
+        return bufferPool.getLastBufferOperationTimestamp() + bufferRequestTimeout.toNanos();
+    }
+
+    private void releaseBuffers(Queue<MemorySegment> buffers) {
+        if (!buffers.isEmpty()) {
+            try {
+                bufferPool.recycle(buffers);
+                buffers.clear();
+            } catch (Throwable throwable) {
+                // this should never happen so just trigger fatal error
+                FatalExitExceptionHandler.INSTANCE.uncaughtException(
+                        Thread.currentThread(),
+                        throwable);
+            }
+        }
+    }
+
+    private Queue<HsSubpartitionFileReader> getAvailableReaders() {
+        synchronized (lock) {
+            if (isReleased) {
+                return new ArrayDeque<>();
+            }
+
+            try {
+                for (HsSubpartitionFileReader reader : allReaders) {
+                    reader.prepareForScheduling();
+                }
+            } catch (Throwable throwable) {
+                failSubpartitionReaders(allReaders, throwable);
+            }
+
+            return new PriorityQueue<>(allReaders);
+        }
+    }
+
+    private Set<HsSubpartitionFileReader> readData(
+            Queue<HsSubpartitionFileReader> availableReaders, Queue<MemorySegment> buffers) {
+        Set<HsSubpartitionFileReader> finishedReaders = new HashSet<>();
+
+        while (!availableReaders.isEmpty() && !buffers.isEmpty()) {
+            HsSubpartitionFileReader subpartitionReader = availableReaders.poll();
+            try {
+                if (!subpartitionReader.readBuffers(buffers, this)) {
+                    // there is no resource to release for finished readers currently
+                    finishedReaders.add(subpartitionReader);
+                }
+            } catch (Throwable throwable) {
+                failSubpartitionReaders(
+                        Collections.singletonList(subpartitionReader), throwable);
+                LOG.debug("Failed to read shuffle data.", throwable);
+            }
+        }
+        return finishedReaders;
+    }
+
+    private void failSubpartitionReaders(
+            Collection<HsSubpartitionFileReader> readers, Throwable failureCause) {
+        synchronized (lock) {
+            failedReaders.addAll(readers);
+        }
+
+        for (HsSubpartitionFileReader reader : readers) {
+            try {
+                reader.fail(failureCause);
+            } catch (Throwable throwable) {
+                // this should never happen so just trigger fatal error
+                FatalExitExceptionHandler.INSTANCE.uncaughtException(
+                        Thread.currentThread(), throwable);
+            }
+        }
+    }
+
+    private void removeFinishedAndFailedReaders(
+            int numBuffersRead, Set<HsSubpartitionFileReader> finishedReaders) {
+        synchronized (lock) {
+            for (HsSubpartitionFileReader reader : finishedReaders) {
+                allReaders.remove(reader);
+            }
+            finishedReaders.clear();
+
+            for (HsSubpartitionFileReader reader : failedReaders) {
+                allReaders.remove(reader);
+            }
+            failedReaders.clear();
+
+            if (allReaders.isEmpty()) {
+                // unregister from the buffer pool once, after the last reader is gone,
+                // rather than once per failed reader
+                bufferPool.unregisterRequester(this);
+                closeFileChannel();
+            }
+
+            numRequestedBuffers += numBuffersRead;
+            isRunning = false;
+            mayTriggerReading();
+            mayNotifyReleased();

Review Comment:
   fixed
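
A worked example may help with the maxRequestedBuffers formula in the constructor. The numbers below assume Flink's default 32 KB memory segment size and the 8 MB per-request amount mentioned in the constructor comment; the concrete sizes and the example class are illustrative assumptions, not part of this PR:

    public final class MaxRequestedBuffersExample {
        public static void main(String[] args) {
            // Assumed sizes: 32 KB segments, 8 MB worth of buffers per pool request.
            int bufferSizeBytes = 32 * 1024;
            int numBuffersPerRequest = (8 * 1024 * 1024) / bufferSizeBytes; // 256

            int numSubpartitions = 100;
            int maxRequestedBuffers = Math.max(2 * numBuffersPerRequest, numSubpartitions);

            // 2 * 256 = 512 buffers, i.e. 512 * 32 KB = 16 MB, the "16M" from the
            // constructor comment; only beyond 512 subpartitions does the cap grow
            // to one buffer per subpartition instead.
            System.out.println("maxRequestedBuffers = " + maxRequestedBuffers); // 512
        }
    }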
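
The loop condition in allocateBuffers is subtle: the deadline is derived from the buffer pool's last successful buffer operation, and the second clause recomputes it after the first check fails, so a requester only times out when the pool as a whole has made no progress for the full timeout. A minimal standalone sketch of that idiom, using a hypothetical Pool interface rather than the real BatchShuffleReadBufferPool API:

    import java.util.Optional;
    import java.util.concurrent.TimeoutException;

    final class DeadlineRefreshExample {

        /** Hypothetical stand-in for the buffer pool; not the Flink API. */
        interface Pool {
            Optional<byte[]> tryRequestBuffer();

            long lastBufferOperationNanos();
        }

        static byte[] requestWithTimeout(Pool pool, long timeoutNanos) throws TimeoutException {
            long deadline = pool.lastBufferOperationNanos() + timeoutNanos;
            do {
                Optional<byte[]> buffer = pool.tryRequestBuffer();
                if (buffer.isPresent()) {
                    return buffer.get();
                }
                // The second clause refreshes the deadline: if any requester recycled or
                // received buffers recently, the pool timestamp moved forward and this
                // requester keeps waiting instead of timing out.
            } while (System.nanoTime() < deadline
                    || System.nanoTime()
                            < (deadline = pool.lastBufferOperationNanos() + timeoutNanos));
            throw new TimeoutException("no buffer operation on the pool within the timeout");
        }
    }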
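
One detail worth noting in getAvailableReaders: allReaders is handed straight to new PriorityQueue<>(...), which only works because HsSubpartitionFileReader instances are mutually Comparable; otherwise the queue throws ClassCastException as soon as it compares two elements. The toy below (hypothetical ToyReader type, not the PR's actual comparison logic) shows the intended effect if readers order themselves by next file offset: readData then polls them in a roughly disk-sequential order regardless of insertion order.

    import java.util.PriorityQueue;

    final class ReaderOrderingExample {

        /** Hypothetical reader ordered by its next file offset. */
        static final class ToyReader implements Comparable<ToyReader> {
            final int subpartitionId;
            final long nextFileOffset;

            ToyReader(int subpartitionId, long nextFileOffset) {
                this.subpartitionId = subpartitionId;
                this.nextFileOffset = nextFileOffset;
            }

            @Override
            public int compareTo(ToyReader other) {
                return Long.compare(nextFileOffset, other.nextFileOffset);
            }
        }

        public static void main(String[] args) {
            PriorityQueue<ToyReader> queue = new PriorityQueue<>();
            queue.add(new ToyReader(2, 64 * 1024));
            queue.add(new ToyReader(0, 0));
            queue.add(new ToyReader(1, 32 * 1024));

            // Polls by ascending file offset (subpartitions 0, 1, 2), i.e. roughly
            // the order the data lies on disk, regardless of insertion order.
            while (!queue.isEmpty()) {
                System.out.println("read subpartition " + queue.poll().subpartitionId);
            }
        }
    }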