wanglijie95 commented on a change in pull request #18376:
URL: https://github.com/apache/flink/pull/18376#discussion_r790425357
##########
File path: flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/SsgNetworkMemoryCalculationUtils.java
##########
@@ -148,6 +161,78 @@ private static TaskInputsOutputsDescriptor buildTaskInputsOutputsDescriptor(
         return ret;
     }
 
+    private static Map<IntermediateDataSetID, Integer> getMaxInputChannelNumsForDynamicGraph(
+            ExecutionJobVertex ejv) {
+
+        Map<IntermediateDataSetID, Integer> ret = new HashMap<>();
+        for (IntermediateResult consumedResult : ejv.getInputs()) {
+            ret.put(consumedResult.getId(), getMaxInputChannelNumForResult(ejv, consumedResult));
+        }
+
+        return ret;
+    }
+
+    private static Map<IntermediateDataSetID, Integer> getMaxSubpartitionNumsForDynamicGraph(
+            ExecutionJobVertex ejv) {
+
+        Map<IntermediateDataSetID, Integer> ret = new HashMap<>();
+
+        for (IntermediateResult intermediateResult : ejv.getProducedDataSets()) {
+            final int maxNum =
+                    Arrays.stream(intermediateResult.getPartitions())
+                            .map(IntermediateResultPartition::getNumberOfSubpartitions)
+                            .reduce(0, Integer::max);
+            ret.put(intermediateResult.getId(), maxNum);
+        }
+
+        return ret;
+    }
+
+    @VisibleForTesting
+    static int getMaxInputChannelNumForResult(
+            ExecutionJobVertex ejv, IntermediateResult consumedResult) {
+        DistributionPattern distributionPattern = consumedResult.getConsumingDistributionPattern();
+
+        if (distributionPattern == DistributionPattern.ALL_TO_ALL) {
+            int numChannelsToConsumePerPartition =
+                    getMaxNumOfChannelsForConsuming(consumedResult.getPartitions()[0]);
+            int numConsumedPartitions = consumedResult.getNumberOfAssignedPartitions();
+            return numChannelsToConsumePerPartition * numConsumedPartitions;
+
+        } else if (distributionPattern == DistributionPattern.POINTWISE) {
+            int numPartitions = consumedResult.getNumberOfAssignedPartitions();
+            int numConsumers = ejv.getParallelism();
+            // when using dynamic graph, all partitions have the same number of subpartitions
+            int numOfSubpartitionsPerPartition =
+                    consumedResult.getPartitions()[0].getNumberOfSubpartitions();
+
+            if (numPartitions >= numConsumers) {
+                // multiple partitions to one consumer
+                int maxConsumedPartitionsPerConsumer =
+                        (int) Math.ceil((double) numPartitions / numConsumers);
+                return numOfSubpartitionsPerPartition * maxConsumedPartitionsPerConsumer;
+            } else {
+                // one partition to multiple consumers
+                int minConsumersPerPartition =
+                        (int) Math.floor((double) numConsumers / numPartitions);
+                return (int)
+                        Math.ceil(
+                                (double) numOfSubpartitionsPerPartition / minConsumersPerPartition);
+            }

Review comment:
       This calculation logic is no longer needed after the modification described above.
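To make the POINTWISE branch under discussion concrete, here is a minimal standalone sketch of the same arithmetic with hypothetical numbers; the class name and all values below are illustrative and not taken from the PR:

    // Sketch of the POINTWISE channel-count estimate above, using hypothetical values.
    public class PointwiseChannelEstimateExample {
        public static void main(String[] args) {
            int numPartitions = 5;                  // hypothetical number of consumed partitions
            int numConsumers = 2;                   // hypothetical downstream parallelism
            int numOfSubpartitionsPerPartition = 4; // hypothetical; same for all partitions in a dynamic graph

            int maxInputChannels;
            if (numPartitions >= numConsumers) {
                // multiple partitions per consumer: ceil(5 / 2) = 3 partitions, 3 * 4 = 12 channels
                int maxConsumedPartitionsPerConsumer =
                        (int) Math.ceil((double) numPartitions / numConsumers);
                maxInputChannels = numOfSubpartitionsPerPartition * maxConsumedPartitionsPerConsumer;
            } else {
                // one partition shared by several consumers: at most
                // ceil(subpartitions / floor(consumers / partitions)) channels per consumer
                int minConsumersPerPartition =
                        (int) Math.floor((double) numConsumers / numPartitions);
                maxInputChannels =
                        (int) Math.ceil((double) numOfSubpartitionsPerPartition / minConsumersPerPartition);
            }
            System.out.println("max input channels per consumer = " + maxInputChannels); // prints 12
        }
    }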