1996fanrui commented on code in PR #904: URL: https://github.com/apache/flink-kubernetes-operator/pull/904#discussion_r1821951582
########## flink-autoscaler/src/main/java/org/apache/flink/autoscaler/ParallelismAdjuster.java: ########## @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.autoscaler; + +import org.apache.flink.autoscaler.config.AutoScalerOptions; +import org.apache.flink.autoscaler.event.AutoScalerEventHandler; +import org.apache.flink.autoscaler.topology.ShipStrategy; +import org.apache.flink.configuration.DescribedEnum; +import org.apache.flink.configuration.description.InlineElement; +import org.apache.flink.runtime.jobgraph.JobVertexID; + +import java.util.Collection; + +import static org.apache.flink.autoscaler.JobVertexScaler.SCALE_LIMITED_MESSAGE_FORMAT; +import static org.apache.flink.autoscaler.JobVertexScaler.SCALING_LIMITED; +import static org.apache.flink.autoscaler.ParallelismAdjuster.KeyGroupOrPartitionsAdjustMode.MAXIMIZE_UTILISATION; +import static org.apache.flink.autoscaler.config.AutoScalerOptions.SCALING_EVENT_INTERVAL; +import static org.apache.flink.autoscaler.topology.ShipStrategy.HASH; +import static org.apache.flink.configuration.description.TextElement.text; + +/** + * Component responsible adjusts the parallelism of a vertex. 
+ * + * <p>When input vertex {@link ShipStrategy} is {@link ShipStrategy#HASH} or knows the number of + * current partitions of vertex. We hope to adjust the parallelism of the current vertex according + * to the number of key groups or partitions to achieve the goal of evenly distributing data among + * subtasks or maximizing utilization. + */ +public class ParallelismAdjuster { + + public static <KEY, Context extends JobAutoScalerContext<KEY>> int adjust( + JobVertexID vertex, + Context context, + AutoScalerEventHandler<KEY, Context> eventHandler, + int maxParallelism, + int numSourcePartitions, + int newParallelism, + int upperBound, + int parallelismLowerLimit, + Collection<ShipStrategy> inputShipStrategies) { + var adjustByMaxParallelismOrPartitions = + numSourcePartitions > 0 || inputShipStrategies.contains(HASH); + if (!adjustByMaxParallelismOrPartitions) { + return newParallelism; + } + var numKeyGroupsOrPartitions = + numSourcePartitions <= 0 ? maxParallelism : numSourcePartitions; + + KeyGroupOrPartitionsAdjustMode mode = + context.getConfiguration() + .get(AutoScalerOptions.SCALING_KEY_GROUP_PARTITIONS_ADJUST_MODE); + + var upperBoundForAlignment = Math.min(numKeyGroupsOrPartitions, upperBound); + + // When the shuffle type of vertex inputs contains keyBy or vertex is a source, + // we try to adjust the parallelism such that it divides + // the numKeyGroupsOrPartitions without a remainder => data is evenly spread across subtasks + for (int p = newParallelism; p <= upperBoundForAlignment; p++) { + if (numKeyGroupsOrPartitions % p == 0 + || + // When Mode is MAXIMIZE_UTILISATION , Try to find the smallest parallelism + // that can satisfy the current consumption rate. 
+ (mode == MAXIMIZE_UTILISATION + && numKeyGroupsOrPartitions / p + < numKeyGroupsOrPartitions / newParallelism)) { + return p; + } + } + + // When adjusting the parallelism after rounding up cannot + // find the right degree of parallelism to meet requirements, + // Try to find the smallest parallelism that can satisfy the current consumption rate. + int p = + calculateMinimumParallelism( + numKeyGroupsOrPartitions, newParallelism, parallelismLowerLimit); + var message = + String.format( + SCALE_LIMITED_MESSAGE_FORMAT, + vertex, + newParallelism, + p, + numKeyGroupsOrPartitions, + upperBound, + parallelismLowerLimit); + eventHandler.handleEvent( + context, + AutoScalerEventHandler.Type.Warning, + SCALING_LIMITED, + message, + SCALING_LIMITED + vertex + newParallelism, + context.getConfiguration().get(SCALING_EVENT_INTERVAL)); + return p; + } + + private static int calculateMinimumParallelism( + int numKeyGroupsOrPartitions, int newParallelism, int parallelismLowerLimit) { + int p = newParallelism; + for (; p > 0; p--) { + if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) { + if (numKeyGroupsOrPartitions % p != 0) { + p++; + } + break; + } + } + p = Math.max(p, parallelismLowerLimit); + return p; + } + + /** The mode of the key group or parallelism adjustment. */ + public enum KeyGroupOrPartitionsAdjustMode implements DescribedEnum { + DEFAULT( + "This mode ensures that the parallelism adjustment attempts to evenly distribute data across subtasks" + + ". It is particularly effective for source vertices that are aware of partition counts or vertices after " + + "'keyBy' operation. The goal is to have the number of key groups or partitions be divisible by the set parallelism, ensuring even data distribution and reducing data skew."), Review Comment: Can `DEFAULT` be renamed to `ABSOLUTELY_EVENLY_DISTRIBUTION`? It's more clear for users, and we may update the default value for `scaling.key-group.partitions.adjust.mode` option in the future. 
For example, `MAXIMIZE_UTILISATION` may be more reasonable in the future. And `KeyGroupOrPartitionsAdjustMode.DEFAULT` is a little weird if it's not the default enum. ########## flink-autoscaler/src/main/java/org/apache/flink/autoscaler/ParallelismAdjuster.java: ########## @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.autoscaler; + +import org.apache.flink.autoscaler.config.AutoScalerOptions; +import org.apache.flink.autoscaler.event.AutoScalerEventHandler; +import org.apache.flink.autoscaler.topology.ShipStrategy; +import org.apache.flink.configuration.DescribedEnum; +import org.apache.flink.configuration.description.InlineElement; +import org.apache.flink.runtime.jobgraph.JobVertexID; + +import java.util.Collection; + +import static org.apache.flink.autoscaler.JobVertexScaler.SCALE_LIMITED_MESSAGE_FORMAT; +import static org.apache.flink.autoscaler.JobVertexScaler.SCALING_LIMITED; +import static org.apache.flink.autoscaler.ParallelismAdjuster.KeyGroupOrPartitionsAdjustMode.MAXIMIZE_UTILISATION; +import static org.apache.flink.autoscaler.config.AutoScalerOptions.SCALING_EVENT_INTERVAL; +import static org.apache.flink.autoscaler.topology.ShipStrategy.HASH; +import static org.apache.flink.configuration.description.TextElement.text; + +/** + * Component responsible adjusts the parallelism of a vertex. + * + * <p>When input vertex {@link ShipStrategy} is {@link ShipStrategy#HASH} or knows the number of + * current partitions of vertex. We hope to adjust the parallelism of the current vertex according + * to the number of key groups or partitions to achieve the goal of evenly distributing data among + * subtasks or maximizing utilization. 
+ */ +public class ParallelismAdjuster { + + public static <KEY, Context extends JobAutoScalerContext<KEY>> int adjust( + JobVertexID vertex, + Context context, + AutoScalerEventHandler<KEY, Context> eventHandler, + int maxParallelism, + int numSourcePartitions, + int newParallelism, + int upperBound, + int parallelismLowerLimit, + Collection<ShipStrategy> inputShipStrategies) { + var adjustByMaxParallelismOrPartitions = + numSourcePartitions > 0 || inputShipStrategies.contains(HASH); + if (!adjustByMaxParallelismOrPartitions) { + return newParallelism; + } + var numKeyGroupsOrPartitions = + numSourcePartitions <= 0 ? maxParallelism : numSourcePartitions; + + KeyGroupOrPartitionsAdjustMode mode = + context.getConfiguration() + .get(AutoScalerOptions.SCALING_KEY_GROUP_PARTITIONS_ADJUST_MODE); + + var upperBoundForAlignment = Math.min(numKeyGroupsOrPartitions, upperBound); + + // When the shuffle type of vertex inputs contains keyBy or vertex is a source, + // we try to adjust the parallelism such that it divides + // the numKeyGroupsOrPartitions without a remainder => data is evenly spread across subtasks + for (int p = newParallelism; p <= upperBoundForAlignment; p++) { + if (numKeyGroupsOrPartitions % p == 0 + || + // When Mode is MAXIMIZE_UTILISATION , Try to find the smallest parallelism + // that can satisfy the current consumption rate. + (mode == MAXIMIZE_UTILISATION + && numKeyGroupsOrPartitions / p + < numKeyGroupsOrPartitions / newParallelism)) { + return p; + } + } + + // When adjusting the parallelism after rounding up cannot + // find the right degree of parallelism to meet requirements, + // Try to find the smallest parallelism that can satisfy the current consumption rate. 
+ int p = + calculateMinimumParallelism( + numKeyGroupsOrPartitions, newParallelism, parallelismLowerLimit); + var message = + String.format( + SCALE_LIMITED_MESSAGE_FORMAT, + vertex, + newParallelism, + p, + numKeyGroupsOrPartitions, + upperBound, + parallelismLowerLimit); + eventHandler.handleEvent( + context, + AutoScalerEventHandler.Type.Warning, + SCALING_LIMITED, + message, + SCALING_LIMITED + vertex + newParallelism, + context.getConfiguration().get(SCALING_EVENT_INTERVAL)); + return p; + } + + private static int calculateMinimumParallelism( + int numKeyGroupsOrPartitions, int newParallelism, int parallelismLowerLimit) { + int p = newParallelism; + for (; p > 0; p--) { + if (numKeyGroupsOrPartitions / p > numKeyGroupsOrPartitions / newParallelism) { + if (numKeyGroupsOrPartitions % p != 0) { + p++; + } + break; + } + } + p = Math.max(p, parallelismLowerLimit); + return p; + } + + /** The mode of the key group or parallelism adjustment. */ + public enum KeyGroupOrPartitionsAdjustMode implements DescribedEnum { + DEFAULT( + "This mode ensures that the parallelism adjustment attempts to evenly distribute data across subtasks" + + ". It is particularly effective for source vertices that are aware of partition counts or vertices after " + + "'keyBy' operation. The goal is to have the number of key groups or partitions be divisible by the set parallelism, ensuring even data distribution and reducing data skew."), + + MAXIMIZE_UTILISATION( + "This model is to maximize resource utilization. In this mode, an attempt is made to set" + + " the minimum degree of parallelism that meets the current consumption rate requirements. Unlike the default mode, it is not enforced that the number of key groups or partitions is divisible by the degree of parallelism."), Review Comment: Could the `degree of parallelism` be updated to `parallelism`? -- This is an automated message from the Apache Git Service. 
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscr...@flink.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org