Hisoka-X commented on code in PR #8233:
URL: https://github.com/apache/seatunnel/pull/8233#discussion_r1875219107


##########
seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceRequestHandler.java:
##########
@@ -222,7 +250,53 @@ public Optional<WorkerProfile> 
preCheckWorkerResource(ResourceProfile r) {
                                                         slot ->
                                                                 
slot.getResourceProfile()
                                                                         
.enoughThan(r)))
-                        .findAny();
+                        .collect(Collectors.toList());
+
+        Optional<WorkerProfile> workerProfile;
+        switch (allocateStrategy) {
+            case SYSTEM_LOAD:

Review Comment:
   Each strategy should use its own class to encapsulate the worker-selection (pick) logic, instead of branching inside a switch statement.



##########
seatunnel-e2e/seatunnel-engine-e2e/connector-seatunnel-e2e-base/src/test/java/org/apache/seatunnel/engine/e2e/allocatestrategy/SystemLoadAllocateStrategyIT.java:
##########
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.seatunnel.engine.e2e.allocatestrategy;
+
+import org.apache.seatunnel.common.config.Common;
+import org.apache.seatunnel.common.config.DeployMode;
+import org.apache.seatunnel.common.constants.JobMode;
+import org.apache.seatunnel.engine.client.SeaTunnelClient;
+import org.apache.seatunnel.engine.common.config.ConfigProvider;
+import org.apache.seatunnel.engine.common.config.JobConfig;
+import org.apache.seatunnel.engine.common.config.SeaTunnelConfig;
+import org.apache.seatunnel.engine.common.config.server.AllocateStrategy;
+import org.apache.seatunnel.engine.common.config.server.SlotServiceConfig;
+import org.apache.seatunnel.engine.e2e.TestUtils;
+import org.apache.seatunnel.engine.server.SeaTunnelServer;
+import org.apache.seatunnel.engine.server.SeaTunnelServerStarter;
+import org.apache.seatunnel.engine.server.resourcemanager.ResourceManager;
+import org.apache.seatunnel.engine.server.resourcemanager.worker.WorkerProfile;
+
+import org.awaitility.Awaitility;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import com.hazelcast.client.config.ClientConfig;
+import com.hazelcast.cluster.Address;
+import com.hazelcast.config.MemberAttributeConfig;
+import com.hazelcast.instance.impl.HazelcastInstanceImpl;
+import com.hazelcast.spi.impl.NodeEngineImpl;
+import lombok.NonNull;
+import lombok.extern.slf4j.Slf4j;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import static 
org.apache.seatunnel.shade.com.google.common.base.Preconditions.checkArgument;
+
+/** Test task allocation strategy */
+@Slf4j
+public class SystemLoadAllocateStrategyIT {
+
+    public static final String DYNAMIC_TEST_CASE_NAME = 
"dynamic_test_case_name";
+
+    public static final String DYNAMIC_JOB_MODE = "dynamic_job_mode";
+
+    public static final String DYNAMIC_TEST_ROW_NUM_PER_PARALLELISM =
+            "dynamic_test_row_num_per_parallelism";
+
+    public static final String DYNAMIC_TEST_PARALLELISM = 
"dynamic_test_parallelism";
+
+    /**
+     * Test steps: <br>
+     * 1. Start two tasks and occupy 5 slots on two nodes respectively <br>
+     * 2. Start 3 parallel tasks ,occupy 4 slots<br>
+     * 3. Expected result: Each node of the two nodes occupies 7 slots 
respectively <br>
+     */
+    @Test
+    public void testSystemLoadStrategy() throws Exception {
+        String testCaseName = "testSystemLoadStrategy";
+        String testClusterName = "TestSystemLoadStrategy";
+        long testRowNumber = 100;
+        int testParallelism = 4;
+
+        HazelcastInstanceImpl node1 = null;
+        HazelcastInstanceImpl node2 = null;
+        SeaTunnelClient engineClient = null;
+
+        SeaTunnelConfig seaTunnelConfig = 
ConfigProvider.locateAndGetSeaTunnelConfig();
+        seaTunnelConfig
+                .getHazelcastConfig()
+                .setClusterName(TestUtils.getClusterName(testClusterName));
+        SlotServiceConfig slotServiceConfig =
+                seaTunnelConfig.getEngineConfig().getSlotServiceConfig();
+        slotServiceConfig.setSlotNum(10);
+        slotServiceConfig.setDynamicSlot(false);
+        // enable system load strategy
+        slotServiceConfig.setAllocateStrategy(AllocateStrategy.SYSTEM_LOAD);
+
+        // Set the node tag and submit a task that occupies 5 slots to each of 
the two nodes
+        MemberAttributeConfig node1Tags = new MemberAttributeConfig();
+        node1Tags.setAttribute("strategy", "system_load1");
+        
seaTunnelConfig.getHazelcastConfig().setMemberAttributeConfig(node1Tags);
+        try {
+            node1 = 
SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig);
+            MemberAttributeConfig node2Tags = new MemberAttributeConfig();
+            node2Tags.setAttribute("strategy", "system_load2");
+            
seaTunnelConfig.getHazelcastConfig().setMemberAttributeConfig(node2Tags);
+            node2 = 
SeaTunnelServerStarter.createHazelcastInstance(seaTunnelConfig);
+
+            // waiting all node added to cluster
+            HazelcastInstanceImpl finalNode = node1;
+            Awaitility.await()
+                    .atMost(10, TimeUnit.SECONDS)
+                    .untilAsserted(
+                            () ->
+                                    Assertions.assertEquals(
+                                            2, 
finalNode.getCluster().getMembers().size()));
+
+            // Waiting for worker heartbeat registration
+            Thread.sleep(10000);
+            Common.setDeployMode(DeployMode.CLIENT);
+            JobConfig jobConfig = new JobConfig();
+            jobConfig.setName(testCaseName);
+
+            ClientConfig clientConfig = 
ConfigProvider.locateAndGetClientConfig();
+            
clientConfig.setClusterName(TestUtils.getClusterName(testClusterName));
+            engineClient = new SeaTunnelClient(clientConfig);
+            engineClient
+                    .createExecutionContext(
+                            createTestResources(
+                                    testCaseName,
+                                    JobMode.STREAMING,
+                                    testRowNumber,
+                                    testParallelism,
+                                    
"allocate-strategy/allocate_strategy_tag1_with_system_load.conf"),
+                            jobConfig,
+                            seaTunnelConfig)
+                    .execute();
+
+            engineClient
+                    .createExecutionContext(
+                            createTestResources(
+                                    testCaseName,
+                                    JobMode.STREAMING,
+                                    testRowNumber,
+                                    testParallelism,
+                                    
"allocate-strategy/allocate_strategy_tag2_with_system_load.conf"),
+                            jobConfig,
+                            seaTunnelConfig)
+                    .execute();
+
+            NodeEngineImpl nodeEngine = node1.node.nodeEngine;
+            Address node2Address = node2.node.address;
+            Address node1Address = node1.node.address;
+
+            SeaTunnelServer server = 
nodeEngine.getService(SeaTunnelServer.SERVICE_NAME);
+            ResourceManager resourceManager = 
server.getCoordinatorService().getResourceManager();
+
+            Awaitility.await()
+                    .atMost(600, TimeUnit.SECONDS)
+                    .untilAsserted(
+                            () -> {
+                                ConcurrentMap<Address, WorkerProfile> 
registerWorker =
+                                        resourceManager.getRegisterWorker();
+                                int node1AssignedSlotsNum =
+                                        
registerWorker.get(node1Address).getAssignedSlots().length;
+                                int node2AssignedSlotsNum =
+                                        
registerWorker.get(node2Address).getAssignedSlots().length;
+                                Assertions.assertTrue(node1AssignedSlotsNum == 
5);
+                                Assertions.assertTrue(node2AssignedSlotsNum == 
5);
+                                Assertions.assertEquals(
+                                        10, node1AssignedSlotsNum + 
node2AssignedSlotsNum);
+                            });
+
+            // Waiting to collect the node's System Load information
+            Thread.sleep(60000);
+
+            // Start a task that occupies 4 slots
+            jobConfig = new JobConfig();
+            jobConfig.setName(testCaseName);
+
+            engineClient
+                    .createExecutionContext(
+                            createTestResources(
+                                    testCaseName,
+                                    JobMode.STREAMING,
+                                    testRowNumber,
+                                    3,
+                                    
"allocate-strategy/allocate_strategy_no_tag_with_system_load.conf"),
+                            jobConfig,
+                            seaTunnelConfig)
+                    .execute();
+
+            // Because e2e runs on the same node, the CPU and memory are 
almost the same, but we

Review Comment:
   Is there any way to test an unbalanced system-load situation? For example, force-set the system-load values on each worker, then verify that the selection algorithm picks the less loaded node.



##########
seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/resourcemanager/AbstractResourceManager.java:
##########
@@ -98,9 +127,70 @@ private void initWorker() {
                                                         }))
                         .collect(Collectors.toList());
         futures.forEach(CompletableFuture::join);
+
+        scheduledExecutorService.scheduleAtFixedRate(
+                () -> {
+                    try {
+                        log.debug(
+                                "start send system load to resource manager, 
this address: "
+                                        + 
nodeEngine.getClusterService().getThisAddress());
+                        updateWorkerSystemLoad();
+                    } catch (Exception e) {
+                        log.warn(
+                                "failed send system load to resource manager, 
will retry later. this address: "
+                                        + 
nodeEngine.getClusterService().getThisAddress());
+                    }
+                },
+                0,
+                DEFAULT_SYSTEM_LOAD_PERIOD,
+                TimeUnit.MILLISECONDS);
         log.info("registerWorker: {}", registerWorker);
     }
 
+    private void updateWorkerSystemLoad() {
+        nodeEngine.getClusterService().getMembers().stream()
+                .map(Member::getAddress)
+                .forEach(this::collectAndUpdateSystemLoad);
+    }
+
+    private void collectAndUpdateSystemLoad(Address node) {
+        sendToMember(new WorkerSystemLoadOperation(), node)
+                .thenAccept(
+                        systemLoad -> {
+                            if (Objects.isNull(systemLoad)) {
+                                return;
+                            }
+
+                            SystemLoad currentSystemLoad =
+                                    workerLoadMap.computeIfAbsent(node, k -> 
new SystemLoad());
+
+                            updateSystemLoadMetrics(currentSystemLoad, 
(SystemLoad) systemLoad);
+
+                            log.debug(
+                                    "received system load from worker: {}, 
system load: {}",
+                                    node,
+                                    workerLoadMap.get(node));
+                        });
+    }
+
+    private void updateSystemLoadMetrics(SystemLoad currentSystemLoad, 
SystemLoad newSystemLoad) {
+        LinkedHashMap<String, SystemLoad.SystemLoadInfo> metrics = 
currentSystemLoad.getMetrics();
+
+        if (metrics == null) {
+            metrics = new LinkedHashMap<>();
+            currentSystemLoad.setMetrics(metrics);
+        }
+
+        // Keep up to 5 historical records
+        while (metrics.size() >= 5) {

Review Comment:
   Why not use Guava's `EvictingQueue`? It automatically evicts the oldest entry once the capacity of 5 is reached, removing the need for this manual eviction loop.



##########
seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceRequestHandler.java:
##########
@@ -222,7 +250,53 @@ public Optional<WorkerProfile> 
preCheckWorkerResource(ResourceProfile r) {
                                                         slot ->
                                                                 
slot.getResourceProfile()
                                                                         
.enoughThan(r)))
-                        .findAny();
+                        .collect(Collectors.toList());
+
+        Optional<WorkerProfile> workerProfile;
+        switch (allocateStrategy) {
+            case SYSTEM_LOAD:
+                workerProfile =
+                        availableWorkers.stream()
+                                .max(
+                                        Comparator.comparingDouble(
+                                                w -> calculateWeight(w, 
workerAssignedSlots)));
+                workerProfile.ifPresent(
+                        profile -> {
+                            workerAssignedSlots.merge(
+                                    profile.getAddress(),
+                                    new ImmutableTriple<>(
+                                            0.0, 1, 
profile.getAssignedSlots().length),
+                                    (oldVal, newVal) ->
+                                            new ImmutableTriple<>(
+                                                    oldVal.left, oldVal.middle 
+ 1, oldVal.right));
+
+                            LOGGER.fine("Selected worker: " + 
profile.getAddress());
+                        });
+                break;
+            case RANDOM:
+                // Randomly obtain a worker
+                Collections.shuffle(availableWorkers);
+                Collections.shuffle(workerProfiles);
+                workerProfile = availableWorkers.stream().findFirst();
+                break;
+            default:
+                // The slot usage rate strategy is used by default. The lower 
the slot usage rate,
+                // the higher the priority.

Review Comment:
   We should keep `RANDOM` as the default strategy for now, and only switch the default to `SLOT_RATIO` after two release versions.



##########
seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/resourcemanager/AbstractResourceManager.java:
##########
@@ -98,9 +127,70 @@ private void initWorker() {
                                                         }))
                         .collect(Collectors.toList());
         futures.forEach(CompletableFuture::join);
+
+        scheduledExecutorService.scheduleAtFixedRate(
+                () -> {
+                    try {
+                        log.debug(
+                                "start send system load to resource manager, 
this address: "
+                                        + 
nodeEngine.getClusterService().getThisAddress());
+                        updateWorkerSystemLoad();
+                    } catch (Exception e) {
+                        log.warn(
+                                "failed send system load to resource manager, 
will retry later. this address: "
+                                        + 
nodeEngine.getClusterService().getThisAddress());
+                    }
+                },
+                0,
+                DEFAULT_SYSTEM_LOAD_PERIOD,
+                TimeUnit.MILLISECONDS);

Review Comment:
   Why not have each worker send its system-load information to the resource manager via the existing heartbeat operation, instead of adding a separate scheduled task? See:
https://github.com/apache/seatunnel/blob/92b3253a5b24b71c941974c06c433d9f3bace07a/seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/resourcemanager/AbstractResourceManager.java#L234



##########
seatunnel-engine/seatunnel-engine-server/src/main/java/org/apache/seatunnel/engine/server/resourcemanager/ResourceRequestHandler.java:
##########
@@ -222,7 +250,53 @@ public Optional<WorkerProfile> 
preCheckWorkerResource(ResourceProfile r) {
                                                         slot ->
                                                                 
slot.getResourceProfile()
                                                                         
.enoughThan(r)))
-                        .findAny();
+                        .collect(Collectors.toList());
+
+        Optional<WorkerProfile> workerProfile;
+        switch (allocateStrategy) {
+            case SYSTEM_LOAD:
+                workerProfile =
+                        availableWorkers.stream()
+                                .max(
+                                        Comparator.comparingDouble(
+                                                w -> calculateWeight(w, 
workerAssignedSlots)));
+                workerProfile.ifPresent(
+                        profile -> {
+                            workerAssignedSlots.merge(
+                                    profile.getAddress(),
+                                    new ImmutableTriple<>(
+                                            0.0, 1, 
profile.getAssignedSlots().length),
+                                    (oldVal, newVal) ->
+                                            new ImmutableTriple<>(
+                                                    oldVal.left, oldVal.middle 
+ 1, oldVal.right));
+
+                            LOGGER.fine("Selected worker: " + 
profile.getAddress());
+                        });
+                break;
+            case RANDOM:
+                // Randomly obtain a worker
+                Collections.shuffle(availableWorkers);
+                Collections.shuffle(workerProfiles);

Review Comment:
   This line looks unnecessary — `workerProfiles` is shuffled here, but only `availableWorkers` is used afterward (`availableWorkers.stream().findFirst()`), so the shuffle appears to have no effect.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@seatunnel.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to