YuweiXiao commented on code in PR #4958:
URL: https://github.com/apache/hudi/pull/4958#discussion_r917395774


##########
hudi-client/hudi-client-common/src/test/java/org/apache/hudi/index/bucket/TestConsistentBucketIdIdentifier.java:
##########
@@ -76,4 +96,90 @@ public void testGetBucket() {
     Assertions.assertEquals(nodes.get(1), identifier.getBucketByFileId(FSUtils.createNewFileId("1", 0)));
     Assertions.assertEquals(nodes.get(2), identifier.getBucketByFileId(FSUtils.createNewFileId("2", 0)));
   }
+
+  /**
+   * @param v0  first node hash value
+   * @param v1  second node hash value
+   * @param mid expected hash value of the middle node produced by splitting the first bucket (v0); negative if the split is expected to fail
+   */
+  @ParameterizedTest
+  @MethodSource("splitBucketParams")
+  public void testSplitBucket(int v0, int v1, int mid) {
+    // Hash range mapping: [0, 0xf], (0xf, MAX]
+    List<ConsistentHashingNode> nodes = Arrays.asList(
+        new ConsistentHashingNode(v0, "0"),
+        new ConsistentHashingNode(v1, "1"));
+    HoodieConsistentHashingMetadata meta = new HoodieConsistentHashingMetadata((short) 0, "", "", 4, 0, nodes);
+    Option<List<ConsistentHashingNode>> res = new ConsistentBucketIdentifier(meta).splitBucket(nodes.get(0));
+    if (mid < 0) {
+      Assertions.assertFalse(res.isPresent());
+      return;
+    }
+
+    List<ConsistentHashingNode> childNodes = res.get();
+    Assertions.assertEquals(2, childNodes.size());
+    Assertions.assertTrue(childNodes.stream().allMatch(c -> c.getTag() == ConsistentHashingNode.NodeTag.REPLACE));
+    Assertions.assertEquals(mid, childNodes.get(0).getValue());
+    Assertions.assertEquals(nodes.get(0).getValue(), childNodes.get(1).getValue());
+  }
+
+  @Test
+  public void testMerge() {
+    HoodieConsistentHashingMetadata meta = new HoodieConsistentHashingMetadata("partition", 8);
+    List<ConsistentHashingNode> nodes = meta.getNodes();
+
+    List<String> fileIds = IntStream.range(0, 3).mapToObj(i -> FSUtils.createNewFileId(nodes.get(i).getFileIdPrefix(), 0)).collect(Collectors.toList());
+    List<ConsistentHashingNode> childNodes = new ConsistentBucketIdentifier(meta).mergeBucket(fileIds);
+    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, childNodes.get(0).getTag());
+    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, childNodes.get(1).getTag());
+    Assertions.assertEquals(ConsistentHashingNode.NodeTag.REPLACE, childNodes.get(2).getTag());
+    Assertions.assertEquals(nodes.get(2).getValue(), childNodes.get(2).getValue());
+    Assertions.assertNotEquals(nodes.get(2).getFileIdPrefix(), childNodes.get(2).getFileIdPrefix());
+
+    fileIds = Arrays.asList(nodes.get(7), nodes.get(0), nodes.get(1)).stream()
+        .map(ConsistentHashingNode::getFileIdPrefix).map(f -> FSUtils.createNewFileId(f, 0)).collect(Collectors.toList());
+    childNodes = new ConsistentBucketIdentifier(meta).mergeBucket(fileIds);
+    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, childNodes.get(0).getTag());
+    Assertions.assertEquals(ConsistentHashingNode.NodeTag.DELETE, childNodes.get(1).getTag());
+    Assertions.assertEquals(ConsistentHashingNode.NodeTag.REPLACE, childNodes.get(2).getTag());
+    Assertions.assertEquals(nodes.get(1).getValue(), childNodes.get(2).getValue());
+    Assertions.assertNotEquals(nodes.get(1).getFileIdPrefix(), childNodes.get(2).getFileIdPrefix());
+
+    boolean exception = false;
+    try {
+      fileIds = IntStream.range(0, 2).mapToObj(i -> FSUtils.createNewFileId(nodes.get(i * 2).getFileIdPrefix(), 0)).collect(Collectors.toList());
+      new ConsistentBucketIdentifier(meta).mergeBucket(fileIds);
+    } catch (Exception e) {
+      exception = true;
+    }
+    Assertions.assertTrue(exception);

Review Comment:
   Sure, will improve it.
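   For reference, the manual `exception` flag around the invalid-merge call above is the kind of pattern JUnit 5's `assertThrows` expresses more directly. A minimal sketch, assuming the broad `Exception` type that the original catch block accepts (the concrete exception thrown by `mergeBucket` for non-adjacent buckets is not shown in this diff); `nonAdjacentFileIds` is a hypothetical local name introduced so the lambda captures an effectively final variable:
   
   ```java
   // Sketch only: same invalid-merge scenario, expressed with JUnit 5's assertThrows.
   // Exception.class mirrors the broad catch in the original test; narrow it to the
   // concrete type mergeBucket actually throws if that is known.
   final List<String> nonAdjacentFileIds = IntStream.range(0, 2)
       .mapToObj(i -> FSUtils.createNewFileId(nodes.get(i * 2).getFileIdPrefix(), 0))
       .collect(Collectors.toList());
   Assertions.assertThrows(Exception.class,
       () -> new ConsistentBucketIdentifier(meta).mergeBucket(nonAdjacentFileIds));
   ```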



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
