sadanand48 commented on PR #8066:
URL: https://github.com/apache/ozone/pull/8066#issuecomment-2720976952
```diff
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 5c0699c2cb..d7a4149c57 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -28,6 +28,7 @@
import static
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -42,6 +43,7 @@
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
+import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
@@ -49,19 +51,29 @@
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.HddsTestUtils;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
@@ -71,6 +83,7 @@
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadInfo;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OpenKey;
@@ -155,6 +168,63 @@ public void testListVolumes() throws Exception {
assertEquals(volumeList.size(), totalVol - startOrder - 1);
}
+ private OmKeyInfo getSampleKeyInfo() {
+ int blockNum = 1;
+ List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+ Pipeline pipeline = HddsTestUtils.getRandomPipeline();
+ BlockID blockID = new BlockID(blockNum, blockNum);
+ OmKeyLocationInfo keyLocationInfo =
+ new
OmKeyLocationInfo.Builder().setBlockID(blockID).setPipeline(pipeline).build();
+ omKeyLocationInfoList.add(keyLocationInfo);
+ OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
+ return new
OmKeyInfo.Builder().setCreationTime(Time.now()).setModificationTime(Time.now())
+
.setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE))
+ .setVolumeName("volume").setBucketName("bucket").setKeyName("key" +
new Random().nextInt(100))
+ .setObjectID(Time.now()).setUpdateID(Time.now()).setDataSize(100)
+
.setOmKeyLocationInfos(Collections.singletonList(omKeyLocationInfoGroup)).build();
+ }
+
+ private static Codec<OmKeyInfo> customCodec() {
+ return new DelegatedCodec<>(
+
Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()),
+ TestOmMetadataManager::getBasicFieldsFromProtobuf,
+ k -> k.getProtobuf(true, ClientVersion.CURRENT_VERSION),
+ OmKeyInfo.class);
+ }
+
+ public static OmKeyInfo
getBasicFieldsFromProtobuf(OzoneManagerProtocolProtos.KeyInfo keyInfo) {
+ if (keyInfo == null) {
+ return null;
+ }
+ OmKeyInfo.Builder builder = new OmKeyInfo.Builder()
+ .setVolumeName(keyInfo.getVolumeName())
+ .setBucketName(keyInfo.getBucketName())
+ .setKeyName(keyInfo.getKeyName())
+ .setDataSize(keyInfo.getDataSize())
+ .setCreationTime(keyInfo.getCreationTime());
+ return builder.build();
+ }
+
+ @Test
+ public void testCustomCodecIteratorOnKeyTable() throws Exception {
+ OmKeyInfo omKeyInfo = getSampleKeyInfo();
+ omMetadataManager.getKeyTable(BucketLayout.OBJECT_STORE).put("key",
omKeyInfo);
+ try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
iterator = omMetadataManager.getKeyTable(
+ BucketLayout.OBJECT_STORE).iterator(customCodec(), null)) {
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
+ String key = kv.getKey();
+ assertEquals(key, "key");
+ OmKeyInfo val = kv.getValue();
+ assertNull(val.getLatestVersionLocations());
+ assertTrue(val.getAcls().isEmpty());
+ assertTrue(val.getCreationTime() != 0);
+ // mod time is not decoded as per codec
+ assertFalse(val.getModificationTime() != 0);
+ }
+ }
+ }
+
@Test
public void testListAllVolumes() throws Exception {
OmVolumeArgs.Builder argsBuilder =
```
I wrote this unit test for the change — please feel free to include it in the PR.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]