lizhimins commented on code in PR #9256:
URL: https://github.com/apache/rocketmq/pull/9256#discussion_r2113399714


##########
store/src/main/java/org/apache/rocketmq/store/queue/CombineConsumeQueueStore.java:
##########
@@ -0,0 +1,532 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.rocketmq.store.queue;
+
+import com.alibaba.fastjson.JSON;
+import com.google.common.annotations.VisibleForTesting;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.rocketmq.common.BoundaryType;
+import org.apache.rocketmq.common.CheckRocksdbCqWriteResult;
+import org.apache.rocketmq.common.Pair;
+import org.apache.rocketmq.common.constant.LoggerName;
+import org.apache.rocketmq.common.message.MessageExtBrokerInner;
+import org.apache.rocketmq.store.DefaultMessageStore;
+import org.apache.rocketmq.store.DispatchRequest;
+import org.apache.rocketmq.store.StoreType;
+import org.apache.rocketmq.store.config.MessageStoreConfig;
+import org.apache.rocketmq.store.exception.ConsumeQueueException;
+import org.apache.rocketmq.store.exception.StoreException;
+import org.rocksdb.RocksDBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CombineConsumeQueueStore implements ConsumeQueueStoreInterface {
+    private static final Logger log = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
+    private static final Logger BROKER_LOG = LoggerFactory.getLogger(LoggerName.BROKER_LOGGER_NAME);
+
+    private final DefaultMessageStore messageStore;
+    private final MessageStoreConfig messageStoreConfig;
+
+    // Inner consume queue store.
+    private final LinkedList<AbstractConsumeQueueStore> innerConsumeQueueStoreList = new LinkedList<>();
+    private final ConsumeQueueStore consumeQueueStore;
+    private final RocksDBConsumeQueueStore rocksDBConsumeQueueStore;
+
+    // current read consume queue store.
+    private final AbstractConsumeQueueStore currentReadStore;
+    // consume queue store for assign offset and increase offset.
+    private final AbstractConsumeQueueStore assignOffsetStore;
+
+    // Used when searching for the commitLog mappedFile to recover from.
+    // When assignOffsetStore allows recovering from a mappedFile but another store does not, toleranceLookBackFailuresNum is decreased by 1.
+    // Once toleranceLookBackFailuresNum reaches 0, only assignOffsetStore is consulted.
+    private final AtomicInteger toleranceLookBackFailuresNum;
+
+    public CombineConsumeQueueStore(DefaultMessageStore messageStore) {
+        this.messageStore = messageStore;
+        this.messageStoreConfig = messageStore.getMessageStoreConfig();
+        toleranceLookBackFailuresNum = new AtomicInteger(messageStoreConfig.getCombineCQMaxExtraLookBackCommitLogFiles());
+
+        Set<StoreType> loadingConsumeQueueTypeSet = StoreType.fromString(messageStoreConfig.getCombineCQLoadingCQTypes());
+        if (loadingConsumeQueueTypeSet.isEmpty()) {
+            throw new IllegalArgumentException("CombineConsumeQueueStore loadingCQTypes is empty");
+        }
+
+        if (loadingConsumeQueueTypeSet.contains(StoreType.DEFAULT)) {
+            this.consumeQueueStore = new ConsumeQueueStore(messageStore);
+            this.innerConsumeQueueStoreList.addFirst(consumeQueueStore);
+        } else {
+            this.consumeQueueStore = null;
+        }
+
+        if (loadingConsumeQueueTypeSet.contains(StoreType.DEFAULT_ROCKSDB)) {
+            this.rocksDBConsumeQueueStore = new RocksDBConsumeQueueStore(messageStore);
+            this.innerConsumeQueueStoreList.addFirst(rocksDBConsumeQueueStore);
+        } else {
+            this.rocksDBConsumeQueueStore = null;
+        }
+
+        if (innerConsumeQueueStoreList.isEmpty()) {
+            throw new IllegalArgumentException("CombineConsumeQueueStore loadingCQTypes is empty");
+        }
+
+        assignOffsetStore = getInnerStoreByString(messageStoreConfig.getCombineAssignOffsetCQType());
+        if (assignOffsetStore == null) {
+            log.error("CombineConsumeQueue chooseAssignOffsetStore fail, 
prefer={}", messageStoreConfig.getCombineAssignOffsetCQType());
+            throw new IllegalArgumentException("CombineConsumeQueue 
chooseAssignOffsetStore fail");
+        }
+
+        currentReadStore = getInnerStoreByString(messageStoreConfig.getCombineCQPreferCQType());
+        if (currentReadStore == null) {
+            log.error("CombineConsumeQueue choosePreferCQ fail, prefer={}", 
messageStoreConfig.getCombineCQPreferCQType());
+            throw new IllegalArgumentException("CombineConsumeQueue 
choosePreferCQ fail");
+        }
+
+        log.info("CombineConsumeQueueStore init, consumeQueueStoreList={}, 
currentReadStore={}, assignOffsetStore={}",
+            innerConsumeQueueStoreList, 
currentReadStore.getClass().getSimpleName(), 
assignOffsetStore.getClass().getSimpleName());
+    }
+
+    @Override
+    public boolean load() {
+        for (AbstractConsumeQueueStore store : innerConsumeQueueStoreList) {
+            if (!store.load()) {
+                log.error("CombineConsumeQueueStore load fail, loadType={}", 
store.getClass().getSimpleName());
+                return false;
+            }
+        }
+        log.info("CombineConsumeQueueStore load success");
+        return true;
+    }
+
+    @Override
+    public void recover(boolean concurrently) throws RocksDBException {
+        for (AbstractConsumeQueueStore store : innerConsumeQueueStoreList) {
+            store.recover(concurrently);
+        }
+        log.info("CombineConsumeQueueStore recover success, concurrently={}", 
concurrently);
+    }
+
+    @Override
+    public boolean isMappedFileMatchedRecover(long phyOffset, long storeTimestamp,
+        boolean recoverNormally) throws RocksDBException {
+        if (!assignOffsetStore.isMappedFileMatchedRecover(phyOffset, storeTimestamp, recoverNormally)) {
+            return false;
+        }
+
+        for (AbstractConsumeQueueStore store : innerConsumeQueueStoreList) {
+            if (store == assignOffsetStore || store.isMappedFileMatchedRecover(phyOffset, storeTimestamp, recoverNormally)) {
+                continue;
+            }
+
+            if (toleranceLookBackFailuresNum.getAndDecrement() <= 0) {
+                // toleranceLookBackFailuresNum <= 0, so we can only read from assignOffsetStore
+                if (assignOffsetStore != currentReadStore) {
+                    log.error("CombineConsumeQueueStore currentReadStore not 
satisfied readable conditions, assignOffsetStore={}, currentReadStore={}",
+                        assignOffsetStore.getClass().getSimpleName(), 
currentReadStore.getClass().getSimpleName());
+                    throw new 
IllegalArgumentException(store.getClass().getSimpleName() + " not satisfied 
readable conditions, only can read from " + 
assignOffsetStore.getClass().getSimpleName());
+                }
+                log.warn("CombineConsumeQueueStore can not recover all inner 
store, maybe some inner store start haven’t started before, store={}",
+                    store.getClass().getSimpleName());
+                return true;
+            } else {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public long getDispatchFromPhyOffset() {
+        long dispatchFromPhyOffset = assignOffsetStore.getDispatchFromPhyOffset();
+        for (AbstractConsumeQueueStore store : innerConsumeQueueStoreList) {
+            if (store == assignOffsetStore) {
+                continue;
+            }
+            if (store.getDispatchFromPhyOffset() < dispatchFromPhyOffset) {
+                dispatchFromPhyOffset = store.getDispatchFromPhyOffset();
+            }
+        }
+        return dispatchFromPhyOffset;
+    }
+
+    @Override
+    public void start() {
+        boolean success = false;
+        try {
+            success = verifyAndInitOffsetForAllStore(true);
+        } catch (RocksDBException e) {
+            log.error("CombineConsumeQueueStore checkAssignOffsetStore fail", 
e);
+        }
+
+        if (!success && assignOffsetStore != currentReadStore) {
+            log.error("CombineConsumeQueueStore currentReadStore not satisfied 
readable conditions, " +
+                    "checkAssignOffsetResult={}, assignOffsetStore={}, 
currentReadStore={}",
+                success, assignOffsetStore.getClass().getSimpleName(), 
currentReadStore.getClass().getSimpleName());
+            throw new RuntimeException("CombineConsumeQueueStore 
currentReadStore not satisfied readable conditions");
+        }
+
+        for (AbstractConsumeQueueStore store : innerConsumeQueueStoreList) {
+            store.start();
+        }
+    }
+
+    public boolean verifyAndInitOffsetForAllStore(boolean initializeOffset) throws RocksDBException {
+        if (innerConsumeQueueStoreList.size() <= 1) {
+            return true;
+        }
+
+        boolean result = true;
+        long minPhyOffset = this.messageStore.getCommitLog().getMinOffset();
+        // for each topic and queueId in assignOffsetStore
+        for (Map.Entry<String, ConcurrentMap<Integer, ConsumeQueueInterface>> entry : assignOffsetStore.getConsumeQueueTable().entrySet()) {
+            for (Map.Entry<Integer, ConsumeQueueInterface> entry0 : entry.getValue().entrySet()) {
+                String topic = entry.getKey();
+                Integer queueId = entry0.getKey();
+                long maxOffsetInAssign = entry0.getValue().getMaxOffsetInQueue();
+
+                for (AbstractConsumeQueueStore abstractConsumeQueueStore : innerConsumeQueueStoreList) {

Review Comment:
   This ends up as a triple-nested loop.
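
   One possible way to keep it to two levels (just a sketch against the part of the diff visible here, not this PR's code: `verifyQueueOffsetForAllStores` is a hypothetical helper name and its body is only outlined in comments) is to move the loop over `innerConsumeQueueStoreList` into a private method:

   ```java
   // Outer two loops stay in verifyAndInitOffsetForAllStore; the third loop moves out.
   for (Map.Entry<String, ConcurrentMap<Integer, ConsumeQueueInterface>> entry
       : assignOffsetStore.getConsumeQueueTable().entrySet()) {
       String topic = entry.getKey();
       for (Map.Entry<Integer, ConsumeQueueInterface> entry0 : entry.getValue().entrySet()) {
           result &= verifyQueueOffsetForAllStores(topic, entry0.getKey(),
               entry0.getValue().getMaxOffsetInQueue(), minPhyOffset, initializeOffset);
       }
   }

   // Hypothetical helper: owns the loop over innerConsumeQueueStoreList and whatever
   // per-store offset comparison / initialization the original inner loop performs.
   private boolean verifyQueueOffsetForAllStores(String topic, Integer queueId, long maxOffsetInAssign,
       long minPhyOffset, boolean initializeOffset) throws RocksDBException {
       for (AbstractConsumeQueueStore store : innerConsumeQueueStoreList) {
           if (store == assignOffsetStore) {
               continue;
           }
           // Compare this store's max offset for <topic, queueId> against maxOffsetInAssign,
           // and initialize the lagging store when initializeOffset is true.
       }
       return true;
   }
   ```

   That keeps each loop body small and gives the per-store check a name that can be tested on its own.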



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@rocketmq.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
