Vladsz83 commented on code in PR #11866:
URL: https://github.com/apache/ignite/pull/11866#discussion_r1958279838


##########
modules/core/src/main/java/org/apache/ignite/internal/management/cache/IdleVerifyResult.java:
##########
@@ -414,4 +379,212 @@ private void printConflicts(Consumer<String> printer) {
     @Override public String toString() {
         return S.toString(IdleVerifyResult.class, this);
     }
+
+    /** */
+    private static <K, V> Map<K, V> notNullFinalMap(@Nullable Map<K, V> map) {
+        return map == null ? Collections.emptyMap() : 
Collections.unmodifiableMap(map);
+    }
+
+    /** */
+    private static <V> List<V> notNullFinalList(@Nullable List<V> lst) {
+        return lst == null ? Collections.emptyList() : 
Collections.unmodifiableList(lst);
+    }
+
    /**
     * Builds result holding errors only.
     *
     * @param exceptions Exceptions mapped to the nodes they occurred on.
     * @return Result carrying only the given exceptions, with no partition or transaction data.
     * @see #builder()
     */
    public static IdleVerifyResult ofErrors(Map<ClusterNode, Exception> exceptions) {
        return new IdleVerifyResult(exceptions);
    }
+
    /** @return A fresh result builder. The builder is not thread-safe; see {@link Builder}. */
    public static Builder builder() {
        return new Builder();
    }
+
+    /** Builder of {@link IdleVerifyResult}. Is not thread-safe. */
+    public static final class Builder {
        /** Collected partition hash records per partition key. Lazily initialized; {@code null} until first added. */
        private @Nullable Map<PartitionKey, List<PartitionHashRecord>> partHashes;

        /** Detected transaction hash conflicts. Passed through to the built result as-is. */
        private @Nullable List<List<TransactionsHashRecord>> txHashConflicts;

        /** Transaction versions per node. Passed through to the built result as-is. */
        private @Nullable Map<ClusterNode, Collection<GridCacheVersion>> partCommitTxs;

        /** Incremental snapshot transactions records per consistent id. */
        private @Nullable Map<Object, Map<Object, TransactionsHashRecord>> incrTxHashRecords;

        /** Exceptions per cluster node, stored by {@link #addException(ClusterNode, Exception)}. */
        private @Nullable Map<ClusterNode, Exception> errs;
+
        /** Private to enforce creation via {@link IdleVerifyResult#builder()}. */
        private Builder() {
            // No-op.
        }
+
        /**
         * Builds final {@link IdleVerifyResult}.
         *
         * <p>Scans the collected partition hash records and classifies every partition as moving, lost,
         * update-counter-conflicting or hash-conflicting before assembling the result.
         *
         * @return Result assembled from everything stored in this builder.
         */
        public IdleVerifyResult build() {
            // Add all missed incremental pairs to the conflicts.
            if (!F.isEmpty(incrTxHashRecords))
                incrTxHashRecords.values().stream().flatMap(e -> e.values().stream()).forEach(e -> addTxConflicts(F.asList(e, null)));

            // No partition hashes were collected: the result carries only tx conflicts and errors.
            if (F.isEmpty(partHashes))
                return new IdleVerifyResult(null, null, null, null, txHashConflicts, partCommitTxs, errs);

            // All four maps are created lazily, only when the first conflict of that kind is found.
            Map<PartitionKey, List<PartitionHashRecord>> cntrConflicts = null;
            Map<PartitionKey, List<PartitionHashRecord>> hashConflicts = null;
            Map<PartitionKey, List<PartitionHashRecord>> movingPartitions = null;
            Map<PartitionKey, List<PartitionHashRecord>> lostPartitions = null;

            for (Map.Entry<PartitionKey, List<PartitionHashRecord>> e : partHashes.entrySet()) {
                // Reference values taken from the first record that is neither MOVING nor LOST.
                Integer partHash = null;
                Integer partVerHash = null;
                Object updateCntr = null;

                for (PartitionHashRecord record : e.getValue()) {
                    // MOVING partitions are reported separately and excluded from conflict detection.
                    if (record.partitionState() == PartitionHashRecord.PartitionState.MOVING) {
                        if (movingPartitions == null)
                            movingPartitions = new HashMap<>();

                        movingPartitions.computeIfAbsent(e.getKey(), k -> new ArrayList<>()).add(record);

                        continue;
                    }

                    // LOST partitions are likewise reported separately.
                    if (record.partitionState() == PartitionHashRecord.PartitionState.LOST) {
                        if (lostPartitions == null)
                            lostPartitions = new HashMap<>();

                        lostPartitions.computeIfAbsent(e.getKey(), k -> new ArrayList<>())
                            .add(record);

                        continue;
                    }

                    if (partHash == null) {
                        // First usable record of this partition becomes the reference.
                        partHash = record.partitionHash();
                        partVerHash = record.partitionVersionsHash();

                        updateCntr = record.updateCounter();
                    }
                    else {
                        // Any record disagreeing with the reference flags the whole partition's record list once.
                        if (!Objects.equals(record.updateCounter(), updateCntr)) {
                            if (cntrConflicts == null)
                                cntrConflicts = new HashMap<>();

                            cntrConflicts.putIfAbsent(e.getKey(), e.getValue());
                        }

                        if (record.partitionHash() != partHash || record.partitionVersionsHash() != partVerHash) {
                            if (hashConflicts == null)
                                hashConflicts = new HashMap<>();

                            hashConflicts.putIfAbsent(e.getKey(), e.getValue());
                        }
                    }
                }
            }

            return new IdleVerifyResult(cntrConflicts, hashConflicts, movingPartitions, lostPartitions, txHashConflicts,
                partCommitTxs, errs);
        }
+
+        /** Stores an exception if none is aready set for certain node. */
+        public Builder addException(ClusterNode node, Exception e) {
+            assert e != null;
+
+            if (errs == null)
+                errs = new HashMap<>();
+
+            errs.putIfAbsent(node, e);
+
+            return this;
+        }
+
+        /** Stores collection of partition hashes related to certain partition 
key. */
+        private Builder addPartitionHashes(PartitionKey key, 
Collection<PartitionHashRecord> newHashes) {
+            if (partHashes == null)
+                partHashes = new HashMap<>();
+
+            partHashes.compute(key, (key0, hashes0) -> {
+                if (hashes0 == null)
+                    hashes0 = new ArrayList<>();
+
+                hashes0.addAll(newHashes);
+
+                return hashes0;
+            });
+
+            return this;
+        }
+
+        /** Stores map of partition hashes per partition key. */
+        public void addPartitionHashes(Map<PartitionKey, PartitionHashRecord> 
newHashes) {
+            newHashes.forEach((key, hash) -> addPartitionHashes(key, 
Collections.singletonList(hash)));
+        }
+
        /**
         * Stores incremental snapshot transaction hash records of a certain node.
         *
         * <p>The new records are matched against the records already stored for counterpart nodes (looked up by
         * consistent id). Each matched pair is removed from both maps; a missing or hash-mismatching counterpart
         * becomes a tx conflict. Records left unmatched stay in {@link #incrTxHashRecords} and are turned into
         * conflicts by {@link #build()}.
         *
         * @param node Reporting node. Each node is expected to report at most once (asserted below).
         * @param res Transaction hash records of {@code node}, keyed by counterpart consistent id. Mutated by this
         *      method: matched entries are removed.
         */
        public void addIncrementalHashRecords(ClusterNode node, Map<Object, TransactionsHashRecord> res) {
            if (incrTxHashRecords == null)
                incrTxHashRecords = new HashMap<>();

            assert incrTxHashRecords.get(node.consistentId()) == null;

            incrTxHashRecords.put(node.consistentId(), res);

            Iterator<Map.Entry<Object, TransactionsHashRecord>> resIt = res.entrySet().iterator();

            while (resIt.hasNext()) {
                Map.Entry<Object, TransactionsHashRecord> nodeTxHash = resIt.next();

                // Records previously reported by the counterpart node, if it has already reported.
                Map<Object, TransactionsHashRecord> prevNodeTxHash = incrTxHashRecords.get(nodeTxHash.getKey());

                if (prevNodeTxHash != null) {
                    TransactionsHashRecord hash = nodeTxHash.getValue();
                    TransactionsHashRecord prevHash = prevNodeTxHash.remove(hash.localConsistentId());

                    // Conflict when the counterpart has no matching record or the transaction hashes differ.
                    if (prevHash == null || prevHash.transactionHash() != hash.transactionHash())
                        addTxConflicts(F.asList(hash, prevHash));

                    // Matched (or conflicting) pair is consumed either way; remove via iterator to avoid CME.
                    resIt.remove();
                }
            }
        }
+
+        /** Stores transaction conflicts. */
+        public Builder addTxConflicts(List<TransactionsHashRecord> 
newTxConflicts) {

Review Comment:
   A public default constructor is required for serialization (Externalizable).



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscr...@ignite.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to