SteveStevenpoor commented on code in PR #26313:
URL: https://github.com/apache/flink/pull/26313#discussion_r2101770631


##########
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/stream/state/MultiJoinStateViews.java:
##########
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.runtime.operators.join.stream.state;
+
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.api.common.state.MapState;
+import org.apache.flink.api.common.state.MapStateDescriptor;
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.runtime.operators.join.stream.utils.JoinInputSideSpec;
+import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
+import org.apache.flink.types.RowKind;
+import org.apache.flink.util.IterableIterator;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import static org.apache.flink.table.runtime.util.StateConfigUtil.createTtlConfig;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * Factory class to create different implementations of {@link MultiJoinStateView} based on the
+ * characteristics described in {@link JoinInputSideSpec}.
+ *
+ * <p>Each state view uses a {@link MapState} where the primary key is the `mapKey` derived from the
+ * join conditions (via {@link
+ * org.apache.flink.table.runtime.operators.join.stream.keyselector.JoinKeyExtractor}). The value
+ * stored within this map depends on whether the input side has a unique key and how it relates to
+ * the join key, optimizing storage and access patterns.
+ */
+public final class MultiJoinStateViews {
+
+    /** Creates a {@link MultiJoinStateView} depending on {@link JoinInputSideSpec}. */
+    public static MultiJoinStateView create(
+            RuntimeContext ctx,
+            String stateName,
+            JoinInputSideSpec inputSideSpec,
+            InternalTypeInfo<RowData> mapKeyType, // Type info for the outer map key
+            InternalTypeInfo<RowData> recordType,
+            long retentionTime) {
+        StateTtlConfig ttlConfig = createTtlConfig(retentionTime);
+
+        if (inputSideSpec.hasUniqueKey()) {
+            if (inputSideSpec.joinKeyContainsUniqueKey()) {
+                return new JoinKeyContainsUniqueKey(
+                        ctx, stateName, mapKeyType, recordType, ttlConfig);
+            } else {
+                return new InputSideHasUniqueKey(
+                        ctx,
+                        stateName,
+                        mapKeyType,
+                        recordType,
+                        inputSideSpec.getUniqueKeyType(),
+                        inputSideSpec.getUniqueKeySelector(),
+                        ttlConfig);
+            }
+        } else {
+            return new InputSideHasNoUniqueKey(ctx, stateName, mapKeyType, recordType, ttlConfig);
+        }
+    }
+
+    /**
+     * Creates a {@link MapStateDescriptor} with the given parameters and applies TTL configuration.
+     *
+     * @param <K> Key type
+     * @param <V> Value type
+     * @param stateName Unique name for the state
+     * @param keyTypeInfo Type information for the key
+     * @param valueTypeInfo Type information for the value
+     * @param ttlConfig State TTL configuration
+     * @return Configured MapStateDescriptor
+     */
+    private static <K, V> MapStateDescriptor<K, V> createStateDescriptor(
+            String stateName,
+            TypeInformation<K> keyTypeInfo,
+            TypeInformation<V> valueTypeInfo,
+            StateTtlConfig ttlConfig) {
+        MapStateDescriptor<K, V> descriptor =
+                new MapStateDescriptor<>(stateName, keyTypeInfo, valueTypeInfo);
+        if (ttlConfig.isEnabled()) {
+            descriptor.enableTimeToLive(ttlConfig);
+        }
+        return descriptor;
+    }
+
+    // ------------------------------------------------------------------------------------
+    // Multi Join State View Implementations
+    // ------------------------------------------------------------------------------------
+
+    /**
+     * State view for input sides where the unique key is fully contained within the join key.
+     *
+     * <p>Stores data as {@code MapState<MapKey, Record>}.
+     */
+    private static final class JoinKeyContainsUniqueKey implements MultiJoinStateView {
+
+        // stores record in the mapping <MapKey, Record>
+        private final MapState<RowData, RowData> recordState;
+        private final List<RowData> reusedList;
+
+        private JoinKeyContainsUniqueKey(
+                RuntimeContext ctx,
+                final String stateName,
+                final InternalTypeInfo<RowData> mapKeyType,
+                final InternalTypeInfo<RowData> recordType,
+                final StateTtlConfig ttlConfig) {
+
+            MapStateDescriptor<RowData, RowData> recordStateDesc =
+                    createStateDescriptor(stateName, mapKeyType, recordType, ttlConfig);
+
+            this.recordState = ctx.getMapState(recordStateDesc);
+            // there is always at most one record per mapKey
+            this.reusedList = new ArrayList<>(1);
+        }
+
+        @Override
+        public void addRecord(RowData mapKey, RowData record) throws Exception {
+            recordState.put(mapKey, record);
+        }
+
+        @Override
+        public void retractRecord(RowData mapKey, RowData record) throws Exception {
+            // Only one record is kept per mapKey, remove it directly.
+            recordState.remove(mapKey);
+        }
+
+        @Override
+        public Iterable<RowData> getRecords(RowData mapKey) throws Exception {
+            reusedList.clear();
+            RowData record = recordState.get(mapKey);
+            if (record != null) {
+                reusedList.add(record);
+            }
+            return reusedList;
+        }
+
+        @Override
+        public void cleanup(RowData mapKey) throws Exception {
+            recordState.remove(mapKey);
+        }
+    }
+
+    /**
+     * State view for input sides that have a unique key, but it differs from the join key.
+     *
+     * <p>Stores data as {@code MapState<MapKey, Map<UK, Record>>}.
+     */
+    private static final class InputSideHasUniqueKey implements MultiJoinStateView {
+
+        // stores map in the mapping <MapKey, Map<UK, Record>>
+        private final MapState<RowData, Map<RowData, RowData>> recordState;
+        private final KeySelector<RowData, RowData> uniqueKeySelector;
+
+        private InputSideHasUniqueKey(
+                RuntimeContext ctx,
+                final String stateName,
+                final InternalTypeInfo<RowData> mapKeyType,
+                final InternalTypeInfo<RowData> recordType,
+                final InternalTypeInfo<RowData> uniqueKeyType,
+                final KeySelector<RowData, RowData> uniqueKeySelector,
+                final StateTtlConfig ttlConfig) {
+            checkNotNull(uniqueKeyType);
+            checkNotNull(uniqueKeySelector);
+            this.uniqueKeySelector = uniqueKeySelector;
+
+            TypeInformation<Map<RowData, RowData>> mapValueTypeInfo =
+                    Types.MAP(uniqueKeyType, recordType); // UK is the key in the inner map
+
+            MapStateDescriptor<RowData, Map<RowData, RowData>> recordStateDesc =
+                    createStateDescriptor(stateName, mapKeyType, mapValueTypeInfo, ttlConfig);
+
+            this.recordState = ctx.getMapState(recordStateDesc);
+        }
+
+        @Override
+        public void addRecord(RowData mapKey, RowData record) throws Exception {
+            RowData uniqueKey = uniqueKeySelector.getKey(record);
+            Map<RowData, RowData> uniqueKeyToRecordMap = recordState.get(mapKey);
+            if (uniqueKeyToRecordMap == null) {
+                uniqueKeyToRecordMap = new HashMap<>();
+            }
+            uniqueKeyToRecordMap.put(uniqueKey, record);
+            recordState.put(mapKey, uniqueKeyToRecordMap);
+        }
+
+        @Override
+        public void retractRecord(RowData mapKey, RowData record) throws Exception {
+            RowData uniqueKey = uniqueKeySelector.getKey(record);
+            Map<RowData, RowData> uniqueKeyToRecordMap = recordState.get(mapKey);
+            if (uniqueKeyToRecordMap != null) {
+                uniqueKeyToRecordMap.remove(uniqueKey);
+                if (uniqueKeyToRecordMap.isEmpty()) {
+                    // Clean up the entry for mapKey if the inner map becomes empty
+                    recordState.remove(mapKey);
+                } else {
+                    recordState.put(mapKey, uniqueKeyToRecordMap);
+                }
+            }
+            // ignore uniqueKeyToRecordMap == null
+        }
+
+        @Override
+        public Iterable<RowData> getRecords(RowData mapKey) throws Exception {
+            Map<RowData, RowData> uniqueKeyToRecordMap = recordState.get(mapKey);
+            if (uniqueKeyToRecordMap == null) {
+                return Collections.emptyList();
+            } else {
+                // Return the values (records) from the inner map
+                return uniqueKeyToRecordMap.values();
+            }
+        }
+
+        @Override
+        public void cleanup(RowData mapKey) throws Exception {
+            recordState.remove(mapKey);
+        }
+    }
+
+    /**
+     * State view for input sides that do not have a unique key (multi-set semantics).
+     *
+     * <p>Stores data as {@code MapState<MapKey, Map<Record, Count>>}.
+     */
+    private static final class InputSideHasNoUniqueKey implements MultiJoinStateView {
+
+        // stores map in the mapping <MapKey, Map<Record, Count>>
+        private final MapState<RowData, Map<RowData, Integer>> recordState;

Review Comment:
   > InputSideHasNoUniqueKey in JoinRecordStateViews can simply use MapState<Row, Count> since the operator key is already the join key. Our Multi-Join Operator has the common key as the operator key, and not necessarily the join key, so we need to have MapState<JoinKey, Map<Row, Count>>. We need this extra level to be able to do [state.getRecords(joinKey)](https://github.com/apache/flink/blob/f0e6cd145d89dd4dde32e3724e57aa0167fecd26/flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/stream/StreamingMultiJoinOperator.java#L564) and filter down the records to reprocess. How would we be able to do that if we simply use MapState<Row, Count>?
   
   Thanks for the explanation. I see your point about needing a second level of mapping due to the difference between the common key and the join key. But there wouldn't be such an issue if we treated such cases as "common partitioning key" violations.
   
   > This would restrict the use cases drastically. We would only work for multiple joins that use exactly the same key across all levels.
   
   I don't think the restriction would be as drastic in practice. Moreover, the current optimization may not be safe in real-world scenarios, since there is a risk of a sudden OOM.
   <pre>
        @Override
        public void addRecord(RowData mapKey, RowData record) throws Exception {
            Map<RowData, Integer> recordToCountMap = recordState.get(mapKey);
            if (recordToCountMap == null) {
                recordToCountMap = new HashMap<>();
            }
            recordToCountMap.merge(record, 1, Integer::sum); // Increment count or set to 1
            recordState.put(mapKey, recordToCountMap);
        }</pre>
      
   Here you materialize in memory a whole map whose size is unknown and unbounded. And it's not uncommon for users to have a lot of records with the same joinKey. I agree that we can tolerate serialization and RocksDB/ForStDB inefficiencies to some extent. But the possibility of a sudden OOM is much more critical and harder to ignore.
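   
   For illustration only, here is a rough sketch of the flattened variant I have in mind, under the assumption that the operator (partitioning) key already equals the join key, mirroring InputSideHasNoUniqueKey from JoinRecordStateViews. The class and field names are made up for this example, and count expansion in getRecords is left out for brevity:
   <pre>
    /** Sketch: no-unique-key view when the operator key is assumed to equal the join key. */
    private static final class FlatInputSideHasNoUniqueKey implements MultiJoinStateView {

        // record -> appearance count, already scoped by the operator key (assumed == join key)
        private final MapState<RowData, Integer> recordState;

        private FlatInputSideHasNoUniqueKey(
                RuntimeContext ctx,
                String stateName,
                InternalTypeInfo<RowData> recordType,
                StateTtlConfig ttlConfig) {
            MapStateDescriptor<RowData, Integer> desc =
                    new MapStateDescriptor<>(stateName, recordType, Types.INT);
            if (ttlConfig.isEnabled()) {
                desc.enableTimeToLive(ttlConfig);
            }
            this.recordState = ctx.getMapState(desc);
        }

        @Override
        public void addRecord(RowData mapKey, RowData record) throws Exception {
            // mapKey is ignored: state is already partitioned by the (assumed) join key
            Integer count = recordState.get(record);
            recordState.put(record, count == null ? 1 : count + 1);
        }

        @Override
        public void retractRecord(RowData mapKey, RowData record) throws Exception {
            Integer count = recordState.get(record);
            if (count == null) {
                return; // ignore retractions for unknown records
            }
            if (count > 1) {
                recordState.put(record, count - 1);
            } else {
                recordState.remove(record);
            }
        }

        @Override
        public Iterable<RowData> getRecords(RowData mapKey) throws Exception {
            // Simplified: the real view would expand each record `count` times,
            // like the IterableIterator used in JoinRecordStateViews.
            return recordState.keys();
        }

        @Override
        public void cleanup(RowData mapKey) throws Exception {
            recordState.clear();
        }
    }</pre>
   No inner java.util.Map is ever materialized per access: each record maps to a single count entry in state. Of course, this only works if we can rely on the join key being the partitioning key, which is exactly the restriction we're discussing.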
   
   Thank you too, @gustavodemorais! Let me know what you think. Happy to continue the discussion or help explore alternatives.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
