SteveStevenpoor commented on code in PR #26313:
URL: https://github.com/apache/flink/pull/26313#discussion_r2102239291


##########
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/stream/state/MultiJoinStateViews.java:
##########
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.runtime.operators.join.stream.state;
+
+import org.apache.flink.api.common.functions.RuntimeContext;
+import org.apache.flink.api.common.state.MapState;
+import org.apache.flink.api.common.state.MapStateDescriptor;
+import org.apache.flink.api.common.state.StateTtlConfig;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.table.data.RowData;
+import org.apache.flink.table.runtime.operators.join.stream.utils.JoinInputSideSpec;
+import org.apache.flink.table.runtime.typeutils.InternalTypeInfo;
+import org.apache.flink.types.RowKind;
+import org.apache.flink.util.IterableIterator;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import static org.apache.flink.table.runtime.util.StateConfigUtil.createTtlConfig;
+import static org.apache.flink.util.Preconditions.checkNotNull;
+
+/**
+ * Factory class to create different implementations of {@link MultiJoinStateView} based on the
+ * characteristics described in {@link JoinInputSideSpec}.
+ *
+ * <p>Each state view uses a {@link MapState} where the primary key is the `mapKey` derived from the
+ * join conditions (via {@link
+ * org.apache.flink.table.runtime.operators.join.stream.keyselector.JoinKeyExtractor}). The value
+ * stored within this map depends on whether the input side has a unique key and how it relates to
+ * the join key, optimizing storage and access patterns.
+ */
+public final class MultiJoinStateViews {
+
+    /** Creates a {@link MultiJoinStateView} depending on the {@link JoinInputSideSpec}. */
+    public static MultiJoinStateView create(
+            RuntimeContext ctx,
+            String stateName,
+            JoinInputSideSpec inputSideSpec,
+            InternalTypeInfo<RowData> mapKeyType, // Type info for the outer map key
+            InternalTypeInfo<RowData> recordType,
+            long retentionTime) {
+        StateTtlConfig ttlConfig = createTtlConfig(retentionTime);
+
+        if (inputSideSpec.hasUniqueKey()) {
+            if (inputSideSpec.joinKeyContainsUniqueKey()) {
+                return new JoinKeyContainsUniqueKey(
+                        ctx, stateName, mapKeyType, recordType, ttlConfig);
+            } else {
+                return new InputSideHasUniqueKey(
+                        ctx,
+                        stateName,
+                        mapKeyType,
+                        recordType,
+                        inputSideSpec.getUniqueKeyType(),
+                        inputSideSpec.getUniqueKeySelector(),
+                        ttlConfig);
+            }
+        } else {
+            return new InputSideHasNoUniqueKey(ctx, stateName, mapKeyType, recordType, ttlConfig);
+        }
+    }
+
+    /**
+     * Creates a {@link MapStateDescriptor} with the given parameters and applies TTL configuration.
+     *
+     * @param <K> Key type
+     * @param <V> Value type
+     * @param stateName Unique name for the state
+     * @param keyTypeInfo Type information for the key
+     * @param valueTypeInfo Type information for the value
+     * @param ttlConfig State TTL configuration
+     * @return Configured MapStateDescriptor
+     */
+    private static <K, V> MapStateDescriptor<K, V> createStateDescriptor(
+            String stateName,
+            TypeInformation<K> keyTypeInfo,
+            TypeInformation<V> valueTypeInfo,
+            StateTtlConfig ttlConfig) {
+        MapStateDescriptor<K, V> descriptor =
+                new MapStateDescriptor<>(stateName, keyTypeInfo, valueTypeInfo);
+        if (ttlConfig.isEnabled()) {
+            descriptor.enableTimeToLive(ttlConfig);
+        }
+        return descriptor;
+    }
+
+    // ------------------------------------------------------------------------------------
+    // Multi Join State View Implementations
+    // ------------------------------------------------------------------------------------
+
+    /**
+     * State view for input sides where the unique key is fully contained within the join key.
+     *
+     * <p>Stores data as {@code MapState<MapKey, Record>}.
+     */
+    private static final class JoinKeyContainsUniqueKey implements MultiJoinStateView {
+
+        // stores record in the mapping <MapKey, Record>
+        private final MapState<RowData, RowData> recordState;
+        private final List<RowData> reusedList;
+
+        private JoinKeyContainsUniqueKey(
+                RuntimeContext ctx,
+                final String stateName,
+                final InternalTypeInfo<RowData> mapKeyType,
+                final InternalTypeInfo<RowData> recordType,
+                final StateTtlConfig ttlConfig) {
+
+            MapStateDescriptor<RowData, RowData> recordStateDesc =
+                    createStateDescriptor(stateName, mapKeyType, recordType, ttlConfig);
+
+            this.recordState = ctx.getMapState(recordStateDesc);
+            // there is always at most one result record per mapKey
+            this.reusedList = new ArrayList<>(1);
+        }
+
+        @Override
+        public void addRecord(RowData mapKey, RowData record) throws Exception {
+            recordState.put(mapKey, record);
+        }
+
+        @Override
+        public void retractRecord(RowData mapKey, RowData record) throws Exception {
+            // Only one record is kept per mapKey, remove it directly.
+            recordState.remove(mapKey);
+        }
+
+        @Override
+        public Iterable<RowData> getRecords(RowData mapKey) throws Exception {
+            reusedList.clear();
+            RowData record = recordState.get(mapKey);
+            if (record != null) {
+                reusedList.add(record);
+            }
+            return reusedList;
+        }
+
+        @Override
+        public void cleanup(RowData mapKey) throws Exception {
+            recordState.remove(mapKey);
+        }
+    }
+
+    /**
+     * State view for input sides that have a unique key, but it differs from the join key.
+     *
+     * <p>Stores data as {@code MapState<MapKey, Map<UK, Record>>}.
+     */
+    private static final class InputSideHasUniqueKey implements MultiJoinStateView {
+
+        // stores map in the mapping <MapKey, Map<UK, Record>>
+        private final MapState<RowData, Map<RowData, RowData>> recordState;
+        private final KeySelector<RowData, RowData> uniqueKeySelector;
+
+        private InputSideHasUniqueKey(
+                RuntimeContext ctx,
+                final String stateName,
+                final InternalTypeInfo<RowData> mapKeyType,
+                final InternalTypeInfo<RowData> recordType,
+                final InternalTypeInfo<RowData> uniqueKeyType,
+                final KeySelector<RowData, RowData> uniqueKeySelector,
+                final StateTtlConfig ttlConfig) {
+            checkNotNull(uniqueKeyType);
+            checkNotNull(uniqueKeySelector);
+            this.uniqueKeySelector = uniqueKeySelector;
+
+            TypeInformation<Map<RowData, RowData>> mapValueTypeInfo =
+                    Types.MAP(uniqueKeyType, recordType); // UK is the key in the inner map
+
+            MapStateDescriptor<RowData, Map<RowData, RowData>> recordStateDesc =
+                    createStateDescriptor(stateName, mapKeyType, mapValueTypeInfo, ttlConfig);
+
+            this.recordState = ctx.getMapState(recordStateDesc);
+        }
+
+        @Override
+        public void addRecord(RowData mapKey, RowData record) throws Exception {
+            RowData uniqueKey = uniqueKeySelector.getKey(record);
+            Map<RowData, RowData> uniqueKeyToRecordMap = recordState.get(mapKey);
+            if (uniqueKeyToRecordMap == null) {
+                uniqueKeyToRecordMap = new HashMap<>();
+            }
+            uniqueKeyToRecordMap.put(uniqueKey, record);
+            recordState.put(mapKey, uniqueKeyToRecordMap);
+        }
+
+        @Override
+        public void retractRecord(RowData mapKey, RowData record) throws Exception {
+            RowData uniqueKey = uniqueKeySelector.getKey(record);
+            Map<RowData, RowData> uniqueKeyToRecordMap = recordState.get(mapKey);
+            if (uniqueKeyToRecordMap != null) {
+                uniqueKeyToRecordMap.remove(uniqueKey);
+                if (uniqueKeyToRecordMap.isEmpty()) {
+                    // Clean up the entry for mapKey if the inner map becomes empty
+                    recordState.remove(mapKey);
+                } else {
+                    recordState.put(mapKey, uniqueKeyToRecordMap);
+                }
+            }
+            // ignore uniqueKeyToRecordMap == null
+        }
+
+        @Override
+        public Iterable<RowData> getRecords(RowData mapKey) throws Exception {
+            Map<RowData, RowData> uniqueKeyToRecordMap = recordState.get(mapKey);
+            if (uniqueKeyToRecordMap == null) {
+                return Collections.emptyList();
+            } else {
+                // Return the values (records) from the inner map
+                return uniqueKeyToRecordMap.values();
+            }
+        }
+
+        @Override
+        public void cleanup(RowData mapKey) throws Exception {
+            recordState.remove(mapKey);
+        }
+    }
+
+    /**
+     * State view for input sides that do not have a unique key (multi-set semantics).
+     *
+     * <p>Stores data as {@code MapState<MapKey, Map<Record, Count>>}.
+     */
+    private static final class InputSideHasNoUniqueKey implements MultiJoinStateView {
+
+        // stores map in the mapping <MapKey, Map<Record, Count>>
+        private final MapState<RowData, Map<RowData, Integer>> recordState;

Review Comment:
   Oh, I thought you meant joinKey as the key and uniqueKey as the value, sorry :) In that case, how will we be able to find associations? Say we have records [commonKey, joinKey, primaryKey], for example (1, 2, 3) and (2, 2, 4). Since we partition on the common key, MapState<[JK, PK], Record> will look like this: 1 -> Map{2,3 -> (1,2,3)}, 2 -> Map{2,4 -> (2,2,4)}. Both records have the same joinKey but cannot be associated, because Flink's MapState only lets us iterate within a single Flink key. Given that, maybe we can take two parallel tracks? I could work on a simplified version with the constraint commonKey = joinKey in a separate FLIP to enable safe and bounded memory usage for common cases, while you continue refining the more flexible version that supports a wider range of scenarios. Let me know what you think about this approach!
   Thanks again for the fast responses, I really appreciate the discussion.
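   To make the layout above concrete, here is a tiny, self-contained sketch. It uses plain Java maps standing in for one MapState instance per Flink key (commonKey); the class name and literal values are only illustrative and are not code from this PR.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustration only: HashMaps emulating the per-Flink-key visibility of MapState.
public class KeyedStateLayoutSketch {

    public static void main(String[] args) {
        // commonKey -> the MapState<[JK, PK], Record> contents visible under that key
        Map<Integer, Map<List<Integer>, int[]>> statePerFlinkKey = new HashMap<>();

        // Record (1, 2, 3): commonKey=1, joinKey=2, primaryKey=3
        statePerFlinkKey
                .computeIfAbsent(1, k -> new HashMap<>())
                .put(Arrays.asList(2, 3), new int[] {1, 2, 3});

        // Record (2, 2, 4): commonKey=2, joinKey=2, primaryKey=4
        statePerFlinkKey
                .computeIfAbsent(2, k -> new HashMap<>())
                .put(Arrays.asList(2, 4), new int[] {2, 2, 4});

        // While processing an element keyed by commonKey=1, only this map is accessible:
        Map<List<Integer>, int[]> visibleState = statePerFlinkKey.get(1);

        // Scanning it for joinKey=2 finds (1, 2, 3), but (2, 2, 4) can never be seen here,
        // because it lives under a different Flink key (commonKey=2).
        visibleState.forEach(
                (mapKey, record) -> {
                    if (mapKey.get(0) == 2) {
                        System.out.println(
                                "joinKey=2 match under commonKey=1: " + Arrays.toString(record));
                    }
                });
    }
}
```

   With the proposed commonKey = joinKey constraint, all records that can join land under the same Flink key, so a single MapState scan does see every association.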


