dawidwys commented on code in PR #23680: URL: https://github.com/apache/flink/pull/23680#discussion_r1386592611
########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/JoinJsonPlanTest.java: ########## @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.flink.table.planner.plan.nodes.exec.stream; - -import org.apache.flink.table.api.TableConfig; -import org.apache.flink.table.api.TableEnvironment; -import org.apache.flink.table.planner.utils.StreamTableTestUtil; -import org.apache.flink.table.planner.utils.TableTestBase; - -import org.junit.Before; -import org.junit.Test; - -/** Test json serialization/deserialization for join. */ -public class JoinJsonPlanTest extends TableTestBase { - - private StreamTableTestUtil util; - private TableEnvironment tEnv; - - @Before - public void setup() { Review Comment: Can we remove the json plan files as well? ########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/testutils/JoinTestPrograms.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.table.planner.plan.nodes.exec.testutils; + +import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecJoin; +import org.apache.flink.table.test.program.SinkTestStep; +import org.apache.flink.table.test.program.SourceTestStep; +import org.apache.flink.table.test.program.TableTestProgram; +import org.apache.flink.types.Row; +import org.apache.flink.types.RowKind; + +/** {@link TableTestProgram} definitions for testing {@link StreamExecJoin}. 
*/ +public class JoinTestPrograms { + + static final TableTestProgram NON_WINDOW_INNER_JOIN; + static final TableTestProgram NON_WINDOW_INNER_JOIN_WITH_NULL; + static final TableTestProgram JOIN; + static final TableTestProgram INNER_JOIN; + static final TableTestProgram JOIN_WITH_FILTER; + static final TableTestProgram INNER_JOIN_WITH_DUPLICATE_KEY; + static final TableTestProgram INNER_JOIN_WITH_NON_EQUI_JOIN; + static final TableTestProgram INNER_JOIN_WITH_EQUAL_PK; + static final TableTestProgram INNER_JOIN_WITH_PK; + + static final SourceTestStep SOURCE_A = + SourceTestStep.newBuilder("A") + .addSchema("a1 int", "a2 bigint", "a3 varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi"), + Row.of(2, 2L, "Hello"), + Row.of(3, 2L, "Hello world")) + .producedAfterRestore(Row.of(4, 3L, "Hello there")) + .build(); + + static final SourceTestStep SOURCE_B = + SourceTestStep.newBuilder("B") + .addSchema("b1 int", "b2 bigint", "b3 int", "b4 varchar", "b5 bigint") + .producedBeforeRestore( + Row.of(1, 1L, 0, "Hallo", 1L), + Row.of(2, 2L, 1, "Hallo Welt", 2L), + Row.of(2, 3L, 2, "Hallo Welt wie", 1L), + Row.of(3, 1L, 2, "Hallo Welt wie gehts", 1L)) + .producedAfterRestore(Row.of(2, 4L, 3, "Hallo Welt wie gehts", 4L)) + .build(); + static final SourceTestStep SOURCE_T1 = + SourceTestStep.newBuilder("T1") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi1"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 5L, "Hi3"), + Row.of(2, 7L, "Hi5"), + Row.of(1, 9L, "Hi6"), + Row.of(1, 8L, "Hi8"), + Row.of(3, 8L, "Hi9")) + .producedAfterRestore(Row.of(1, 1L, "PostRestore")) + .build(); + static final SourceTestStep SOURCE_T2 = + SourceTestStep.newBuilder("T2") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "HiHi"), Row.of(2, 2L, "HeHe"), Row.of(3, 2L, "HeHe")) + .producedAfterRestore(Row.of(2, 1L, "PostRestoreRight")) + .build(); + + static { + NON_WINDOW_INNER_JOIN = + TableTestProgram.of("non-window-inner-join", "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON t1.a = t2.a AND t1.b > t2.b") + .build(); + + NON_WINDOW_INNER_JOIN_WITH_NULL = + TableTestProgram.of( + "non-window-inner-join-with-null-cond", + "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8"), + Row.of(null, "HeHe", "Hi9")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" 
+ + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON \n" + + " ((t1.a is null AND t2.a is null) OR\n" + + " (t1.a = t2.a))\n" + + " AND t1.b > t2.b") + .build(); + + JOIN = + TableTestProgram.of("join", "test join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hello", "Hallo Welt"), + Row.of("Hello world", "Hallo Welt"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore( + Row.of("Hello there", "Hallo Welt wie")) + .build()) + .runSql("insert into MySink " + "SELECT a3, b4 FROM A, B WHERE a2 = b2") + .build(); + + INNER_JOIN = + TableTestProgram.of("inner join", "test inner join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore( + Row.of(1, 1), Review Comment: could we use outpuit columns which would make it easier to tell which rows actually got joined? e.g. `a3` and `b4` ########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/testutils/JoinTestPrograms.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.table.planner.plan.nodes.exec.testutils; + +import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecJoin; +import org.apache.flink.table.test.program.SinkTestStep; +import org.apache.flink.table.test.program.SourceTestStep; +import org.apache.flink.table.test.program.TableTestProgram; +import org.apache.flink.types.Row; +import org.apache.flink.types.RowKind; + +/** {@link TableTestProgram} definitions for testing {@link StreamExecJoin}. 
*/ +public class JoinTestPrograms { + + static final TableTestProgram NON_WINDOW_INNER_JOIN; + static final TableTestProgram NON_WINDOW_INNER_JOIN_WITH_NULL; + static final TableTestProgram JOIN; + static final TableTestProgram INNER_JOIN; + static final TableTestProgram JOIN_WITH_FILTER; + static final TableTestProgram INNER_JOIN_WITH_DUPLICATE_KEY; + static final TableTestProgram INNER_JOIN_WITH_NON_EQUI_JOIN; + static final TableTestProgram INNER_JOIN_WITH_EQUAL_PK; + static final TableTestProgram INNER_JOIN_WITH_PK; + + static final SourceTestStep SOURCE_A = + SourceTestStep.newBuilder("A") + .addSchema("a1 int", "a2 bigint", "a3 varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi"), + Row.of(2, 2L, "Hello"), + Row.of(3, 2L, "Hello world")) + .producedAfterRestore(Row.of(4, 3L, "Hello there")) + .build(); + + static final SourceTestStep SOURCE_B = + SourceTestStep.newBuilder("B") + .addSchema("b1 int", "b2 bigint", "b3 int", "b4 varchar", "b5 bigint") + .producedBeforeRestore( + Row.of(1, 1L, 0, "Hallo", 1L), + Row.of(2, 2L, 1, "Hallo Welt", 2L), + Row.of(2, 3L, 2, "Hallo Welt wie", 1L), + Row.of(3, 1L, 2, "Hallo Welt wie gehts", 1L)) + .producedAfterRestore(Row.of(2, 4L, 3, "Hallo Welt wie gehts", 4L)) + .build(); + static final SourceTestStep SOURCE_T1 = + SourceTestStep.newBuilder("T1") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi1"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 5L, "Hi3"), + Row.of(2, 7L, "Hi5"), + Row.of(1, 9L, "Hi6"), + Row.of(1, 8L, "Hi8"), + Row.of(3, 8L, "Hi9")) + .producedAfterRestore(Row.of(1, 1L, "PostRestore")) + .build(); + static final SourceTestStep SOURCE_T2 = + SourceTestStep.newBuilder("T2") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "HiHi"), Row.of(2, 2L, "HeHe"), Row.of(3, 2L, "HeHe")) + .producedAfterRestore(Row.of(2, 1L, "PostRestoreRight")) + .build(); + + static { + NON_WINDOW_INNER_JOIN = + TableTestProgram.of("non-window-inner-join", "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON t1.a = t2.a AND t1.b > t2.b") + .build(); + + NON_WINDOW_INNER_JOIN_WITH_NULL = + TableTestProgram.of( + "non-window-inner-join-with-null-cond", + "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8"), + Row.of(null, "HeHe", "Hi9")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" 
+ + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON \n" + + " ((t1.a is null AND t2.a is null) OR\n" + + " (t1.a = t2.a))\n" + + " AND t1.b > t2.b") + .build(); + + JOIN = + TableTestProgram.of("join", "test join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hello", "Hallo Welt"), + Row.of("Hello world", "Hallo Welt"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore( + Row.of("Hello there", "Hallo Welt wie")) + .build()) + .runSql("insert into MySink " + "SELECT a3, b4 FROM A, B WHERE a2 = b2") + .build(); + + INNER_JOIN = + TableTestProgram.of("inner join", "test inner join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore( + Row.of(1, 1), + Row.of(2, 2), + Row.of(3, 3), + Row.of(2, 2)) + .consumedAfterRestore(Row.of(2, 2)) + .build()) + .runSql("insert into MySink " + "SELECT a1, b1 FROM A JOIN B ON a1 = b1") + .build(); + + JOIN_WITH_FILTER = + TableTestProgram.of("join-with-filter", "test join with filter") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore(new Row[] {}) + .build()) + .runSql( + "insert into MySink " + + "SELECT a3, b4 FROM A, B where a2 = b2 and a2 < 2") + .build(); + + INNER_JOIN_WITH_DUPLICATE_KEY = + TableTestProgram.of( + "inner-join-with-duplicate-key", "inner join with duplicate key") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore(Row.of(2, 2)) Review Comment: can we use columns that would tell us which rows actually got joined? ########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/testutils/JoinTestPrograms.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.table.planner.plan.nodes.exec.testutils; + +import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecJoin; +import org.apache.flink.table.test.program.SinkTestStep; +import org.apache.flink.table.test.program.SourceTestStep; +import org.apache.flink.table.test.program.TableTestProgram; +import org.apache.flink.types.Row; +import org.apache.flink.types.RowKind; + +/** {@link TableTestProgram} definitions for testing {@link StreamExecJoin}. */ +public class JoinTestPrograms { + + static final TableTestProgram NON_WINDOW_INNER_JOIN; + static final TableTestProgram NON_WINDOW_INNER_JOIN_WITH_NULL; + static final TableTestProgram JOIN; + static final TableTestProgram INNER_JOIN; + static final TableTestProgram JOIN_WITH_FILTER; + static final TableTestProgram INNER_JOIN_WITH_DUPLICATE_KEY; + static final TableTestProgram INNER_JOIN_WITH_NON_EQUI_JOIN; + static final TableTestProgram INNER_JOIN_WITH_EQUAL_PK; + static final TableTestProgram INNER_JOIN_WITH_PK; + + static final SourceTestStep SOURCE_A = + SourceTestStep.newBuilder("A") + .addSchema("a1 int", "a2 bigint", "a3 varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi"), + Row.of(2, 2L, "Hello"), + Row.of(3, 2L, "Hello world")) + .producedAfterRestore(Row.of(4, 3L, "Hello there")) + .build(); + + static final SourceTestStep SOURCE_B = + SourceTestStep.newBuilder("B") + .addSchema("b1 int", "b2 bigint", "b3 int", "b4 varchar", "b5 bigint") + .producedBeforeRestore( + Row.of(1, 1L, 0, "Hallo", 1L), + Row.of(2, 2L, 1, "Hallo Welt", 2L), + Row.of(2, 3L, 2, "Hallo Welt wie", 1L), + Row.of(3, 1L, 2, "Hallo Welt wie gehts", 1L)) + .producedAfterRestore(Row.of(2, 4L, 3, "Hallo Welt wie gehts", 4L)) + .build(); + static final SourceTestStep SOURCE_T1 = + SourceTestStep.newBuilder("T1") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi1"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 5L, "Hi3"), + Row.of(2, 7L, "Hi5"), + Row.of(1, 9L, "Hi6"), + Row.of(1, 8L, "Hi8"), + Row.of(3, 8L, "Hi9")) + .producedAfterRestore(Row.of(1, 1L, "PostRestore")) + .build(); + static final SourceTestStep SOURCE_T2 = + SourceTestStep.newBuilder("T2") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "HiHi"), Row.of(2, 2L, "HeHe"), Row.of(3, 2L, "HeHe")) + .producedAfterRestore(Row.of(2, 1L, "PostRestoreRight")) + .build(); + + static { + NON_WINDOW_INNER_JOIN = + TableTestProgram.of("non-window-inner-join", "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON t1.a = t2.a AND t1.b > t2.b") + .build(); + + NON_WINDOW_INNER_JOIN_WITH_NULL = + TableTestProgram.of( + "non-window-inner-join-with-null-cond", + "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + 
.addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8"), + Row.of(null, "HeHe", "Hi9")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON \n" + + " ((t1.a is null AND t2.a is null) OR\n" + + " (t1.a = t2.a))\n" + + " AND t1.b > t2.b") + .build(); + + JOIN = + TableTestProgram.of("join", "test join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hello", "Hallo Welt"), + Row.of("Hello world", "Hallo Welt"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore( + Row.of("Hello there", "Hallo Welt wie")) + .build()) + .runSql("insert into MySink " + "SELECT a3, b4 FROM A, B WHERE a2 = b2") + .build(); + + INNER_JOIN = + TableTestProgram.of("inner join", "test inner join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore( + Row.of(1, 1), + Row.of(2, 2), + Row.of(3, 3), + Row.of(2, 2)) + .consumedAfterRestore(Row.of(2, 2)) + .build()) + .runSql("insert into MySink " + "SELECT a1, b1 FROM A JOIN B ON a1 = b1") + .build(); + + JOIN_WITH_FILTER = + TableTestProgram.of("join-with-filter", "test join with filter") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore(new Row[] {}) Review Comment: can we extend tests to verify the restore behaviour as well? ########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/testutils/JoinTestPrograms.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.table.planner.plan.nodes.exec.testutils; + +import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecJoin; +import org.apache.flink.table.test.program.SinkTestStep; +import org.apache.flink.table.test.program.SourceTestStep; +import org.apache.flink.table.test.program.TableTestProgram; +import org.apache.flink.types.Row; +import org.apache.flink.types.RowKind; + +/** {@link TableTestProgram} definitions for testing {@link StreamExecJoin}. */ +public class JoinTestPrograms { + + static final TableTestProgram NON_WINDOW_INNER_JOIN; + static final TableTestProgram NON_WINDOW_INNER_JOIN_WITH_NULL; + static final TableTestProgram JOIN; + static final TableTestProgram INNER_JOIN; + static final TableTestProgram JOIN_WITH_FILTER; + static final TableTestProgram INNER_JOIN_WITH_DUPLICATE_KEY; + static final TableTestProgram INNER_JOIN_WITH_NON_EQUI_JOIN; + static final TableTestProgram INNER_JOIN_WITH_EQUAL_PK; + static final TableTestProgram INNER_JOIN_WITH_PK; + + static final SourceTestStep SOURCE_A = + SourceTestStep.newBuilder("A") + .addSchema("a1 int", "a2 bigint", "a3 varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi"), + Row.of(2, 2L, "Hello"), + Row.of(3, 2L, "Hello world")) + .producedAfterRestore(Row.of(4, 3L, "Hello there")) + .build(); + + static final SourceTestStep SOURCE_B = + SourceTestStep.newBuilder("B") + .addSchema("b1 int", "b2 bigint", "b3 int", "b4 varchar", "b5 bigint") + .producedBeforeRestore( + Row.of(1, 1L, 0, "Hallo", 1L), + Row.of(2, 2L, 1, "Hallo Welt", 2L), + Row.of(2, 3L, 2, "Hallo Welt wie", 1L), + Row.of(3, 1L, 2, "Hallo Welt wie gehts", 1L)) + .producedAfterRestore(Row.of(2, 4L, 3, "Hallo Welt wie gehts", 4L)) + .build(); + static final SourceTestStep SOURCE_T1 = + SourceTestStep.newBuilder("T1") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi1"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 5L, "Hi3"), + Row.of(2, 7L, "Hi5"), + Row.of(1, 9L, "Hi6"), + Row.of(1, 8L, "Hi8"), + Row.of(3, 8L, "Hi9")) + .producedAfterRestore(Row.of(1, 1L, "PostRestore")) + .build(); + static final SourceTestStep SOURCE_T2 = + SourceTestStep.newBuilder("T2") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "HiHi"), Row.of(2, 2L, "HeHe"), Row.of(3, 2L, "HeHe")) + .producedAfterRestore(Row.of(2, 1L, "PostRestoreRight")) + .build(); + + static { + NON_WINDOW_INNER_JOIN = + TableTestProgram.of("non-window-inner-join", "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON t1.a = t2.a AND t1.b > t2.b") + .build(); + + NON_WINDOW_INNER_JOIN_WITH_NULL = + TableTestProgram.of( + "non-window-inner-join-with-null-cond", + "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + 
.addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8"), + Row.of(null, "HeHe", "Hi9")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON \n" + + " ((t1.a is null AND t2.a is null) OR\n" + + " (t1.a = t2.a))\n" + + " AND t1.b > t2.b") + .build(); + + JOIN = + TableTestProgram.of("join", "test join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hello", "Hallo Welt"), + Row.of("Hello world", "Hallo Welt"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore( + Row.of("Hello there", "Hallo Welt wie")) + .build()) + .runSql("insert into MySink " + "SELECT a3, b4 FROM A, B WHERE a2 = b2") + .build(); + + INNER_JOIN = Review Comment: Can we use the same input/output as `JOIN`. Plans should be the same, no? ########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/testutils/JoinTestPrograms.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.table.planner.plan.nodes.exec.testutils; + +import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecJoin; +import org.apache.flink.table.test.program.SinkTestStep; +import org.apache.flink.table.test.program.SourceTestStep; +import org.apache.flink.table.test.program.TableTestProgram; +import org.apache.flink.types.Row; +import org.apache.flink.types.RowKind; + +/** {@link TableTestProgram} definitions for testing {@link StreamExecJoin}. 
*/ +public class JoinTestPrograms { + + static final TableTestProgram NON_WINDOW_INNER_JOIN; + static final TableTestProgram NON_WINDOW_INNER_JOIN_WITH_NULL; + static final TableTestProgram JOIN; + static final TableTestProgram INNER_JOIN; + static final TableTestProgram JOIN_WITH_FILTER; + static final TableTestProgram INNER_JOIN_WITH_DUPLICATE_KEY; + static final TableTestProgram INNER_JOIN_WITH_NON_EQUI_JOIN; + static final TableTestProgram INNER_JOIN_WITH_EQUAL_PK; + static final TableTestProgram INNER_JOIN_WITH_PK; + + static final SourceTestStep SOURCE_A = + SourceTestStep.newBuilder("A") + .addSchema("a1 int", "a2 bigint", "a3 varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi"), + Row.of(2, 2L, "Hello"), + Row.of(3, 2L, "Hello world")) + .producedAfterRestore(Row.of(4, 3L, "Hello there")) + .build(); + + static final SourceTestStep SOURCE_B = + SourceTestStep.newBuilder("B") + .addSchema("b1 int", "b2 bigint", "b3 int", "b4 varchar", "b5 bigint") + .producedBeforeRestore( + Row.of(1, 1L, 0, "Hallo", 1L), + Row.of(2, 2L, 1, "Hallo Welt", 2L), + Row.of(2, 3L, 2, "Hallo Welt wie", 1L), + Row.of(3, 1L, 2, "Hallo Welt wie gehts", 1L)) + .producedAfterRestore(Row.of(2, 4L, 3, "Hallo Welt wie gehts", 4L)) + .build(); + static final SourceTestStep SOURCE_T1 = + SourceTestStep.newBuilder("T1") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi1"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 5L, "Hi3"), + Row.of(2, 7L, "Hi5"), + Row.of(1, 9L, "Hi6"), + Row.of(1, 8L, "Hi8"), + Row.of(3, 8L, "Hi9")) + .producedAfterRestore(Row.of(1, 1L, "PostRestore")) + .build(); + static final SourceTestStep SOURCE_T2 = + SourceTestStep.newBuilder("T2") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "HiHi"), Row.of(2, 2L, "HeHe"), Row.of(3, 2L, "HeHe")) + .producedAfterRestore(Row.of(2, 1L, "PostRestoreRight")) + .build(); + + static { + NON_WINDOW_INNER_JOIN = + TableTestProgram.of("non-window-inner-join", "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON t1.a = t2.a AND t1.b > t2.b") + .build(); + + NON_WINDOW_INNER_JOIN_WITH_NULL = + TableTestProgram.of( + "non-window-inner-join-with-null-cond", + "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8"), + Row.of(null, "HeHe", "Hi9")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" 
+ + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON \n" + + " ((t1.a is null AND t2.a is null) OR\n" + + " (t1.a = t2.a))\n" + + " AND t1.b > t2.b") + .build(); + + JOIN = + TableTestProgram.of("join", "test join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hello", "Hallo Welt"), + Row.of("Hello world", "Hallo Welt"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore( + Row.of("Hello there", "Hallo Welt wie")) + .build()) + .runSql("insert into MySink " + "SELECT a3, b4 FROM A, B WHERE a2 = b2") + .build(); + + INNER_JOIN = + TableTestProgram.of("inner join", "test inner join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore( + Row.of(1, 1), Review Comment: The next test looks really good! ########## flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/testutils/JoinTestPrograms.java: ########## @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.table.planner.plan.nodes.exec.testutils; + +import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecJoin; +import org.apache.flink.table.test.program.SinkTestStep; +import org.apache.flink.table.test.program.SourceTestStep; +import org.apache.flink.table.test.program.TableTestProgram; +import org.apache.flink.types.Row; +import org.apache.flink.types.RowKind; + +/** {@link TableTestProgram} definitions for testing {@link StreamExecJoin}. 
*/ +public class JoinTestPrograms { + + static final TableTestProgram NON_WINDOW_INNER_JOIN; + static final TableTestProgram NON_WINDOW_INNER_JOIN_WITH_NULL; + static final TableTestProgram JOIN; + static final TableTestProgram INNER_JOIN; + static final TableTestProgram JOIN_WITH_FILTER; + static final TableTestProgram INNER_JOIN_WITH_DUPLICATE_KEY; + static final TableTestProgram INNER_JOIN_WITH_NON_EQUI_JOIN; + static final TableTestProgram INNER_JOIN_WITH_EQUAL_PK; + static final TableTestProgram INNER_JOIN_WITH_PK; + + static final SourceTestStep SOURCE_A = + SourceTestStep.newBuilder("A") + .addSchema("a1 int", "a2 bigint", "a3 varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi"), + Row.of(2, 2L, "Hello"), + Row.of(3, 2L, "Hello world")) + .producedAfterRestore(Row.of(4, 3L, "Hello there")) + .build(); + + static final SourceTestStep SOURCE_B = + SourceTestStep.newBuilder("B") + .addSchema("b1 int", "b2 bigint", "b3 int", "b4 varchar", "b5 bigint") + .producedBeforeRestore( + Row.of(1, 1L, 0, "Hallo", 1L), + Row.of(2, 2L, 1, "Hallo Welt", 2L), + Row.of(2, 3L, 2, "Hallo Welt wie", 1L), + Row.of(3, 1L, 2, "Hallo Welt wie gehts", 1L)) + .producedAfterRestore(Row.of(2, 4L, 3, "Hallo Welt wie gehts", 4L)) + .build(); + static final SourceTestStep SOURCE_T1 = + SourceTestStep.newBuilder("T1") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "Hi1"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 2L, "Hi2"), + Row.of(1, 5L, "Hi3"), + Row.of(2, 7L, "Hi5"), + Row.of(1, 9L, "Hi6"), + Row.of(1, 8L, "Hi8"), + Row.of(3, 8L, "Hi9")) + .producedAfterRestore(Row.of(1, 1L, "PostRestore")) + .build(); + static final SourceTestStep SOURCE_T2 = + SourceTestStep.newBuilder("T2") + .addSchema("a int", "b bigint", "c varchar") + .producedBeforeRestore( + Row.of(1, 1L, "HiHi"), Row.of(2, 2L, "HeHe"), Row.of(3, 2L, "HeHe")) + .producedAfterRestore(Row.of(2, 1L, "PostRestoreRight")) + .build(); + + static { + NON_WINDOW_INNER_JOIN = + TableTestProgram.of("non-window-inner-join", "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" + + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON t1.a = t2.a AND t1.b > t2.b") + .build(); + + NON_WINDOW_INNER_JOIN_WITH_NULL = + TableTestProgram.of( + "non-window-inner-join-with-null-cond", + "test non-window inner join") + .setupTableSource(SOURCE_T1) + .setupTableSource(SOURCE_T2) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a int", "c1 varchar", "c2 varchar") + .consumedBeforeRestore( + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi2"), + Row.of(1, "HiHi", "Hi3"), + Row.of(2, "HeHe", "Hi5"), + Row.of(1, "HiHi", "Hi6"), + Row.of(1, "HiHi", "Hi8"), + Row.of(null, "HeHe", "Hi9")) + .consumedAfterRestore(Row.of(2, "PostRestoreRight", "Hi5")) + .build()) + .runSql( + "insert into MySink " + + "SELECT t2.a, t2.c, t1.c\n" + + "FROM (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T1\n" 
+ + ") as t1\n" + + "JOIN (\n" + + " SELECT if(a = 3, cast(null as int), a) as a, b, c FROM T2\n" + + ") as t2\n" + + "ON \n" + + " ((t1.a is null AND t2.a is null) OR\n" + + " (t1.a = t2.a))\n" + + " AND t1.b > t2.b") + .build(); + + JOIN = + TableTestProgram.of("join", "test join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hello", "Hallo Welt"), + Row.of("Hello world", "Hallo Welt"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore( + Row.of("Hello there", "Hallo Welt wie")) + .build()) + .runSql("insert into MySink " + "SELECT a3, b4 FROM A, B WHERE a2 = b2") + .build(); + + INNER_JOIN = + TableTestProgram.of("inner join", "test inner join") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore( + Row.of(1, 1), + Row.of(2, 2), + Row.of(3, 3), + Row.of(2, 2)) + .consumedAfterRestore(Row.of(2, 2)) + .build()) + .runSql("insert into MySink " + "SELECT a1, b1 FROM A JOIN B ON a1 = b1") + .build(); + + JOIN_WITH_FILTER = + TableTestProgram.of("join-with-filter", "test join with filter") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a3 varchar", "b4 varchar") + .consumedBeforeRestore( + Row.of("Hi", "Hallo"), + Row.of("Hi", "Hallo Welt wie gehts")) + .consumedAfterRestore(new Row[] {}) + .build()) + .runSql( + "insert into MySink " + + "SELECT a3, b4 FROM A, B where a2 = b2 and a2 < 2") + .build(); + + INNER_JOIN_WITH_DUPLICATE_KEY = + TableTestProgram.of( + "inner-join-with-duplicate-key", "inner join with duplicate key") + .setupTableSource(SOURCE_A) + .setupTableSource(SOURCE_B) + .setupTableSink( + SinkTestStep.newBuilder("MySink") + .addSchema("a1 int", "b1 int") + .consumedBeforeRestore(Row.of(2, 2)) + .consumedAfterRestore(new Row[] {}) + .build()) + .runSql( + "insert into MySink " + + "SELECT a1, b1 FROM A JOIN B ON a1 = b1 AND a1 = b3") + .build(); + + INNER_JOIN_WITH_NON_EQUI_JOIN = Review Comment: How about we use the same data as for `JOIN_WITH_FILTER`? In the end it should end up with the same plan. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: issues-unsubscr...@flink.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org