[ 
https://issues.apache.org/jira/browse/FLINK-33579?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

waywtdcc updated FLINK-33579:
-----------------------------
    Description: 
 
{code:sql}
set pipeline.operator-chaining=true;
set execution.runtime-mode=BATCH;
set table.exec.disabled-operators = NestedLoopJoin;

explain plan for
select
  *
from
  orders,
  supplier,
  customer
where
  c_custkey = o_custkey and
  c_nationkey = s_nationkey;
{code}
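
For context: in batch mode, a join step that has no equi-join condition can, as far as I know, only be executed by the nested-loop join operator, so disabling NestedLoopJoin makes any cross join unplannable. As a sanity check, the failure below should disappear once the option is cleared (a sketch, assuming the same SQL client session and that RESET is available there):

{code:sql}
-- Sketch, assuming the same SQL client session as above: clearing the
-- option lets the planner fall back to a nested-loop join for the
-- key-less intermediate join, so this EXPLAIN should succeed.
reset table.exec.disabled-operators;

explain plan for
select
  *
from
  orders,
  supplier,
  customer
where
  c_custkey = o_custkey and
  c_nationkey = s_nationkey;
{code}
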
Running the EXPLAIN above fails during planning with the following exception:
{code:java}
org.apache.flink.table.api.TableException: Cannot generate a valid execution plan for the given query:

FlinkLogicalJoin(condition=[AND(=($21, $2), =($24, $15))], joinType=[inner])
:- FlinkLogicalJoin(condition=[true], joinType=[inner])
:  :- FlinkLogicalTableSourceScan(table=[[paimon, tpch100g_paimon, orders]], fields=[uuid, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment, ts])
:  +- FlinkLogicalTableSourceScan(table=[[paimon, tpch100g_paimon, supplier]], fields=[uuid, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ts])
+- FlinkLogicalTableSourceScan(table=[[paimon, tpch100g_paimon, customer]], fields=[uuid, c_custkey, c_name, c_address, c_nationkey, c_phone, c_acctbal, c_mktsegment, c_comment, ts])

This exception indicates that the query uses an unsupported SQL feature.
Please check the documentation for the set of currently supported SQL features.

    at org.apache.flink.table.planner.plan.optimize.program.FlinkVolcanoProgram.optimize(FlinkVolcanoProgram.scala:70)
    at org.apache.flink.table.planner.plan.optimize.program.FlinkChainedProgram.$anonfun$optimize$1(FlinkChainedProgram.scala:59)
    at scala.collection.TraversableOnce.$anonfun$foldLeft$1(TraversableOnce.scala:156)
    at scala.collection.TraversableOnce.$anonfun$foldLeft$1$adapted(TraversableOnce.scala:156)
    at scala.collection.Iterator.foreach(Iterator.scala:937)
    at scala.collection.Iterator.foreach$(Iterator.scala:937)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1425)
    at scala.collection.IterableLike.foreach(IterableLike.scala:70)
    at scala.collection.IterableLike.foreach$(IterableLike.scala:69)
    at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
    at scala.collection.TraversableOnce.foldLeft(TraversableOnce.scala:156)
    at scala.collection.TraversableOnce.foldLeft$(TraversableOnce.scala:154)
    at scala.collection.AbstractTraversable.foldLeft(Traversable.scala:104)
    at org.apache.flink.table.planner.plan.optimize.program.FlinkChainedProgram.optimize(FlinkChainedProgram.scala:55)
    at org.apache.flink.table.planner.plan.optimize.BatchCommonSubGraphBasedOptimizer.optimizeTree(BatchCommonSubGraphBasedOptimizer.scala:93)
    at org.apache.flink.table.planner.plan.optimize.BatchCommonSubGraphBasedOptimizer.optimizeBlock(BatchCommonSubGraphBasedOptimizer.scala:58)
    at org.apache.flink.table.planner.plan.optimize.BatchCommonSubGraphBasedOptimizer.$anonfun$doOptimize$1(BatchCommonSubGraphBasedOptimizer.scala:45)
    at org.apache.flink.table.planner.plan.optimize.BatchCommonSubGraphBasedOptimizer.$anonfun$doOptimize$1$adapted(BatchCommonSubGraphBasedOptimizer.scala:45)
    at scala.collection.immutable.List.foreach(List.scala:388)
    at org.apache.flink.table.planner.plan.optimize.BatchCommonSubGraphBasedOptimizer.doOptimize(BatchCommonSubGraphBasedOptimizer.scala:45)
    at org.apache.flink.table.planner.plan.optimize.CommonSubGraphBasedOptimizer.optimize(CommonSubGraphBasedOptimizer.scala:87)
    at org.apache.flink.table.planner.delegation.PlannerBase.optimize(PlannerBase.scala:329)
    at org.apache.flink.table.planner.delegation.PlannerBase.getExplainGraphs(PlannerBase.scala:541)
    at org.apache.flink.table.planner.delegation.BatchPlanner.explain(BatchPlanner.scala:115)
    at org.apache.flink.table.planner.delegation.BatchPlanner.explain(BatchPlanner.scala:47)
    at org.apache.flink.table.api.internal.TableEnvironmentImpl.explainInternal(TableEnvironmentImpl.java:620)
    at org.apache.flink.table.api.internal.TableEnvironmentInternal.explainInternal(TableEnvironmentInternal.java:96)
    at org.apache.flink.table.api.internal.TableEnvironmentImpl.executeInternal(TableEnvironmentImpl.java:1296)
    at org.apache.flink.table.api.internal.TableEnvironmentImpl.executeSql(TableEnvironmentImpl.java:658)
    at org.grg_banking.flink.sqlexecute.FlinkUtils.exeucteSqlFile2(FlinkUtils.java:262)
    at org.apache.flink.catalog.test.TestCatalog.testBatchDev(TestCatalog.java:136)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
    at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
    at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
    at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
    at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:69)
    at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:33)
    at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:235)
    at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:54)
Caused by: org.apache.calcite.plan.RelOptPlanner$CannotPlanException: There are not enough rules to produce a node with desired properties: convention=BATCH_PHYSICAL, FlinkRelDistributionTraitDef=any, sort=[].
Missing conversion is FlinkLogicalJoin[convention: LOGICAL -> BATCH_PHYSICAL, FlinkRelDistributionTraitDef: any -> hash[2, 15]true]
There is 1 empty subset: rel#313:RelSubset#7.BATCH_PHYSICAL.hash[2, 15]true.[], the relevant part of the original plan is as follows
299:FlinkLogicalJoin(condition=[true], joinType=[inner])
  272:FlinkLogicalTableSourceScan(subset=[rel#297:RelSubset#5.LOGICAL.any.[]], table=[[paimon, tpch100g_paimon, orders]], fields=[uuid, o_orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment, ts])
  274:FlinkLogicalTableSourceScan(subset=[rel#298:RelSubset#6.LOGICAL.any.[]], table=[[paimon, tpch100g_paimon, supplier]], fields=[uuid, s_suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment, ts])
{code}
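
For what it is worth, the CannotPlanException seems to point at the intermediate join with condition=[true]: since the WHERE clause only relates customer to the other two tables, the left-deep join order (orders, then supplier) has no equi-join key, so that step can only be a nested-loop cross join, which the configuration disables. One possible mitigation, sketched below under the assumption of the same session, is to let the optimizer reorder joins so that every step keeps an equi-condition (table.optimizer.join-reorder-enabled defaults to false in 1.17):

{code:sql}
-- Sketch, assuming the same session: with join reordering enabled, the
-- optimizer may pick an order such as orders -> customer -> supplier,
-- which avoids the key-less intermediate join entirely.
set table.optimizer.join-reorder-enabled=true;

explain plan for
select
  *
from
  orders,
  supplier,
  customer
where
  c_custkey = o_custkey and
  c_nationkey = s_nationkey;
{code}
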
When I change the order of the tables in the FROM clause to
{code:sql}
select
  *
from
  orders,
  customer,
  supplier
where
  c_custkey = o_custkey and
  c_nationkey = s_nationkey;
{code}
the statement plans successfully and no error is reported.
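
The reordering works because the left-deep plan then has an equi-condition at every step: orders is joined to customer on c_custkey = o_custkey, and that result to supplier on c_nationkey = s_nationkey, so hash or sort-merge joins can be chosen throughout. An equivalent rewrite with explicit JOIN ... ON syntax makes that order visible (a sketch against the same tables):

{code:sql}
-- Sketch: the same query with an explicit join order; every join step
-- carries an equi-condition, so NestedLoopJoin is never required.
explain plan for
select
  *
from
  orders
  join customer on c_custkey = o_custkey
  join supplier on c_nationkey = s_nationkey;
{code}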


> Join sql error
> --------------
>
>                 Key: FLINK-33579
>                 URL: https://issues.apache.org/jira/browse/FLINK-33579
>             Project: Flink
>          Issue Type: Bug
>    Affects Versions: 1.17.1
>            Reporter: waywtdcc
>            Priority: Major
>



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
