This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git
commit 46fa64f34ba68112d40473fabfb346fa8b5f1641 Author: jakevin <jakevin...@gmail.com> AuthorDate: Thu Apr 18 16:34:05 2024 +0800 [minor](Nereids): remove useless getFilterConjuncts() filter() in Translator (#33801) --- .../glue/translator/PhysicalPlanTranslator.java | 317 ++++++++++----------- .../java/org/apache/doris/nereids/memo/Memo.java | 10 - .../nereids/rules/rewrite/EliminateFilterTest.java | 42 ++- 3 files changed, 191 insertions(+), 178 deletions(-) diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java index fc9a88cf2e0..fb00990ebe9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/glue/translator/PhysicalPlanTranslator.java @@ -94,7 +94,6 @@ import org.apache.doris.nereids.trees.expressions.WindowFrame; import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction; import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateParam; import org.apache.doris.nereids.trees.expressions.functions.scalar.PushDownToProjectionFunction; -import org.apache.doris.nereids.trees.expressions.literal.BooleanLiteral; import org.apache.doris.nereids.trees.plans.AbstractPlan; import org.apache.doris.nereids.trees.plans.AggMode; import org.apache.doris.nereids.trees.plans.AggPhase; @@ -1366,11 +1365,9 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) .collect(Collectors.toList()); - Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); - - hashJoin.getOutput().stream() + Map<ExprId, SlotReference> outputSlotReferenceMap = hashJoin.getOutput().stream() .map(SlotReference.class::cast) - .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); + .collect(Collectors.toMap(Slot::getExprId, s -> s, (existing, replacement) -> existing)); List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) .map(TupleDescriptor::getSlots) .flatMap(Collection::stream) @@ -1396,19 +1393,16 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); } hashJoin.getFilterConjuncts().stream() - .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .flatMap(e -> e.getInputSlots().stream()) .map(SlotReference.class::cast) .forEach(s -> hashOutputSlotReferenceMap.put(s.getExprId(), s)); - Map<ExprId, SlotReference> leftChildOutputMap = Maps.newHashMap(); - hashJoin.child(0).getOutput().stream() + Map<ExprId, SlotReference> leftChildOutputMap = hashJoin.left().getOutput().stream() .map(SlotReference.class::cast) - .forEach(s -> leftChildOutputMap.put(s.getExprId(), s)); - Map<ExprId, SlotReference> rightChildOutputMap = Maps.newHashMap(); - hashJoin.child(1).getOutput().stream() + .collect(Collectors.toMap(Slot::getExprId, s -> s, (existing, replacement) -> existing)); + Map<ExprId, SlotReference> rightChildOutputMap = hashJoin.right().getOutput().stream() .map(SlotReference.class::cast) - .forEach(s -> rightChildOutputMap.put(s.getExprId(), s)); + .collect(Collectors.toMap(Slot::getExprId, s -> s, (existing, replacement) -> existing)); // translate runtime filter context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> physicalHashJoin.getRuntimeFilters() @@ -1434,7 +1428,6 @@ public class 
PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); - // sd = context.createSlotDesc(intermediateDescriptor, sf); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(leftSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), leftSlotDescriptor.getId()); @@ -1455,7 +1448,6 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); - // sd = context.createSlotDesc(intermediateDescriptor, sf); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(rightSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), rightSlotDescriptor.getId()); @@ -1494,7 +1486,6 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); } else { sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); - // sd = context.createSlotDesc(intermediateDescriptor, sf); if (hashOutputSlotReferenceMap.get(sf.getExprId()) != null) { hashJoinNode.addSlotIdToHashOutputSlotIds(rightSlotDescriptor.getId()); hashJoinNode.getHashOutputExprSlotIdMap().put(sf.getExprId(), rightSlotDescriptor.getId()); @@ -1532,7 +1523,6 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla .collect(Collectors.toList()); hashJoin.getFilterConjuncts().stream() - .filter(e -> !(e.equals(BooleanLiteral.TRUE))) .map(e -> ExpressionTranslator.translate(e, context)) .forEach(hashJoinNode::addConjunct); @@ -1581,176 +1571,171 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla List<List<Expr>> distributeExprLists = getDistributeExprs(nestedLoopJoin.child(0), nestedLoopJoin.child(1)); PlanNode leftFragmentPlanRoot = leftFragment.getPlanRoot(); PlanNode rightFragmentPlanRoot = rightFragment.getPlanRoot(); - if (JoinUtils.shouldNestedLoopJoin(nestedLoopJoin)) { - List<TupleDescriptor> leftTuples = context.getTupleDesc(leftFragmentPlanRoot); - List<TupleDescriptor> rightTuples = context.getTupleDesc(rightFragmentPlanRoot); - List<TupleId> tupleIds = Stream.concat(leftTuples.stream(), rightTuples.stream()) - .map(TupleDescriptor::getId) - .collect(Collectors.toList()); - JoinType joinType = nestedLoopJoin.getJoinType(); + if (!JoinUtils.shouldNestedLoopJoin(nestedLoopJoin)) { + throw new RuntimeException("Physical nested loop join could not execute with equal join condition."); + } + + List<TupleDescriptor> leftTuples = context.getTupleDesc(leftFragmentPlanRoot); + List<TupleDescriptor> rightTuples = context.getTupleDesc(rightFragmentPlanRoot); + List<TupleId> tupleIds = Stream.concat(leftTuples.stream(), rightTuples.stream()) + .map(TupleDescriptor::getId) + .collect(Collectors.toList()); - NestedLoopJoinNode nestedLoopJoinNode = new NestedLoopJoinNode(context.nextPlanNodeId(), - leftFragmentPlanRoot, rightFragmentPlanRoot, tupleIds, JoinType.toJoinOperator(joinType), - null, null, null, nestedLoopJoin.isMarkJoin()); - 
nestedLoopJoinNode.setUseSpecificProjections(false); - nestedLoopJoinNode.setNereidsId(nestedLoopJoin.getId()); - nestedLoopJoinNode.setChildrenDistributeExprLists(distributeExprLists); - if (nestedLoopJoin.getStats() != null) { - nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); - } - nestedLoopJoinNode.setChild(0, leftFragment.getPlanRoot()); - nestedLoopJoinNode.setChild(1, rightFragment.getPlanRoot()); - setPlanRoot(leftFragment, nestedLoopJoinNode, nestedLoopJoin); - // TODO: what's this? do we really need to set this? - rightFragment.getPlanRoot().setCompactData(false); - context.mergePlanFragment(rightFragment, leftFragment); - for (PlanFragment rightChild : rightFragment.getChildren()) { - leftFragment.addChild(rightChild); + JoinType joinType = nestedLoopJoin.getJoinType(); + + NestedLoopJoinNode nestedLoopJoinNode = new NestedLoopJoinNode(context.nextPlanNodeId(), + leftFragmentPlanRoot, rightFragmentPlanRoot, tupleIds, JoinType.toJoinOperator(joinType), + null, null, null, nestedLoopJoin.isMarkJoin()); + nestedLoopJoinNode.setUseSpecificProjections(false); + nestedLoopJoinNode.setNereidsId(nestedLoopJoin.getId()); + nestedLoopJoinNode.setChildrenDistributeExprLists(distributeExprLists); + if (nestedLoopJoin.getStats() != null) { + nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); + } + nestedLoopJoinNode.setChild(0, leftFragment.getPlanRoot()); + nestedLoopJoinNode.setChild(1, rightFragment.getPlanRoot()); + setPlanRoot(leftFragment, nestedLoopJoinNode, nestedLoopJoin); + // TODO: what's this? do we really need to set this? + rightFragment.getPlanRoot().setCompactData(false); + context.mergePlanFragment(rightFragment, leftFragment); + for (PlanFragment rightChild : rightFragment.getChildren()) { + leftFragment.addChild(rightChild); + } + // translate runtime filter + context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> { + List<RuntimeFilter> filters = nestedLoopJoin.getRuntimeFilters(); + filters.forEach(filter -> runtimeFilterTranslator + .createLegacyRuntimeFilter(filter, nestedLoopJoinNode, context)); + if (filters.stream().anyMatch(filter -> filter.getType() == TRuntimeFilterType.BITMAP)) { + nestedLoopJoinNode.setOutputLeftSideOnly(true); } - // translate runtime filter - context.getRuntimeTranslator().ifPresent(runtimeFilterTranslator -> { - List<RuntimeFilter> filters = nestedLoopJoin.getRuntimeFilters(); - filters.forEach(filter -> runtimeFilterTranslator - .createLegacyRuntimeFilter(filter, nestedLoopJoinNode, context)); - if (filters.stream().anyMatch(filter -> filter.getType() == TRuntimeFilterType.BITMAP)) { - nestedLoopJoinNode.setOutputLeftSideOnly(true); - } - }); + }); - Map<ExprId, SlotReference> leftChildOutputMap = Maps.newHashMap(); - nestedLoopJoin.child(0).getOutput().stream() - .map(SlotReference.class::cast) - .forEach(s -> leftChildOutputMap.put(s.getExprId(), s)); - Map<ExprId, SlotReference> rightChildOutputMap = Maps.newHashMap(); - nestedLoopJoin.child(1).getOutput().stream() - .map(SlotReference.class::cast) - .forEach(s -> rightChildOutputMap.put(s.getExprId(), s)); - // make intermediate tuple - List<SlotDescriptor> leftIntermediateSlotDescriptor = Lists.newArrayList(); - List<SlotDescriptor> rightIntermediateSlotDescriptor = Lists.newArrayList(); - TupleDescriptor intermediateDescriptor = context.generateTupleDesc(); - - // Nereids does not care about output order of join, - // but BE need left child's output must be before right child's output. 
- // So we need to swap the output order of left and right child if necessary. - // TODO: revert this after Nereids could ensure the output order is correct. - List<SlotDescriptor> leftSlotDescriptors = leftTuples.stream() - .map(TupleDescriptor::getSlots) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - List<SlotDescriptor> rightSlotDescriptors = rightTuples.stream() - .map(TupleDescriptor::getSlots) - .flatMap(Collection::stream) - .collect(Collectors.toList()); - Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); + Map<ExprId, SlotReference> leftChildOutputMap = nestedLoopJoin.child(0).getOutput().stream() + .map(SlotReference.class::cast) + .collect(Collectors.toMap(Slot::getExprId, s -> s, (existing, replacement) -> existing)); + Map<ExprId, SlotReference> rightChildOutputMap = nestedLoopJoin.child(1).getOutput().stream() + .map(SlotReference.class::cast) + .collect(Collectors.toMap(Slot::getExprId, s -> s, (existing, replacement) -> existing)); + // make intermediate tuple + List<SlotDescriptor> leftIntermediateSlotDescriptor = Lists.newArrayList(); + List<SlotDescriptor> rightIntermediateSlotDescriptor = Lists.newArrayList(); + TupleDescriptor intermediateDescriptor = context.generateTupleDesc(); - nestedLoopJoin.getOutput().stream() - .map(SlotReference.class::cast) - .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); - nestedLoopJoin.getFilterConjuncts().stream() - .filter(e -> !(e.equals(BooleanLiteral.TRUE))) - .flatMap(e -> e.getInputSlots().stream()) - .map(SlotReference.class::cast) - .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); - List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) - .map(TupleDescriptor::getSlots) - .flatMap(Collection::stream) - .map(sd -> context.findExprId(sd.getId())) - .map(outputSlotReferenceMap::get) - .filter(Objects::nonNull) - .collect(Collectors.toList()); + // Nereids does not care about output order of join, + // but BE need left child's output must be before right child's output. + // So we need to swap the output order of left and right child if necessary. + // TODO: revert this after Nereids could ensure the output order is correct. + List<SlotDescriptor> leftSlotDescriptors = leftTuples.stream() + .map(TupleDescriptor::getSlots) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + List<SlotDescriptor> rightSlotDescriptors = rightTuples.stream() + .map(TupleDescriptor::getSlots) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + Map<ExprId, SlotReference> outputSlotReferenceMap = Maps.newHashMap(); - // TODO: because of the limitation of be, the VNestedLoopJoinNode will output column from both children - // in the intermediate tuple, so fe have to do the same, if be fix the problem, we can change it back. 
- for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { - if (!leftSlotDescriptor.isMaterialized()) { - continue; - } - SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); - SlotDescriptor sd; - if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { - // TODO: temporary code for two phase read, should remove it after refactor - sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); - } else { - sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); - // sd = context.createSlotDesc(intermediateDescriptor, sf); - } - leftIntermediateSlotDescriptor.add(sd); - } - for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { - if (!rightSlotDescriptor.isMaterialized()) { - continue; - } - SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); - SlotDescriptor sd; - if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { - // TODO: temporary code for two phase read, should remove it after refactor - sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); - } else { - sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); - // sd = context.createSlotDesc(intermediateDescriptor, sf); - } - rightIntermediateSlotDescriptor.add(sd); - } + nestedLoopJoin.getOutput().stream() + .map(SlotReference.class::cast) + .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); + nestedLoopJoin.getFilterConjuncts().stream() + .flatMap(e -> e.getInputSlots().stream()) + .map(SlotReference.class::cast) + .forEach(s -> outputSlotReferenceMap.put(s.getExprId(), s)); + List<SlotReference> outputSlotReferences = Stream.concat(leftTuples.stream(), rightTuples.stream()) + .map(TupleDescriptor::getSlots) + .flatMap(Collection::stream) + .map(sd -> context.findExprId(sd.getId())) + .map(outputSlotReferenceMap::get) + .filter(Objects::nonNull) + .collect(Collectors.toList()); - if (nestedLoopJoin.getMarkJoinSlotReference().isPresent()) { - outputSlotReferences.add(nestedLoopJoin.getMarkJoinSlotReference().get()); - context.createSlotDesc(intermediateDescriptor, nestedLoopJoin.getMarkJoinSlotReference().get()); + // TODO: because of the limitation of be, the VNestedLoopJoinNode will output column from both children + // in the intermediate tuple, so fe have to do the same, if be fix the problem, we can change it back. 
+ for (SlotDescriptor leftSlotDescriptor : leftSlotDescriptors) { + if (!leftSlotDescriptor.isMaterialized()) { + continue; } - - // set slots as nullable for outer join - if (joinType == JoinType.LEFT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { - rightIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); + SlotReference sf = leftChildOutputMap.get(context.findExprId(leftSlotDescriptor.getId())); + SlotDescriptor sd; + if (sf == null && leftSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { + // TODO: temporary code for two phase read, should remove it after refactor + sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, leftSlotDescriptor); + } else { + sd = context.createSlotDesc(intermediateDescriptor, sf, leftSlotDescriptor.getParent().getTable()); + } + leftIntermediateSlotDescriptor.add(sd); + } + for (SlotDescriptor rightSlotDescriptor : rightSlotDescriptors) { + if (!rightSlotDescriptor.isMaterialized()) { + continue; } - if (joinType == JoinType.RIGHT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { - leftIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); + SlotReference sf = rightChildOutputMap.get(context.findExprId(rightSlotDescriptor.getId())); + SlotDescriptor sd; + if (sf == null && rightSlotDescriptor.getColumn().getName().equals(Column.ROWID_COL)) { + // TODO: temporary code for two phase read, should remove it after refactor + sd = context.getDescTable().copySlotDescriptor(intermediateDescriptor, rightSlotDescriptor); + } else { + sd = context.createSlotDesc(intermediateDescriptor, sf, rightSlotDescriptor.getParent().getTable()); } + rightIntermediateSlotDescriptor.add(sd); + } - nestedLoopJoinNode.setvIntermediateTupleDescList(Lists.newArrayList(intermediateDescriptor)); + if (nestedLoopJoin.getMarkJoinSlotReference().isPresent()) { + outputSlotReferences.add(nestedLoopJoin.getMarkJoinSlotReference().get()); + context.createSlotDesc(intermediateDescriptor, nestedLoopJoin.getMarkJoinSlotReference().get()); + } - List<Expr> joinConjuncts = nestedLoopJoin.getOtherJoinConjuncts().stream() - .filter(e -> !nestedLoopJoin.isBitmapRuntimeFilterCondition(e)) - .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); + // set slots as nullable for outer join + if (joinType == JoinType.LEFT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { + rightIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); + } + if (joinType == JoinType.RIGHT_OUTER_JOIN || joinType == JoinType.FULL_OUTER_JOIN) { + leftIntermediateSlotDescriptor.forEach(sd -> sd.setIsNullable(true)); + } - if (!nestedLoopJoin.isBitMapRuntimeFilterConditionsEmpty() && joinConjuncts.isEmpty()) { - // left semi join need at least one conjunct. 
otherwise left-semi-join fallback to cross-join - joinConjuncts.add(new BoolLiteral(true)); - } + nestedLoopJoinNode.setvIntermediateTupleDescList(Lists.newArrayList(intermediateDescriptor)); - nestedLoopJoinNode.setJoinConjuncts(joinConjuncts); + List<Expr> joinConjuncts = nestedLoopJoin.getOtherJoinConjuncts().stream() + .filter(e -> !nestedLoopJoin.isBitmapRuntimeFilterCondition(e)) + .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); - if (!nestedLoopJoin.getOtherJoinConjuncts().isEmpty()) { - List<Expr> markJoinConjuncts = nestedLoopJoin.getMarkJoinConjuncts().stream() - .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); - nestedLoopJoinNode.setMarkJoinConjuncts(markJoinConjuncts); - } + if (!nestedLoopJoin.isBitMapRuntimeFilterConditionsEmpty() && joinConjuncts.isEmpty()) { + // left semi join need at least one conjunct. otherwise left-semi-join fallback to cross-join + joinConjuncts.add(new BoolLiteral(true)); + } - nestedLoopJoin.getFilterConjuncts().stream() - .filter(e -> !(e.equals(BooleanLiteral.TRUE))) - .map(e -> ExpressionTranslator.translate(e, context)) - .forEach(nestedLoopJoinNode::addConjunct); + nestedLoopJoinNode.setJoinConjuncts(joinConjuncts); - if (nestedLoopJoin.isShouldTranslateOutput()) { - // translate output expr on intermediate tuple - List<Expr> srcToOutput = outputSlotReferences.stream() - .map(e -> ExpressionTranslator.translate(e, context)) - .collect(Collectors.toList()); + if (!nestedLoopJoin.getOtherJoinConjuncts().isEmpty()) { + List<Expr> markJoinConjuncts = nestedLoopJoin.getMarkJoinConjuncts().stream() + .map(e -> ExpressionTranslator.translate(e, context)).collect(Collectors.toList()); + nestedLoopJoinNode.setMarkJoinConjuncts(markJoinConjuncts); + } - TupleDescriptor outputDescriptor = context.generateTupleDesc(); - outputSlotReferences.forEach(s -> context.createSlotDesc(outputDescriptor, s)); + nestedLoopJoin.getFilterConjuncts().stream() + .map(e -> ExpressionTranslator.translate(e, context)) + .forEach(nestedLoopJoinNode::addConjunct); - nestedLoopJoinNode.setOutputTupleDesc(outputDescriptor); - nestedLoopJoinNode.setProjectList(srcToOutput); - } - if (nestedLoopJoin.getStats() != null) { - nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); - } - updateLegacyPlanIdToPhysicalPlan(leftFragment.getPlanRoot(), nestedLoopJoin); - return leftFragment; - } else { - throw new RuntimeException("Physical nested loop join could not execute with equal join condition."); + if (nestedLoopJoin.isShouldTranslateOutput()) { + // translate output expr on intermediate tuple + List<Expr> srcToOutput = outputSlotReferences.stream() + .map(e -> ExpressionTranslator.translate(e, context)) + .collect(Collectors.toList()); + + TupleDescriptor outputDescriptor = context.generateTupleDesc(); + outputSlotReferences.forEach(s -> context.createSlotDesc(outputDescriptor, s)); + + nestedLoopJoinNode.setOutputTupleDesc(outputDescriptor); + nestedLoopJoinNode.setProjectList(srcToOutput); } + if (nestedLoopJoin.getStats() != null) { + nestedLoopJoinNode.setCardinality((long) nestedLoopJoin.getStats().getRowCount()); + } + updateLegacyPlanIdToPhysicalPlan(leftFragment.getPlanRoot(), nestedLoopJoin); + return leftFragment; } @Override diff --git a/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/Memo.java b/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/Memo.java index 2075badcb49..854ff0f51cb 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/Memo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/nereids/memo/Memo.java @@ -581,16 +581,6 @@ public class Memo { return group; } - // This function is used to copy new group expression - // It's used in DPHyp after construct new group expression - public Group copyInGroupExpression(GroupExpression newGroupExpression) { - Group newGroup = new Group(groupIdGenerator.getNextId(), newGroupExpression, - newGroupExpression.getPlan().getLogicalProperties()); - groups.put(newGroup.getGroupId(), newGroup); - groupExpressions.put(newGroupExpression, newGroupExpression); - return newGroup; - } - private CopyInResult rewriteByNewGroupExpression(Group targetGroup, Plan newPlan, GroupExpression newGroupExpression) { if (targetGroup == null) { diff --git a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateFilterTest.java b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateFilterTest.java index 9f35a0c0253..cbdc95db559 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateFilterTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/nereids/rules/rewrite/EliminateFilterTest.java @@ -17,7 +17,13 @@ package org.apache.doris.nereids.rules.rewrite; +import org.apache.doris.nereids.rules.expression.ExpressionNormalization; +import org.apache.doris.nereids.trees.expressions.And; +import org.apache.doris.nereids.trees.expressions.GreaterThan; +import org.apache.doris.nereids.trees.expressions.Or; import org.apache.doris.nereids.trees.expressions.literal.BooleanLiteral; +import org.apache.doris.nereids.trees.expressions.literal.Literal; +import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan; import org.apache.doris.nereids.trees.plans.logical.LogicalPlan; import org.apache.doris.nereids.util.LogicalPlanBuilder; import org.apache.doris.nereids.util.MemoPatternMatchSupported; @@ -31,9 +37,11 @@ import org.junit.jupiter.api.Test; * Tests for {@link EliminateFilter}. 
*/ class EliminateFilterTest implements MemoPatternMatchSupported { + private static final LogicalOlapScan scan1 = PlanConstructor.newLogicalOlapScan(0, "t1", 0); + @Test void testEliminateFilterFalse() { - LogicalPlan filterFalse = new LogicalPlanBuilder(PlanConstructor.newLogicalOlapScan(0, "t1", 0)) + LogicalPlan filterFalse = new LogicalPlanBuilder(scan1) .filter(BooleanLiteral.FALSE) .build(); @@ -44,7 +52,7 @@ class EliminateFilterTest implements MemoPatternMatchSupported { @Test void testEliminateFilterTrue() { - LogicalPlan filterTrue = new LogicalPlanBuilder(PlanConstructor.newLogicalOlapScan(0, "t1", 0)) + LogicalPlan filterTrue = new LogicalPlanBuilder(scan1) .filter(BooleanLiteral.TRUE) .build(); @@ -52,4 +60,34 @@ class EliminateFilterTest implements MemoPatternMatchSupported { .applyTopDown(new EliminateFilter()) .matches(logicalOlapScan()); } + + @Test + void testEliminateOneFilterTrue() { + And expr = new And(BooleanLiteral.TRUE, new GreaterThan(scan1.getOutput().get(0), Literal.of("1"))); + LogicalPlan filter = new LogicalPlanBuilder(scan1) + .filter(expr) + .build(); + + PlanChecker.from(MemoTestUtils.createConnectContext(), filter) + .applyTopDown(new EliminateFilter()) + .applyBottomUp(new ExpressionNormalization()) + .matches( + logicalFilter(logicalOlapScan()).when(f -> f.getPredicate() instanceof GreaterThan) + ); + } + + @Test + void testEliminateOneFilterFalse() { + Or expr = new Or(BooleanLiteral.FALSE, new GreaterThan(scan1.getOutput().get(0), Literal.of("1"))); + LogicalPlan filter = new LogicalPlanBuilder(scan1) + .filter(expr) + .build(); + + PlanChecker.from(MemoTestUtils.createConnectContext(), filter) + .applyTopDown(new EliminateFilter()) + .applyBottomUp(new ExpressionNormalization()) + .matches( + logicalFilter(logicalOlapScan()).when(f -> f.getPredicate() instanceof GreaterThan) + ); + } } --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org For additional commands, e-mail: commits-h...@doris.apache.org
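
The patch above repeatedly swaps a mutable map built through a forEach side effect (Maps.newHashMap() followed by .forEach(s -> map.put(s.getExprId(), s))) for a single Collectors.toMap(Slot::getExprId, s -> s, (existing, replacement) -> existing) collector. A minimal, self-contained sketch of that collector pattern follows; it uses plain strings instead of Doris' SlotReference/ExprId types, so the keyOf helper is only an illustrative stand-in for Slot::getExprId.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ToMapMergeDemo {
    public static void main(String[] args) {
        // Two elements share the key "1", mimicking duplicate ExprIds in a slot list.
        List<String> slots = List.of("a#1", "b#2", "a#1");

        // Before: a mutable map filled through a forEach side effect.
        Map<String, String> before = new HashMap<>();
        slots.forEach(s -> before.put(keyOf(s), s));

        // After: one collector; the third argument resolves key collisions by keeping
        // the existing entry, so duplicates cannot throw the IllegalStateException
        // that the two-argument toMap would raise.
        Map<String, String> after = slots.stream()
                .collect(Collectors.toMap(
                        ToMapMergeDemo::keyOf,                 // key, analogous to Slot::getExprId
                        s -> s,                                // value, the element itself
                        (existing, replacement) -> existing)); // keep the first occurrence

        System.out.println(before); // {1=a#1, 2=b#2}
        System.out.println(after);  // {1=a#1, 2=b#2}
    }

    private static String keyOf(String slot) {
        return slot.substring(slot.indexOf('#') + 1);
    }
}

The merge function is the important part: on a duplicate key the two-argument toMap throws, while the old forEach version silently overwrote, so (existing, replacement) -> existing keeps the collector from crashing and retains the first slot seen.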
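
The largest hunk in PhysicalPlanTranslator.java mostly re-indents visitPhysicalNestedLoopJoin: the if (JoinUtils.shouldNestedLoopJoin(...)) wrapper becomes an early throw, and the long translation body moves out one level. A schematic sketch of that guard-clause shape, with hypothetical names (shouldRun, nestedStyle, guardStyle) standing in for the real Doris methods:

public class GuardClauseDemo {
    // Before: the happy path sits inside the condition and the error case trails in an else.
    static String nestedStyle(boolean shouldRun) {
        if (shouldRun) {
            // ... long translation body, one extra indentation level ...
            return "translated";
        } else {
            throw new RuntimeException("could not execute with equal join condition");
        }
    }

    // After: fail fast, then keep the long body at the method's top level.
    static String guardStyle(boolean shouldRun) {
        if (!shouldRun) {
            throw new RuntimeException("could not execute with equal join condition");
        }
        // ... long translation body, no extra indentation ...
        return "translated";
    }

    public static void main(String[] args) {
        System.out.println(nestedStyle(true));
        System.out.println(guardStyle(true));
    }
}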
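
Dropping the .filter(e -> !(e.equals(BooleanLiteral.TRUE))) calls assumes that always-true conjuncts no longer reach the translator; the two new EliminateFilterTest cases back this up by checking that TRUE AND (a > 1) and FALSE OR (a > 1) collapse to a bare GreaterThan after EliminateFilter plus ExpressionNormalization. The sketch below only illustrates the boolean identities those tests rely on, using a toy Expr type rather than Nereids expressions:

public class ConjunctSimplifyDemo {
    /** A toy expression: either a boolean literal or an opaque predicate string. */
    static final class Expr {
        final String text;
        final Boolean literal; // null means "not a literal"

        private Expr(String text, Boolean literal) {
            this.text = text;
            this.literal = literal;
        }

        static Expr literal(boolean b) { return new Expr(Boolean.toString(b), b); }
        static Expr predicate(String text) { return new Expr(text, null); }
    }

    // TRUE AND p -> p, FALSE AND p -> FALSE; otherwise keep the conjunction.
    static Expr and(Expr left, Expr right) {
        if (left.literal != null) {
            return left.literal ? right : Expr.literal(false);
        }
        return Expr.predicate("(" + left.text + " AND " + right.text + ")");
    }

    // FALSE OR p -> p, TRUE OR p -> TRUE; otherwise keep the disjunction.
    static Expr or(Expr left, Expr right) {
        if (left.literal != null) {
            return left.literal ? Expr.literal(true) : right;
        }
        return Expr.predicate("(" + left.text + " OR " + right.text + ")");
    }

    public static void main(String[] args) {
        Expr gt = Expr.predicate("a > 1");
        System.out.println(and(Expr.literal(true), gt).text); // a > 1
        System.out.println(or(Expr.literal(false), gt).text); // a > 1
    }
}

If a literal TRUE conjunct did still slip into getFilterConjuncts(), it would presumably now be translated and added as a redundant but harmless always-true predicate instead of being skipped.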