================ @@ -898,6 +910,53 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, } break; } + case Intrinsic::ptrmask: { + // Fail loudly in case this is ever changed. + // Likely not much needs to be changed here to support vector types. + assert(!I->getOperand(0)->getType()->isVectorTy() && + !I->getOperand(1)->getType()->isVectorTy() && + "These simplifications were written at a time when ptrmask did " + "not support vector types and may not work for vectors"); + + unsigned MaskWidth = I->getOperand(1)->getType()->getScalarSizeInBits(); + RHSKnown = KnownBits(MaskWidth); + // If either the LHS or the RHS are Zero, the result is zero. + if (SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1) || + SimplifyDemandedBits( + I, 1, (DemandedMask & ~LHSKnown.Zero).zextOrTrunc(MaskWidth), + RHSKnown, Depth + 1)) + return I; + + RHSKnown = RHSKnown.zextOrTrunc(BitWidth); + assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?"); + assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?"); + + Known = LHSKnown & RHSKnown; + KnownBitsComputed = DemandedMask.isAllOnes(); + + // If the client is only demanding bits we know to be zero, return + // `llvm.ptrmask(p, 0)`. We can't return `null` here due to pointer + // provenance, but making the mask zero will be easily optimizable in + // the backend. + if (DemandedMask.isSubsetOf(Known.Zero)) + return replaceOperand( + *I, 1, Constant::getNullValue(I->getOperand(1)->getType())); ---------------- goldsteinn wrote:
Added proper tests. https://github.com/llvm/llvm-project/pull/67166 _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits