================ @@ -30095,12 +30102,16 @@ TargetLoweringBase::AtomicExpansionKind X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { Type *MemType = SI->getValueOperand()->getType(); - bool NoImplicitFloatOps = - SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat); - if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() && - !Subtarget.useSoftFloat() && !NoImplicitFloatOps && - (Subtarget.hasSSE1() || Subtarget.hasX87())) - return AtomicExpansionKind::None; + if (!SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) && + !Subtarget.useSoftFloat()) { + if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() && + (Subtarget.hasSSE1() || Subtarget.hasX87())) ---------------- jyknight wrote:
No, misaligned atomic ops are converted to `__atomic_*` libcalls before this function is ever called. https://github.com/llvm/llvm-project/pull/74275 _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits