diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -572,6 +572,7 @@
     case Intrinsic::assume:
     case Intrinsic::sideeffect:
     case Intrinsic::pseudoprobe:
+    case Intrinsic::arithmetic_fence:
     case Intrinsic::dbg_declare:
     case Intrinsic::dbg_value:
     case Intrinsic::dbg_label:
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1545,6 +1545,7 @@
     case Intrinsic::lifetime_end:
     case Intrinsic::sideeffect:
     case Intrinsic::pseudoprobe:
+    case Intrinsic::arithmetic_fence:
       return 0;
     case Intrinsic::masked_store: {
       Type *Ty = Tys[0];
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -1085,6 +1085,10 @@
   /// specifier.
  PREFETCH,

+  /// ARITH_FENCE - This corresponds to the arithmetic_fence intrinsic. Its
+  /// operand and result have the same floating-point type.
+  ARITH_FENCE,
+
   /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
   /// This corresponds to the fence instruction. It takes an input chain, and
   /// two integer constants: an AtomicOrdering and a SynchronizationScope.
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -317,6 +317,7 @@
   void CannotYetSelect(SDNode *N);

   void Select_FREEZE(SDNode *N);
+  void Select_ARITH_FENCE(SDNode *N);

 private:
   void DoInstructionSelection();
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -897,6 +897,13 @@
     return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
   }

+  /// Create a call to the arithmetic_fence intrinsic.
+  CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
+                                  const Twine &Name = "") {
+    return CreateIntrinsic(Intrinsic::arithmetic_fence, {DstType}, {Val},
+                           nullptr, Name);
+  }
+
   /// Create a call to the experimental.vector.extract intrinsic.
   CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
                                 const Twine &Name = "") {
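As a usage illustration (not part of the patch), a frontend could emit the intrinsic through the new builder helper; the function and variable names below are hypothetical:

    // Hypothetical sketch: wrap a fast-math product in an arithmetic fence so
    // that a following fadd cannot be fused with it (e.g. into an FMA).
    #include "llvm/IR/IRBuilder.h"

    static llvm::Value *emitFencedMul(llvm::IRBuilder<> &B, llvm::Value *A,
                                      llvm::Value *C) {
      llvm::Value *Mul = B.CreateFMul(A, C, "mul");
      // The fence's result type matches the type of the value being fenced.
      return B.CreateArithmeticFence(Mul, Mul->getType(), "fence");
    }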
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1311,6 +1311,9 @@
 def int_pseudoprobe : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
                                     [IntrInaccessibleMemOnly, IntrWillReturn]>;

+// Arithmetic fence intrinsic.
+def int_arithmetic_fence : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
 // Intrinsics to support half precision floating point format
 let IntrProperties = [IntrNoMem, IntrWillReturn] in {
   def int_convert_to_fp16 : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -117,6 +117,9 @@
 /// Pseudo probe
 HANDLE_TARGET_OPCODE(PSEUDO_PROBE)

+/// Arithmetic fence.
+HANDLE_TARGET_OPCODE(ARITH_FENCE)
+
 /// A Stackmap instruction captures the location of live variables at its
 /// position in the instruction stream. It is followed by a shadow of bytes
 /// that must lie within the function and not contain another stackmap.
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -1172,6 +1172,12 @@
   let AsmString = "PSEUDO_PROBE";
   let hasSideEffects = 1;
 }
+def ARITH_FENCE : StandardPseudoInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let AsmString = "";
+  let hasSideEffects = false;
+}
 def STACKMAP : StandardPseudoInstruction {
   let OutOperandList = (outs);
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1275,6 +1275,9 @@
     case TargetOpcode::PSEUDO_PROBE:
       emitPseudoProbe(MI);
       break;
+    case TargetOpcode::ARITH_FENCE:
+      OutStreamer->emitRawComment("ARITH_FENCE");
+      break;
     default:
       emitInstruction(&MI);
       if (CanDoExtraAnalysis) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7210,6 +7210,13 @@
     }
     break;
   }
+  case Intrinsic::arithmetic_fence: {
+    auto DL = getCurSDLoc();
+    SDValue Val = getValue(FPI.getArgOperand(0));
+    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
+    setValue(&FPI, DAG.getNode(ISD::ARITH_FENCE, DL, ResultVT, Val));
+    return;
+  }
   }

   // A few strict DAG nodes carry additional operands that are not
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2321,6 +2321,11 @@
                        N->getOperand(0));
 }

+void SelectionDAGISel::Select_ARITH_FENCE(SDNode *N) {
+  CurDAG->SelectNodeTo(N, TargetOpcode::ARITH_FENCE, N->getValueType(0),
+                       N->getOperand(0));
+}
+
 /// GetVBR - decode a vbr encoding whose top bit is set.
 LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
 GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
@@ -2872,6 +2877,9 @@
   case ISD::FREEZE:
     Select_FREEZE(NodeToMatch);
     return;
+  case ISD::ARITH_FENCE:
+    Select_ARITH_FENCE(NodeToMatch);
+    return;
   }

   assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
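Taken together (assuming the usual SelectionDAG flow): the IR call becomes an ISD::ARITH_FENCE node, which is selected to the target-independent ARITH_FENCE pseudo and printed as just an "ARITH_FENCE" asm comment, while its operand value flows through. Any DAG-level FP rewrite therefore has to treat the fence as opaque; a hypothetical guard in a combine (illustrative only, not part of the patch) might look like:

    // Hypothetical sketch: a combine that wants to reassociate floating-point
    // math must not look through ARITH_FENCE, since hiding the operand from
    // such rewrites is the node's entire purpose.
    #include "llvm/CodeGen/ISDOpcodes.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    static bool isReassociableFPOp(llvm::SDValue Op) {
      if (Op.getOpcode() == llvm::ISD::ARITH_FENCE)
        return false; // treat the fence as an opaque leaf
      return Op.getOpcode() == llvm::ISD::FADD ||
             Op.getOpcode() == llvm::ISD::FMUL;
    }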
diff --git a/llvm/test/CodeGen/X86/arithmetic_fence.ll b/llvm/test/CodeGen/X86/arithmetic_fence.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/arithmetic_fence.ll
@@ -0,0 +1,122 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+fma | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+fma | FileCheck %s --check-prefix=X64
+
+define float @f1(float %a, float %b, float %c) {
+; X86-LABEL: f1:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
+; X86-NEXT:    vmovss %xmm1, (%esp)
+; X86-NEXT:    flds (%esp)
+; X86-NEXT:    popl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: f1:
+; X64:       # %bb.0:
+; X64-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; X64-NEXT:    retq
+  %mul = fmul fast float %b, %a
+  %add = fadd fast float %mul, %c
+  ret float %add
+}
+
+define float @f2(float %a, float %b, float %c) {
+; X86-LABEL: f2:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    vmulss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-NEXT:    #ARITH_FENCE
+; X86-NEXT:    vaddss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-NEXT:    vmovss %xmm0, (%esp)
+; X86-NEXT:    flds (%esp)
+; X86-NEXT:    popl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: f2:
+; X64:       # %bb.0:
+; X64-NEXT:    vmulss %xmm0, %xmm1, %xmm0
+; X64-NEXT:    #ARITH_FENCE
+; X64-NEXT:    vaddss %xmm2, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %mul = fmul fast float %b, %a
+  %tmp = call float @llvm.arithmetic.fence.f32(float %mul)
+  %add = fadd fast float %tmp, %c
+  ret float %add
+}
+
+define double @f3(double %a, double %b, double %c) {
+; X86-LABEL: f3:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    .cfi_def_cfa_register %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    vaddsd 16(%ebp), %xmm0, %xmm0
+; X86-NEXT:    vmovsd %xmm0, (%esp)
+; X86-NEXT:    fldl (%esp)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa %esp, 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: f3:
+; X64:       # %bb.0:
+; X64-NEXT:    vaddsd %xmm2, %xmm1, %xmm0
+; X64-NEXT:    retq
+  %1 = fadd fast double %a, %b
+  %2 = fsub fast double %c, %a
+  %3 = fadd fast double %1, %2
+  ret double %3
+}
+
+define double @f4(double %a, double %b, double %c) {
+; X86-LABEL: f4:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    .cfi_def_cfa_register %ebp
+; X86-NEXT:    andl $-8, %esp
+; X86-NEXT:    subl $8, %esp
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    vaddsd 16(%ebp), %xmm1, %xmm2
+; X86-NEXT:    #ARITH_FENCE
+; X86-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddsd %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vmovsd %xmm0, (%esp)
+; X86-NEXT:    fldl (%esp)
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa %esp, 4
+; X86-NEXT:    retl
+;
+; X64-LABEL: f4:
+; X64:       # %bb.0:
+; X64-NEXT:    vaddsd %xmm1, %xmm0, %xmm1
+; X64-NEXT:    #ARITH_FENCE
+; X64-NEXT:    vsubsd %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; X64-NEXT:    retq
+  %1 = fadd fast double %a, %b
+  %t = call double @llvm.arithmetic.fence.f64(double %1)
+  %2 = fsub fast double %c, %a
+  %3 = fadd fast double %t, %2
+  ret double %3
+}
+
+declare float @llvm.arithmetic.fence.f32(float)
+declare double @llvm.arithmetic.fence.f64(double)
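For context, the source-level entry point is Clang's __arithmetic_fence builtin (added in a separate companion change, under -fprotect-parens); a sketch of the motivation, assuming that builtin is available:

    // With -ffast-math, (a + b) + c may be reassociated into a + (b + c).
    // Fencing the parenthesized subexpression keeps its value intact, which
    // is what the tests above verify at the ISel level.
    double sum(double a, double b, double c) {
      return __arithmetic_fence(a + b) + c;
    }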