diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -10,59 +10,59 @@
 //
 //===----------------------------------------------------------------------===//
 
-let TargetPrefix = "riscv" in {
-
 //===----------------------------------------------------------------------===//
 // Atomics
 
-class MaskedAtomicRMW32Intrinsic
-    : Intrinsic<[llvm_i32_ty],
-                [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
-                [IntrArgMemOnly, NoCapture<0>, ImmArg<3>]>;
-
-class MaskedAtomicRMW32WithSextIntrinsic
-    : Intrinsic<[llvm_i32_ty],
-                [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
-                 llvm_i32_ty],
-                [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>;
+// Atomic Intrinsics have multiple versions for different access widths, which
+// all follow one of the following signatures (depending on how many arguments
+// they require). We carefully instantiate only specific versions of these for
+// specific integer widths, rather than using `llvm_anyint_ty`.
+//
+// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
+// canonical names, and the intrinsics used in the code will have a name
+// suffixed with the pointer type they are specialised for (denoted `<p>` in the
+// names below), in order to avoid type conflicts.
 
-def int_riscv_masked_atomicrmw_xchg_i32 : MaskedAtomicRMW32Intrinsic;
-def int_riscv_masked_atomicrmw_add_i32 : MaskedAtomicRMW32Intrinsic;
-def int_riscv_masked_atomicrmw_sub_i32 : MaskedAtomicRMW32Intrinsic;
-def int_riscv_masked_atomicrmw_nand_i32 : MaskedAtomicRMW32Intrinsic;
-def int_riscv_masked_atomicrmw_max_i32 : MaskedAtomicRMW32WithSextIntrinsic;
-def int_riscv_masked_atomicrmw_min_i32 : MaskedAtomicRMW32WithSextIntrinsic;
-def int_riscv_masked_atomicrmw_umax_i32 : MaskedAtomicRMW32Intrinsic;
-def int_riscv_masked_atomicrmw_umin_i32 : MaskedAtomicRMW32Intrinsic;
+let TargetPrefix = "riscv" in {
 
-def int_riscv_masked_cmpxchg_i32
-    : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty,
-                                llvm_i32_ty, llvm_i32_ty],
-                [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>;
+  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
+  class MaskedAtomicRMWFourArg<LLVMType itype>
+      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
+                  [IntrArgMemOnly, NoCapture<0>, ImmArg<3>]>;
+  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
+  class MaskedAtomicRMWFiveArg<LLVMType itype>
+      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
+                  [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>;
 
-class MaskedAtomicRMW64Intrinsic
-    : Intrinsic<[llvm_i64_ty],
-                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
-                [IntrArgMemOnly, NoCapture<0>, ImmArg<3>]>;
+  // We define 32-bit and 64-bit variants of the above, where T stands for i32
+  // or i64 respectively:
+  multiclass MaskedAtomicRMWFourArgIntrinsics {
+    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
+    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
+    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
+    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
+  }
 
-class MaskedAtomicRMW64WithSextIntrinsic
-    : Intrinsic<[llvm_i64_ty],
-                [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i64_ty,
-                 llvm_i64_ty],
-                [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>;
+  multiclass MaskedAtomicRMWFiveArgIntrinsics {
+    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
+    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
+    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
+    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
+  }
 
-def int_riscv_masked_atomicrmw_xchg_i64 : MaskedAtomicRMW64Intrinsic;
-def int_riscv_masked_atomicrmw_add_i64 : MaskedAtomicRMW64Intrinsic;
-def int_riscv_masked_atomicrmw_sub_i64 : MaskedAtomicRMW64Intrinsic;
-def int_riscv_masked_atomicrmw_nand_i64 : MaskedAtomicRMW64Intrinsic;
-def int_riscv_masked_atomicrmw_max_i64 : MaskedAtomicRMW64WithSextIntrinsic;
-def int_riscv_masked_atomicrmw_min_i64 : MaskedAtomicRMW64WithSextIntrinsic;
-def int_riscv_masked_atomicrmw_umax_i64 : MaskedAtomicRMW64Intrinsic;
-def int_riscv_masked_atomicrmw_umin_i64 : MaskedAtomicRMW64Intrinsic;
+  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(...)
+  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
+  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
+  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
+  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
+  // Signed min and max need an extra operand to do sign extension with.
+  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
+  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;
+  // Unsigned min and max don't need the extra operand.
+  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
+  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
 
-def int_riscv_masked_cmpxchg_i64
-    : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty, llvm_i64_ty, llvm_i64_ty,
-                                llvm_i64_ty, llvm_i64_ty],
-                [IntrArgMemOnly, NoCapture<0>, ImmArg<4>]>;
+  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(...)
+  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;
 
 } // TargetPrefix = "riscv"
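Note (not part of the patch): each `defm` expands the multiclass's `_i32`/`_i64` defs under its own name, so e.g. `defm int_riscv_masked_atomicrmw_add` still yields `int_riscv_masked_atomicrmw_add_i32` and `int_riscv_masked_atomicrmw_add_i64`, leaving the externally visible intrinsic names unchanged. Because the pointer operand is `llvm_anyptr_ty`, each concrete use in IR is further suffixed with the pointer type (the `<p>` placeholder above). A hedged sketch of the resulting declarations, assuming the typed-pointer mangling of this era (`p0i32` for `i32*`) and the operand order the masked-atomic expansion typically supplies (aligned address, operand, mask, [sign-extension shift amount,] ordering immediate):

; Hypothetical declarations for illustration only; names follow the
; @llvm.<name>.<width>.<p> scheme described in the comments above.

; Four-argument form (xchg/add/sub/nand/umax/umin):
declare i32 @llvm.riscv.masked.atomicrmw.add.i32.p0i32(
    i32* nocapture, i32, i32, i32 immarg)

; Five-argument form (signed max/min), with the extra operand the
; lowering uses to sign-extend the shifted value before comparing:
declare i32 @llvm.riscv.masked.atomicrmw.max.i32.p0i32(
    i32* nocapture, i32, i32, i32, i32 immarg)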