diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -833,6 +833,7 @@
 enum NEONModImmType {
   VMOVModImm,
   VMVNModImm,
+  MVEVMVNModImm,
   OtherModImm
 };
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -823,9 +823,6 @@
     setTargetDAGCombine(ISD::SIGN_EXTEND);
     setTargetDAGCombine(ISD::ZERO_EXTEND);
     setTargetDAGCombine(ISD::ANY_EXTEND);
-    setTargetDAGCombine(ISD::BUILD_VECTOR);
-    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
-    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
     setTargetDAGCombine(ISD::STORE);
     setTargetDAGCombine(ISD::FP_TO_SINT);
     setTargetDAGCombine(ISD::FP_TO_UINT);
@@ -843,6 +840,12 @@
     }
   }
 
+  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
+    setTargetDAGCombine(ISD::BUILD_VECTOR);
+    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+  }
+
   if (!Subtarget->hasFP64()) {
     // When targeting a floating-point unit with only single-precision
     // operations, f64 is legal for the few double-precision instructions which
@@ -5942,7 +5945,7 @@
 }
 
 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
-/// valid vector constant for a NEON instruction with a "modified immediate"
-/// operand (e.g., VMOV).  If so, return the encoded value.
+/// valid vector constant for a NEON or MVE instruction with a "modified
+/// immediate" operand (e.g., VMOV).  If so, return the encoded value.
 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                  unsigned SplatBitSize, SelectionDAG &DAG,
@@ -6028,6 +6031,10 @@
       break;
     }
 
+    // cmode == 0b1101 is not supported for MVE VMVN
+    if (type == MVEVMVNModImm)
+      return SDValue();
+
     if ((SplatBits & ~0xffffff) == 0 &&
         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
       // Value = 0x00nnffff: Op=x, Cmode=1101.
@@ -6594,13 +6601,15 @@
   if (SplatUndef.isAllOnesValue())
     return DAG.getUNDEF(VT);
 
-  if (ST->hasNEON() && SplatBitSize <= 64) {
+  if ((ST->hasNEON() && SplatBitSize <= 64) ||
+      (ST->hasMVEIntegerOps() && SplatBitSize <= 32)) {
     // Check if an immediate VMOV works.
     EVT VmovVT;
     SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                     SplatUndef.getZExtValue(), SplatBitSize,
                                     DAG, dl, VmovVT, VT.is128BitVector(),
                                     VMOVModImm);
+
     if (Val.getNode()) {
       SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
       return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
@@ -6608,10 +6617,10 @@
 
     // Try an immediate VMVN.
     uint64_t NegatedImm = (~SplatBits).getZExtValue();
-    Val = isNEONModifiedImm(NegatedImm,
-                            SplatUndef.getZExtValue(), SplatBitSize,
-                            DAG, dl, VmovVT, VT.is128BitVector(),
-                            VMVNModImm);
+    Val = isNEONModifiedImm(
+        NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
+        DAG, dl, VmovVT, VT.is128BitVector(),
+        ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
     if (Val.getNode()) {
       SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
       return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -249,6 +249,11 @@
 def ARMvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
 def ARMvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
 
+def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
+def ARMvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
+def ARMvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
+def ARMvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;
+
 def ARMWLS : SDNode<"ARMISD::WLS", SDT_ARMWhileLoop, [SDNPHasChain]>;
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -2192,6 +2192,23 @@
   }
 } // let isReMaterializable = 1
 
+let Predicates = [HasMVEInt] in {
+  def : Pat<(v16i8 (ARMvmovImm timm:$simm)),
+            (v16i8 (MVE_VMOVimmi8 nImmSplatI8:$simm))>;
+  def : Pat<(v8i16 (ARMvmovImm timm:$simm)),
+            (v8i16 (MVE_VMOVimmi16 nImmSplatI16:$simm))>;
+  def : Pat<(v4i32 (ARMvmovImm timm:$simm)),
+            (v4i32 (MVE_VMOVimmi32 nImmVMOVI32:$simm))>;
+
+  def : Pat<(v8i16 (ARMvmvnImm timm:$simm)),
+            (v8i16 (MVE_VMVNimmi16 nImmSplatI16:$simm))>;
+  def : Pat<(v4i32 (ARMvmvnImm timm:$simm)),
+            (v4i32 (MVE_VMVNimmi32 nImmVMOVI32:$simm))>;
+
+  def : Pat<(v4f32 (ARMvmovFPImm timm:$simm)),
+            (v4f32 (MVE_VMOVimmf32 nImmVMOVF32:$simm))>;
+}
+
 class MVE_VMINMAXA<string iname, string suffix, bits<2> size,
                    bit bit_12, list<dag> pattern=[]>
   : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qd_src, MQPR:$Qm),
diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -526,11 +526,6 @@
 def NEONvsli : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
 def NEONvsri : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
 
-def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
-def NEONvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
-def NEONvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
-def NEONvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;
-
 def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                          SDTCisVT<2, i32>]>;
 def NEONvorrImm : SDNode<"ARMISD::VORRIMM", SDTARMVORRIMM>;
@@ -566,14 +561,14 @@
 def NEONvtbl2 : SDNode<"ARMISD::VTBL2", SDTARMVTBL2>;
 
-def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
+def NEONimmAllZerosV: PatLeaf<(ARMvmovImm (i32 timm)), [{
   ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
   unsigned EltBits = 0;
   uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
   return (EltBits == 32 && EltVal == 0);
 }]>;
 
-def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
+def NEONimmAllOnesV: PatLeaf<(ARMvmovImm (i32 timm)), [{
   ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
   unsigned EltBits = 0;
   uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
@@ -5345,28 +5340,28 @@
 def  VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
                           (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                           "vmvn", "i16", "$Vd, $SIMM", "",
-                          [(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
+                          [(set DPR:$Vd, (v4i16 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 def  VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
                           (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                           "vmvn", "i16", "$Vd, $SIMM", "",
-                          [(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
+                          [(set QPR:$Vd, (v8i16 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 def  VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
                           (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                           "vmvn", "i32", "$Vd, $SIMM", "",
-                          [(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
+                          [(set DPR:$Vd, (v2i32 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 def  VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
                           (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                           "vmvn", "i32", "$Vd, $SIMM", "",
-                          [(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
+                          [(set QPR:$Vd, (v4i32 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 }
@@ -6053,57 +6048,57 @@
 def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
                         (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
                         "vmov", "i8", "$Vd, $SIMM", "",
-                        [(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
+                        [(set DPR:$Vd, (v8i8 (ARMvmovImm timm:$SIMM)))]>;
 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
                          "vmov", "i8", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set QPR:$Vd, (v16i8 (ARMvmovImm timm:$SIMM)))]>;
 def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                          "vmov", "i16", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set DPR:$Vd, (v4i16 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                          "vmov", "i16", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set QPR:$Vd, (v8i16 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                          "vmov", "i32", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set DPR:$Vd, (v2i32 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                          "vmov", "i32", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set QPR:$Vd, (v4i32 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
                          (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
                          "vmov", "i64", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set DPR:$Vd, (v1i64 (ARMvmovImm timm:$SIMM)))]>;
 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
                          (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
                          "vmov", "i64", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set QPR:$Vd, (v2i64 (ARMvmovImm timm:$SIMM)))]>;
 def VMOVv2f32 : N1ModImm<1, 0b000, 0b1111, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
                          "vmov", "f32", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v2f32 (NEONvmovFPImm timm:$SIMM)))]>;
+                         [(set DPR:$Vd, (v2f32 (ARMvmovFPImm timm:$SIMM)))]>;
 def VMOVv4f32 : N1ModImm<1, 0b000, 0b1111, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
                          "vmov", "f32", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v4f32 (NEONvmovFPImm timm:$SIMM)))]>;
+                         [(set QPR:$Vd, (v4f32 (ARMvmovFPImm timm:$SIMM)))]>;
 } // isReMaterializable, isAsCheapAsAMove
 
 // Add support for bytes replication feature, so it could be GAS compatible.
diff --git a/llvm/test/CodeGen/Thumb2/mve-loadstore.ll b/llvm/test/CodeGen/Thumb2/mve-loadstore.ll
--- a/llvm/test/CodeGen/Thumb2/mve-loadstore.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-loadstore.ll
@@ -92,8 +92,7 @@
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #40
 ; CHECK-NEXT:    sub sp, #40
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vdup.32 q0, r0
+; CHECK-NEXT:    vmov.i32 q0, #0x1
 ; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    movs r0, #3
@@ -121,8 +120,7 @@
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #40
 ; CHECK-NEXT:    sub sp, #40
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vdup.16 q0, r0
+; CHECK-NEXT:    vmov.i16 q0, #0x1
 ; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    movs r0, #3
@@ -150,8 +148,7 @@
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #40
 ; CHECK-NEXT:    sub sp, #40
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vdup.8 q0, r0
+; CHECK-NEXT:    vmov.i8 q0, #0x1
 ; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    movs r0, #3
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll b/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <16 x i8> @mov_int8_1() {
+; CHECK-LABEL: mov_int8_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mov_int8_m1(i8 *%dest) {
+; CHECK-LABEL: mov_int8_m1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_1(i16 *%dest) {
+; CHECK-LABEL: mov_int16_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_m1(i16 *%dest) {
+; CHECK-LABEL: mov_int16_m1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_256(i16 *%dest) {
+; CHECK-LABEL: mov_int16_256:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0x100
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_257() {
+; CHECK-LABEL: mov_int16_257:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_258(i16 *%dest) {
+; CHECK-LABEL: mov_int16_258:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI6_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 16908546 @ double 8.204306265173532E-304
+; CHECK-NEXT:    .long 16908546
+; CHECK-NEXT:    .long 16908546 @ double 8.204306265173532E-304
+; CHECK-NEXT:    .long 16908546
+entry:
+  ret <8 x i16> <i16 258, i16 258, i16 258, i16 258, i16 258, i16 258, i16 258, i16 258>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_1(i32 *%dest) {
+; CHECK-LABEL: mov_int32_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_256(i32 *%dest) {
+; CHECK-LABEL: mov_int32_256:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x100
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 256, i32 256, i32 256, i32 256>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_65536(i32 *%dest) {
+; CHECK-LABEL: mov_int32_65536:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x10000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 65536, i32 65536, i32 65536, i32 65536>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_16777216(i32 *%dest) {
+; CHECK-LABEL: mov_int32_16777216:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x1000000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 16777216, i32 16777216, i32 16777216, i32 16777216>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_16777217(i32 *%dest) {
+; CHECK-LABEL: mov_int32_16777217:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI11_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI11_0:
+; CHECK-NEXT:    .long 16777217 @ double 7.2911290000737531E-304
+; CHECK-NEXT:    .long 16777217
+; CHECK-NEXT:    .long 16777217 @ double 7.2911290000737531E-304
+; CHECK-NEXT:    .long 16777217
+entry:
+  ret <4 x i32> <i32 16777217, i32 16777217, i32 16777217, i32 16777217>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_17919(i32 *%dest) {
+; CHECK-LABEL: mov_int32_17919:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x45ff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 17919, i32 17919, i32 17919, i32 17919>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4587519(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4587519:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x45ffff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4587519, i32 4587519, i32 4587519, i32 4587519>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m1(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4294901760(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4294901760:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0xffff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4278190335(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4278190335:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI16_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 4278190335 @ double -5.4874634341155774E+303
+; CHECK-NEXT:    .long 4278190335
+; CHECK-NEXT:    .long 4278190335 @ double -5.4874634341155774E+303
+; CHECK-NEXT:    .long 4278190335
+entry:
+  ret <4 x i32> <i32 4278190335, i32 4278190335, i32 4278190335, i32 4278190335>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4278255615(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4278255615:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0xff0000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4278255615, i32 4278255615, i32 4278255615, i32 4278255615>
+}
+
+define arm_aapcs_vfpcc <4 x float> @mov_float_1(float *%dest) {
+; CHECK-LABEL: mov_float_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI18_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 1065353216 @ double 0.007812501848093234
+; CHECK-NEXT:    .long 1065353216
+; CHECK-NEXT:    .long 1065353216 @ double 0.007812501848093234
+; CHECK-NEXT:    .long 1065353216
+entry:
+  ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+}
+
+define arm_aapcs_vfpcc <4 x float> @mov_float_m3(float *%dest) {
+; CHECK-LABEL: mov_float_m3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI19_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .long 3225419776 @ double -32.000022917985916
+; CHECK-NEXT:    .long 3225419776
+; CHECK-NEXT:    .long 3225419776 @ double -32.000022917985916
+; CHECK-NEXT:    .long 3225419776
+entry:
+  ret <4 x float> <float -3.000000e+00, float -3.000000e+00, float -3.000000e+00, float -3.000000e+00>
+}
+
+define arm_aapcs_vfpcc <8 x half> @mov_float16_1(half *%dest) {
+; CHECK-LABEL: mov_float16_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0x3c00
+; CHECK-NEXT:    bx lr
+
+entry:
+  ret <8 x half> <half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00>
+}
+
+define arm_aapcs_vfpcc <8 x half> @mov_float16_m3(half *%dest) {
+; CHECK-LABEL: mov_float16_m3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0xc200
+; CHECK-NEXT:    bx lr
+
+entry:
+  ret <8 x half> <half 0xHC200, half 0xHC200, half 0xHC200, half 0xHC200, half 0xHC200, half 0xHC200, half 0xHC200, half 0xHC200>
+}
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmvnimm.ll b/llvm/test/CodeGen/Thumb2/mve-vmvnimm.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vmvnimm.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_511(i16 *%dest) {
+; CHECK-LABEL: mov_int16_511:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i16 q0, #0xfe00
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 511, i16 511, i16 511, i16 511, i16 511, i16 511, i16 511, i16 511>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_65281(i16 *%dest) {
+; CHECK-LABEL: mov_int16_65281:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i16 q0, #0xfe
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 65281, i16 65281, i16 65281, i16 65281, i16 65281, i16 65281, i16 65281, i16 65281>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m7(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m7:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x6
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -7, i32 -7, i32 -7, i32 -7>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m769(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m769:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x300
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -769, i32 -769, i32 -769, i32 -769>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m262145(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m262145:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x40000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -262145, i32 -262145, i32 -262145, i32 -262145>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m134217729(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m134217729:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x8000000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -134217729, i32 -134217729, i32 -134217729, i32 -134217729>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4294902528(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4294902528:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0xfcff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4294902528, i32 4294902528, i32 4294902528, i32 4294902528>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4278386688(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4278386688:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI7_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:    .long 4278386688 @ double -6.5147775434702224E+303
+; CHECK-NEXT:    .long 4278386688
+; CHECK-NEXT:    .long 4278386688 @ double -6.5147775434702224E+303
+; CHECK-NEXT:    .long 4278386688
+entry:
+  ret <4 x i32> <i32 4278386688, i32 4278386688, i32 4278386688, i32 4278386688>
+}
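
Closing note on decoding: the NEONimmAllZerosV / NEONimmAllOnesV leaves above go the other way, expanding an encoded immediate back to its splat value via ARM_AM::decodeNEONModImm. The sketch below is a small standalone decoder for just the cmode forms exercised in this patch (decode32 and the main() check are invented for illustration; the real decoder in ARMAddressingModes.h covers more cases and element sizes).

#include <cassert>
#include <cstdint>

// Expand (cmode, imm8) back to the 32-bit splat value for the byte-shift
// and ones-extended forms used by the vmov.i32/vmvn.i32 tests above.
uint32_t decode32(unsigned Cmode, uint8_t Imm) {
  switch (Cmode) {
  case 0x0: case 0x1: return uint32_t(Imm);        // 0x000000nn
  case 0x2: case 0x3: return uint32_t(Imm) << 8;   // 0x0000nn00
  case 0x4: case 0x5: return uint32_t(Imm) << 16;  // 0x00nn0000
  case 0x6: case 0x7: return uint32_t(Imm) << 24;  // 0xnn000000
  case 0xc: return (uint32_t(Imm) << 8) | 0xff;    // 0x0000nnff
  case 0xd: return (uint32_t(Imm) << 16) | 0xffff; // 0x00nnffff
  default: assert(false && "cmode form not modelled here"); return 0;
  }
}

int main() {
  // From the mov_int32_4294902528 test: "vmvn.i32 q0, #0xfcff"
  // materializes ~0x0000fcff == 0xffff0300 (i.e. 4294902528).
  assert(~decode32(0xc, 0xfc) == 0xffff0300u);
  return 0;
}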