Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1105,7 +1105,7 @@
   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
       TargetLowering::TypePromoteInteger) {
     EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
-    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
+    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
     Elt = ConstantInt::get(*getContext(), NewVal);
   }
   // In other cases the element type is illegal and needs to be expanded, for
@@ -1131,7 +1131,7 @@
     SmallVector<SDValue, 2> EltParts;
     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
       EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
-                                           .trunc(ViaEltSizeInBits), DL,
+                                           .zextOrTrunc(ViaEltSizeInBits), DL,
                                      ViaEltVT, isT, isO));
     }
Index: lib/Target/Mips/MipsSEISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsSEISelLowering.cpp
+++ lib/Target/Mips/MipsSEISelLowering.cpp
@@ -14,6 +14,7 @@
 #include "MipsMachineFunction.h"
 #include "MipsRegisterInfo.h"
 #include "MipsTargetMachine.h"
+#include "llvm/ADT/APInt.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/IR/Intrinsics.h"
@@ -1406,9 +1407,12 @@
   return Result;
 }
 
-static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
-  return DAG.getConstant(Op->getConstantOperandVal(ImmOp), SDLoc(Op),
-                         Op->getValueType(0));
+static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG,
+                                bool IsSigned = false) {
+  return DAG.getConstant(
+      APInt(Op->getValueType(0).getScalarType().getSizeInBits(),
+            Op->getConstantOperandVal(ImmOp), IsSigned),
+      SDLoc(Op), Op->getValueType(0));
 }
 
 static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
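The lowerMSASplatImm hunk above is the core of the crash fix: the old code handed Op->getConstantOperandVal(ImmOp), a zero-extended uint64_t, straight to DAG.getConstant(), and for a signed immediate such as clei_s's -25 that value no longer fits the vector element type, tripping the "value doesn't fit in the type" assertion in the uint64_t overload of getConstant. Building the APInt at the element width first makes the narrowing explicit, and the new IsSigned flag records the intended signedness of the immediate. A minimal standalone sketch of the arithmetic (editor's illustration compiled against LLVM's headers, assuming only llvm/ADT/APInt.h; it is not part of the patch):

#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>

int main() {
  // An i32 immediate of -25 read back through getZExtValue() arrives as the
  // zero-extended 64-bit value 0xFFFFFFD7, which does not fit in an i16
  // vector element.
  uint64_t Raw = 0xFFFFFFD7;
  // Constructing the APInt at the element width up front, as the patched
  // lowerMSASplatImm does, defines the narrowing explicitly.
  llvm::APInt Elt(/*numBits=*/16, Raw, /*isSigned=*/true);
  assert(Elt.getSExtValue() == -25 && "v8i16 splat element reads back as -25");
  return 0;
}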
@@ -1514,8 +1518,8 @@
 SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                       SelectionDAG &DAG) const {
   SDLoc DL(Op);
-
-  switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
+  unsigned Intrinsic = cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue();
+  switch (Intrinsic) {
   default:
     return SDValue();
   case Intrinsic::mips_shilo:
@@ -1585,6 +1589,8 @@
     // binsli_x(IfClear, IfSet, nbits) -> (vselect LBitsMask, IfSet, IfClear)
     EVT VecTy = Op->getValueType(0);
     EVT EltTy = VecTy.getVectorElementType();
+    if (Op->getConstantOperandVal(3) >= EltTy.getSizeInBits())
+      return DAG.getNode(ISD::UNDEF, DL, VecTy);
     APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
                                        Op->getConstantOperandVal(3));
     return DAG.getNode(ISD::VSELECT, DL, VecTy,
@@ -1598,6 +1604,8 @@
     // binsri_x(IfClear, IfSet, nbits) -> (vselect RBitsMask, IfSet, IfClear)
     EVT VecTy = Op->getValueType(0);
     EVT EltTy = VecTy.getVectorElementType();
+    if (Op->getConstantOperandVal(3) >= EltTy.getSizeInBits())
+      return DAG.getNode(ISD::UNDEF, DL, VecTy);
     APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
                                       Op->getConstantOperandVal(3));
     return DAG.getNode(ISD::VSELECT, DL, VecTy,
@@ -1691,7 +1699,7 @@
   case Intrinsic::mips_ceqi_w:
   case Intrinsic::mips_ceqi_d:
     return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
-                        lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
+                        lowerMSASplatImm(Op, 2, DAG, true), ISD::SETEQ);
   case Intrinsic::mips_cle_s_b:
   case Intrinsic::mips_cle_s_h:
   case Intrinsic::mips_cle_s_w:
   case Intrinsic::mips_cle_s_d:
@@ -1703,7 +1711,7 @@
   case Intrinsic::mips_clei_s_b:
   case Intrinsic::mips_clei_s_h:
   case Intrinsic::mips_clei_s_w:
   case Intrinsic::mips_clei_s_d:
     return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
-                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
+                        lowerMSASplatImm(Op, 2, DAG, true), ISD::SETLE);
   case Intrinsic::mips_cle_u_b:
   case Intrinsic::mips_cle_u_h:
   case Intrinsic::mips_cle_u_w:
@@ -1727,7 +1735,7 @@
   case Intrinsic::mips_clti_s_w:
   case Intrinsic::mips_clti_s_d:
     return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
-                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
+                        lowerMSASplatImm(Op, 2, DAG, true), ISD::SETLT);
   case Intrinsic::mips_clt_u_b:
   case Intrinsic::mips_clt_u_h:
   case Intrinsic::mips_clt_u_w:
@@ -1940,15 +1948,28 @@
   case Intrinsic::mips_insve_b:
   case Intrinsic::mips_insve_h:
   case Intrinsic::mips_insve_w:
-  case Intrinsic::mips_insve_d:
+  case Intrinsic::mips_insve_d: {
+    // Mark out of range values as UNDEF.
+    int64_t Max;
+    switch (Intrinsic) {
+    case Intrinsic::mips_insve_b: Max = 15; break;
+    case Intrinsic::mips_insve_h: Max = 7; break;
+    case Intrinsic::mips_insve_w: Max = 3; break;
+    case Intrinsic::mips_insve_d: Max = 1; break;
+    default: llvm_unreachable("Unmatched intrinsic");
+    }
+    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
+    if (Value < 0 || Value > Max)
+      return DAG.getNode(ISD::UNDEF, DL, Op->getValueType(0));
     return DAG.getNode(MipsISD::INSVE, DL, Op->getValueType(0),
                        Op->getOperand(1), Op->getOperand(2), Op->getOperand(3),
                        DAG.getConstant(0, DL, MVT::i32));
+  }
   case Intrinsic::mips_ldi_b:
   case Intrinsic::mips_ldi_h:
   case Intrinsic::mips_ldi_w:
   case Intrinsic::mips_ldi_d:
-    return lowerMSASplatImm(Op, 1, DAG);
+    return lowerMSASplatImm(Op, 1, DAG, true);
   case Intrinsic::mips_lsa:
   case Intrinsic::mips_dlsa: {
     EVT ResTy = Op->getValueType(0);
@@ -1982,7 +2003,7 @@
   case Intrinsic::mips_maxi_s_w:
   case Intrinsic::mips_maxi_s_d:
     return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
-                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG, true));
   case Intrinsic::mips_maxi_u_b:
   case Intrinsic::mips_maxi_u_h:
   case Intrinsic::mips_maxi_u_w:
@@ -2006,7 +2027,7 @@
   case Intrinsic::mips_mini_s_w:
   case Intrinsic::mips_mini_s_d:
     return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
-                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
+                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG, true));
   case Intrinsic::mips_mini_u_b:
   case Intrinsic::mips_mini_u_h:
   case Intrinsic::mips_mini_u_w:
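Every one of the new out-of-range guards (insve above; sat, sldi, srari and srlri below) derives Max from the 128-bit MSA vector shape, so the per-intrinsic tables encode just two formulas. A short sketch of that observation (editor's illustration; the helper names are hypothetical, not from the patch):

#include <cassert>

// insve and sldi immediates select a lane, so Max is (128 / EltBits) - 1:
// 15 for .b, 7 for .h, 3 for .w and 1 for .d.
int maxLaneImm(unsigned EltBits) { return 128 / EltBits - 1; }

// sat, srari and srlri immediates count bits, so Max is EltBits - 1:
// 7 for .b, 15 for .h, 31 for .w and 63 for .d.
int maxBitCountImm(unsigned EltBits) { return EltBits - 1; }

int main() {
  assert(maxLaneImm(8) == 15 && maxLaneImm(64) == 1);
  assert(maxBitCountImm(8) == 7 && maxBitCountImm(64) == 63);
  return 0;
}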
@@ -2079,11 +2100,59 @@
   case Intrinsic::mips_pcnt_w:
   case Intrinsic::mips_pcnt_d:
     return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1));
+  case Intrinsic::mips_sat_s_b:
+  case Intrinsic::mips_sat_s_h:
+  case Intrinsic::mips_sat_s_w:
+  case Intrinsic::mips_sat_s_d:
+  case Intrinsic::mips_sat_u_b:
+  case Intrinsic::mips_sat_u_h:
+  case Intrinsic::mips_sat_u_w:
+  case Intrinsic::mips_sat_u_d: {
+    // Mark out of range values as UNDEF.
+    int64_t Max;
+    switch (Intrinsic) {
+    case Intrinsic::mips_sat_s_b:
+    case Intrinsic::mips_sat_u_b: Max = 7; break;
+    case Intrinsic::mips_sat_s_h:
+    case Intrinsic::mips_sat_u_h: Max = 15; break;
+    case Intrinsic::mips_sat_s_w:
+    case Intrinsic::mips_sat_u_w: Max = 31; break;
+    case Intrinsic::mips_sat_s_d:
+    case Intrinsic::mips_sat_u_d: Max = 63; break;
+    default: llvm_unreachable("Unmatched intrinsic");
+    }
+    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
+    if (Value < 0 || Value > Max)
+      return DAG.getNode(ISD::UNDEF, DL, Op->getValueType(0));
+    return SDValue();
+  }
   case Intrinsic::mips_shf_b:
   case Intrinsic::mips_shf_h:
-  case Intrinsic::mips_shf_w:
+  case Intrinsic::mips_shf_w: {
+    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
+    if (Value < 0 || Value > 255)
+      return DAG.getNode(ISD::UNDEF, DL, Op->getValueType(0));
     return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0),
                        Op->getOperand(2), Op->getOperand(1));
+  }
+  case Intrinsic::mips_sldi_b:
+  case Intrinsic::mips_sldi_h:
+  case Intrinsic::mips_sldi_w:
+  case Intrinsic::mips_sldi_d: {
+    // Mark out of range values as UNDEF.
+    int64_t Max;
+    switch (Intrinsic) {
+    case Intrinsic::mips_sldi_b: Max = 15; break;
+    case Intrinsic::mips_sldi_h: Max = 7; break;
+    case Intrinsic::mips_sldi_w: Max = 3; break;
+    case Intrinsic::mips_sldi_d: Max = 1; break;
+    default: llvm_unreachable("Unmatched intrinsic");
+    }
+    int64_t Value = cast<ConstantSDNode>(Op->getOperand(3))->getSExtValue();
+    if (Value < 0 || Value > Max)
+      return DAG.getNode(ISD::UNDEF, DL, Op->getValueType(0));
+    return SDValue();
+  }
   case Intrinsic::mips_sll_b:
   case Intrinsic::mips_sll_h:
   case Intrinsic::mips_sll_w:
@@ -2126,6 +2195,24 @@
   case Intrinsic::mips_srai_d:
     return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1),
                        lowerMSASplatImm(Op, 2, DAG));
+  case Intrinsic::mips_srari_b:
+  case Intrinsic::mips_srari_h:
+  case Intrinsic::mips_srari_w:
+  case Intrinsic::mips_srari_d: {
+    // Mark out of range values as UNDEF.
+    int64_t Max;
+    switch (Intrinsic) {
+    case Intrinsic::mips_srari_b: Max = 7; break;
+    case Intrinsic::mips_srari_h: Max = 15; break;
+    case Intrinsic::mips_srari_w: Max = 31; break;
+    case Intrinsic::mips_srari_d: Max = 63; break;
+    default: llvm_unreachable("Unmatched intrinsic");
+    }
+    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
+    if (Value < 0 || Value > Max)
+      return DAG.getNode(ISD::UNDEF, DL, Op->getValueType(0));
+    return SDValue();
+  }
   case Intrinsic::mips_srl_b:
   case Intrinsic::mips_srl_h:
   case Intrinsic::mips_srl_w:
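Note that the sat, sldi and srari cases above end with return SDValue() rather than a replacement node: in lowerINTRINSIC_WO_CHAIN an empty SDValue tells the caller that no custom lowering happened, so an in-range intrinsic survives for the TableGen patterns to select, and only the out-of-range case is folded to UNDEF. A simplified model of that control flow (editor's sketch; the types are stand-ins, not LLVM's):

#include <cassert>
#include <optional>

enum class Lowered { Undef }; // stand-in for the ISD::UNDEF replacement

// Mirrors the shape of the srari/srlri cases: fold out-of-range immediates,
// otherwise report "no custom lowering" (LLVM's empty SDValue).
std::optional<Lowered> lowerBitCountImm(long Value, unsigned EltBits) {
  long Max = EltBits - 1;
  if (Value < 0 || Value > Max)
    return Lowered::Undef;
  return std::nullopt;
}

int main() {
  assert(lowerBitCountImm(65, 8).has_value());   // srari.b with 65: UNDEF
  assert(!lowerBitCountImm(63, 64).has_value()); // srari.d with 63: selected
  return 0;
}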
@@ -2138,6 +2225,24 @@
   case Intrinsic::mips_srli_d:
     return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1),
                        lowerMSASplatImm(Op, 2, DAG));
+  case Intrinsic::mips_srlri_b:
+  case Intrinsic::mips_srlri_h:
+  case Intrinsic::mips_srlri_w:
+  case Intrinsic::mips_srlri_d: {
+    // Mark out of range values as UNDEF.
+    int64_t Max;
+    switch (Intrinsic) {
+    case Intrinsic::mips_srlri_b: Max = 7; break;
+    case Intrinsic::mips_srlri_h: Max = 15; break;
+    case Intrinsic::mips_srlri_w: Max = 31; break;
+    case Intrinsic::mips_srlri_d: Max = 63; break;
+    default: llvm_unreachable("Unmatched intrinsic");
+    }
+    int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue();
+    if (Value < 0 || Value > Max)
+      return DAG.getNode(ISD::UNDEF, DL, Op->getValueType(0));
+    return SDValue();
+  }
   case Intrinsic::mips_subv_b:
   case Intrinsic::mips_subv_h:
   case Intrinsic::mips_subv_w:
Index: test/CodeGen/Mips/msa/immediates-bad.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/msa/immediates-bad.ll
@@ -0,0 +1,2167 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,MSA32
+; RUN: llc -march=mips64 -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,MSA64
+
+; Test that the immediate intrinsics don't crash LLVM.
+
+; Some of the intrinsics lower to equivalent instructions.
+
+define void @addvi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: addvi_b:
+; CHECK: addv.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 63)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @andi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: andi_b:
+; CHECK: andi.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 63)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @bclri_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bclri_b:
+; CHECK: nop
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 65)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @binsli_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: binsli_b:
+; CHECK: ldi.b $w[[R0:[0-9]+]], 0
+; CHECK: st.b $w[[R0]], 0($4)
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 65)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @binsri_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: binsri_b:
+; CHECK: binsri.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @bmnzi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bmnzi_b:
+; CHECK: binsri.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 63)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @bmzi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bmzi_b:
+; CHECK: binsri.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 63)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @bnegi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bnegi_b:
+; CHECK: bnegi.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
+  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  ret void
+}
+
+define void @bseli_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bseli_b:
+; CHECK: bseli.b
+  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a,
<16 x i8> %a, i32 63) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @bseti_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: bseti_b: +; CHECK: nop + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 9) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @clei_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: clei_s_b: +; CHECK: cle_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 152) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @clei_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: clei_u_b: +; CHECK: cle_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 163) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @clti_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: clti_s_b: +; CHECK: clt_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 129) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @clti_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: clti_u_b: +; CHECK: clt_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 163) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @ldi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: ldi_b: +; CHECK: ldi.b + %r = call <16 x i8> @llvm.mips.ldi.b(i32 1025) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @maxi_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: maxi_s_b: +; CHECK: max_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 163) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @maxi_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: maxi_u_b: +; CHECK: max_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 163) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @mini_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: mini_s_b: +; CHECK: min_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 163) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @mini_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: mini_u_b: +; CHECK: min_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 163) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @nori_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: nori_b: +; CHECK: nori.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 63) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @ori_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: ori_b: +; CHECK: ori.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 63) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @sldi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: sldi_b: +; CHECK: sldi.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret 
void +} + +define void @slli_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: slli_b: +; CHECK: sll.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 65) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @splati_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: splati_b: +; CHECK: splat.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 65) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @srai_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: srai_b: +; CHECK: sra.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 65) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @srari_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: srari_b: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.b $w[[R0:[0-9]+]], 0($4) + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 65) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @srli_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: srli_b: +; CHECK: srl.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 65) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @srlri_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: srlri_b: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.b $w[[R0:[0-9]+]], 0($4) + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 65) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @addvi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: addvi_w: +; CHECK: addv.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @bclri_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: bclri_w: +; CHECK: nop + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @binsli_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: binsli_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @binsri_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: binsri_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @bnegi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: bnegi_w: +; CHECK: nop + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @bseti_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: bseti_w: +; CHECK: nop + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @clei_s_w(<4 x i32> * %ptr) { +entry: +; 
CHECK-LABEL: clei_s_w: +; CHECK: cle_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @clei_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: clei_u_w: +; CHECK: cle_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @clti_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: clti_s_w: +; CHECK: clt_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @clti_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: clti_u_w: +; CHECK: clt_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @maxi_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: maxi_s_w: +; CHECK: max_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @maxi_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: maxi_u_w: +; CHECK: max_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @mini_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: mini_s_w: +; CHECK: min_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @mini_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: mini_u_w: +; CHECK: min_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @ldi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: ldi_w: +; CHECK: addiu $[[R0:[0-9]]], $zero, 1024 +; CHECK: fill.w $w[[R1:[0-9]+]], $[[R0]] +; CHECK: st.w $w[[R1:[0-9]+]], 0($4) + %r = call <4 x i32> @llvm.mips.ldi.w(i32 1024) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @sldi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: sldi_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 63) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @slli_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: slli_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 65) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @splati_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: splati_w: +; CHECK: splat.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 65) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @srai_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: srai_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * 
%ptr, align 16 + %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 65) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @srari_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: srari_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 65) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @srli_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: srli_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 65) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @srlri_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: srlri_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 65) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @addvi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: addvi_h: +; CHECK: addv.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @bclri_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: bclri_h: +; CHECK: nop + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 16) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @binsli_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: binsli_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 17) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @binsri_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: binsri_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0:[0-9]+]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 19) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @bnegi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: bnegi_h: +; CHECK: nop + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 19) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @bseti_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: bseti_h: +; CHECK: nop + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 19) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @clei_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: clei_s_h: +; CHECK: cle_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 63) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @clei_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: clei_u_h: +; CHECK: cle_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 130) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @clti_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: clti_s_h: +; CHECK: clt_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, 
align 16 + %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 63) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @clti_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: clti_u_h: +; CHECK: clt_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 63) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @maxi_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: maxi_s_h: +; CHECK: max_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 63) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @maxi_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: maxi_u_h: +; CHECK: max_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 130) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @mini_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: mini_s_h: +; CHECK: min_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 63) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @mini_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: mini_u_h: +; CHECK: min_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 130) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @ldi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: ldi_h: +; CHECK: addiu $[[R0:[0-9]]], $zero, 1024 +; CHECK: fill.h $w[[R1:[0-9]+]], $[[R0]] +; CHECK: st.h $w[[R1:[0-9]+]], 0($4) + %r = call <8 x i16> @llvm.mips.ldi.h(i32 1024) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @sldi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: sldi_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0:[0-9]+]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @slli_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: slli_h: +; CHECK: sll.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @splati_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: splati_h: +; CHECK: splat.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @srai_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: srai_h: +; CHECK: sra.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @srari_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: srari_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0:[0-9]+]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @srli_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: srli_h: +; CHECK: srl.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + 
+define void @srlri_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: srlri_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0:[0-9]+]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 65) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define i32 @copy_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: copy_s_b: +; CHECK: splat.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 17) + ret i32 %r +} +define i32 @copy_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: copy_s_h: +; CHECK: splat.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 9) + ret i32 %r +} +define i32 @copy_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: copy_s_w: +; CHECK: splat.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 5) + ret i32 %r +} +define i32 @copy_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: copy_u_b: +; CHECK: splat.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 16) + ret i32 %r +} +define i32 @copy_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: copy_u_h: +; CHECK: splat.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 9) + ret i32 %r +} +define i32 @copy_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: copy_u_w: +; CHECK: splat.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 5) + ret i32 %r +} + +define i64 @copy_s_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: copy_s_d: +; MSA32: nop +; MSA64: splat.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 3) + ret i64 %r +} + +define i64 @copy_u_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: copy_u_d: +; MSA32: nop +; MSA64: splat.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 3) + ret i64 %r +} + +define void @addvi_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: addvi_d: +; CHECK: addv.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @bclri_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: bclri_d: +; CHECK: nop + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 64) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @binsli_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: binsli_d: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; MSA32: st.w $w[[R0:[0-9]+]], 0($4) +; MSA64: st.d $w[[R0:[0-9]+]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @binsri_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: binsri_d: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; MSA32: st.w $w[[R0:[0-9]+]], 0($4) +; MSA64: st.d $w[[R0:[0-9]+]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @bnegi_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: bnegi_d: +; CHECK: nop + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x 
i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @bseti_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: bseti_d: +; CHECK: nop + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @clei_s_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: clei_s_d: +; CHECK: cle_s.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @clei_u_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: clei_u_d: +; CHECK: cle_u.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @clti_s_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: clti_s_d: +; CHECK: clt_s.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @clti_u_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: clti_u_d: +; CHECK: clt_u.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @ldi_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: ldi_d: +; CHECK: ld.d $w[[R0:[0-9]+]], 0(${{[0-9]}}) +; CHECK: st.d $w[[R0:[0-9]+]], 0($4) + + %r = call <2 x i64> @llvm.mips.ldi.d(i32 1024) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @maxi_s_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: maxi_s_d: +; CHECK: max_s.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @maxi_u_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: maxi_u_d: +; CHECK: max_u.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @mini_s_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: mini_s_d: +; CHECK: min_s.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @mini_u_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: mini_u_d: +; CHECK: min_u.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 63) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @sldi_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: sldi_d: +; CHECK: sldi.d + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @slli_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: slli_d: +; MSA32: sll.d +; MSA64: ldi.b $w[[R0:[0-9]+]], 0 +; MSA64: st.d $w[[R0]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @srai_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: srai_d: +; MSA32: sra.d +; MSA64: 
ldi.b $w[[R0:[0-9]+]], 0 +; MSA64: st.d $w[[R0]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @srari_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: srari_d: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; MSA32: st.w $w[[R0]], 0($4) +; MSA64: st.d $w[[R0]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @srli_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: srli_d: +; MSA32: srl.d +; MSA64: ldi.b $w[[R0:[0-9]]], 0 +; MSA64: st.d $w[[R0]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +define void @srlri_d(<2 x i64> * %ptr) { +entry: +; CHECK-LABEL: srlri_d: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; MSA32: st.w $w[[R0]], 0($4) +; MSA64: st.d $w[[R0]], 0($4) + %a = load <2 x i64>, <2 x i64> * %ptr, align 16 + %r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 65) + store <2 x i64> %r, <2 x i64> * %ptr, align 16 + ret void +} + +; Negative numbers +define void @neg_addvi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_addvi_b: +; CHECK: addv.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_andi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_andi_b: +; CHECK: andi.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_bclri_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_bclri_b: +; CHECK: nop + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_binsli_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_binsli_b: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.b $w[[R0:[0-9]+]], 0($4) + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_binsri_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_binsri_b: +; CHECK: binsri.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_bmnzi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_bmnzi_b: +; CHECK: bmnzi.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_bmzi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_bmzi_b: +; CHECK: bmnzi.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_bnegi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_bnegi_b: +; CHECK: bnegi.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, 
i32 6) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_bseli_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_bseli_b: +; CHECK: bseli.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_bseti_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_bseti_b: +; CHECK: nop + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 -5) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_clei_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_clei_s_b: +; CHECK: cle_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 -120) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_clei_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_clei_u_b: +; CHECK: cle_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_clti_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_clti_s_b: +; CHECK: clt_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 -35) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_clti_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_clti_u_b: +; CHECK: clt_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_ldi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_ldi_b: +; CHECK: ldi.b + %r = call <16 x i8> @llvm.mips.ldi.b(i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_maxi_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_maxi_s_b: +; CHECK: maxi_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_maxi_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_maxi_u_b: +; CHECK: maxi_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_mini_s_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_mini_s_b: +; CHECK: mini_s.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_mini_u_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_mini_u_b: +; CHECK: mini_u.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_nori_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_nori_b: +; CHECK: nori.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_ori_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_ori_b: +; CHECK: ori.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> 
@llvm.mips.ori.b(<16 x i8> %a, i32 -25) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_sldi_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_sldi_b: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.b $w[[R0:[0-9]+]], 0($4) + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 -7) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_slli_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_slli_b: +; CHECK: sll.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_splati_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_splati_b: +; CHECK: splat.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_srai_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_srai_b: +; CHECK: sra.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_srari_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_srari_b: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.b $w[[R0:[0-9]+]], 0($4) + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_srli_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_srli_b: +; CHECK: srl.b + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_srlri_b(<16 x i8> * %ptr) { +entry: +; CHECK-LABEL: neg_srlri_b: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.b $w[[R0:[0-9]+]], 0($4) + %a = load <16 x i8>, <16 x i8> * %ptr, align 16 + %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 -3) + store <16 x i8> %r, <16 x i8> * %ptr, align 16 + ret void +} + +define void @neg_addvi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_addvi_w: +; CHECK: addv.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_bclri_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_bclri_w: +; CHECK: nop + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_binsli_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_binsli_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_binsri_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_binsri_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0:[0-9]+]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_bnegi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: 
neg_bnegi_w: +; CHECK: nop + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_bseti_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_bseti_w: +; CHECK: nop + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_clei_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_clei_s_w: +; CHECK: cle_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 -140) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_clei_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_clei_u_w: +; CHECK: cle_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_clti_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_clti_s_w: +; CHECK: clt_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 -150) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_clti_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_clti_u_w: +; CHECK: clt_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 -25) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_maxi_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_maxi_s_w: +; CHECK: max_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 -200) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_maxi_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_maxi_u_w: +; CHECK: max_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 -200) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_mini_s_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_mini_s_w: +; CHECK: min_s.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 -200) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_mini_u_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_mini_u_w: +; CHECK: min_u.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 -200) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_ldi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_ldi_w: +; CHECK: ldi.w + %r = call <4 x i32> @llvm.mips.ldi.w(i32 -300) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_sldi_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_sldi_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 -20) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_slli_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_slli_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 
-3) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_splati_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_splati_w: +; CHECK: splat.w + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 -3) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_srai_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_srai_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 -3) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_srari_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_srari_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 -3) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_srli_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_srli_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 -3) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_srlri_w(<4 x i32> * %ptr) { +entry: +; CHECK-LABEL: neg_srlri_w: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.w $w[[R0]], 0($4) + %a = load <4 x i32>, <4 x i32> * %ptr, align 16 + %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 -3) + store <4 x i32> %r, <4 x i32> * %ptr, align 16 + ret void +} + +define void @neg_addvi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_addvi_h: +; CHECK: addv.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 -25) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_bclri_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_bclri_h: +; CHECK: nop + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 -8) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_binsli_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_binsli_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 -8) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_binsri_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_binsri_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 -15) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_bnegi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_bnegi_h: +; CHECK: nop + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 -14) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_bseti_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_bseti_h: +; CHECK: nop + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 -15) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_clei_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_clei_s_h: +; CHECK: cle_s.h + 
%a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 -25) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_clei_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_clei_u_h: +; CHECK: cle_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 -25) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_clti_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_clti_s_h: +; CHECK: clt_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 -150) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_clti_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_clti_u_h: +; CHECK: clt_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 -25) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_maxi_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_maxi_s_h: +; CHECK: max_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 -200) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_maxi_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_maxi_u_h: +; CHECK: max_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 -200) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_mini_s_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_mini_s_h: +; CHECK: min_s.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 -200) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_mini_u_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_mini_u_h: +; CHECK: min_u.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 -2) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_ldi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_ldi_h: +; CHECK: ldi.h + %r = call <8 x i16> @llvm.mips.ldi.h(i32 -300) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_sldi_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_sldi_h: +; CHECK: ldi.b $w[[R0:[0-9]+]], 0 +; CHECK: st.h $w[[R0]], 0($4) + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 -3) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_slli_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_slli_h: +; CHECK: sll.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 -3) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_splati_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_splati_h: +; CHECK: splat.h + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 -3) + store <8 x i16> %r, <8 x i16> * %ptr, align 16 + ret void +} + +define void @neg_srai_h(<8 x i16> * %ptr) { +entry: +; CHECK-LABEL: neg_srai_h: +; CHECK: ld.h $w[[R0:[0-9]]], 0($4) +; CHECK: ldi.h $w[[R1:[0-9]]], -3 +; CHECK: sra.h $w{{[0-9]}}, $w[[R0]], $w[[R1]] + %a = load <8 x i16>, <8 x i16> * %ptr, align 16 + %r = call <8 x i16> 
+ %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 -3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srari_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srari_h:
+; CHECK: ldi.b $w[[R0:[0-9]]], 0
+; CHECK: st.h $w[[R0]], 0($4)
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 -3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srli_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srli_h:
+; CHECK: ld.h $w[[R0:[0-9]]], 0($4)
+; CHECK: ldi.h $w[[R1:[0-9]]], -3
+; CHECK: srl.h $w{{[0-9]}}, $w[[R0]], $w[[R1]]
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 -3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srlri_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srlri_h:
+; CHECK: ldi.b $w[[R0:[0-9]]], 0
+; CHECK: st.h $w[[R0]], 0($4)
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 -3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define i32 @neg_copy_s_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_s_b:
+; CHECK: splat.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 -1)
+ ret i32 %r
+}
+define i32 @neg_copy_s_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_s_h:
+; CHECK: splat.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 -1)
+ ret i32 %r
+}
+define i32 @neg_copy_s_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_s_w:
+; CHECK: splat.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 -1)
+ ret i32 %r
+}
+define i32 @neg_copy_u_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_u_b:
+; CHECK: splat.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 -1)
+ ret i32 %r
+}
+define i32 @neg_copy_u_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_u_h:
+; CHECK: splat.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 -1)
+ ret i32 %r
+}
+define i32 @neg_copy_u_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_u_w:
+; CHECK: splat.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 -1)
+ ret i32 %r
+}
+
+define i64 @neg_copy_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_s_d:
+; MSA32: nop
+; MSA64: splat.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 -1)
+ ret i64 %r
+}
+
+define i64 @neg_copy_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_copy_u_d:
+; MSA32: nop
+; MSA64: splat.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 -1)
+ ret i64 %r
+}
+
+define void @neg_addvi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_addvi_d:
+; CHECK: addv.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_bclri_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_bclri_d:
+; CHECK: nop
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_binsli_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_binsli_d:
+; CHECK: ldi.b $w[[R0:[0-9]+]], 0
+; MSA32: st.w $w[[R0]], 0($4)
+; MSA64: st.d $w[[R0]], 0($4)
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_binsri_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_binsri_d:
+; CHECK: ldi.b $w[[R0:[0-9]+]], 0
+; MSA32: st.w $w[[R0]], 0($4)
+; MSA64: st.d $w[[R0]], 0($4)
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_bnegi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_bnegi_d:
+; CHECK: nop
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_bseti_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_bseti_d:
+; CHECK: nop
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_clei_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_clei_s_d:
+; CHECK: cle_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 -45)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_clei_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_clei_u_d:
+; CHECK: cle_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_clti_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_clti_s_d:
+; CHECK: clt_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 -32)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_clti_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_clti_u_d:
+; CHECK: clt_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 -25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_ldi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_ldi_d:
+; CHECK: ldi.d
+ %r = call <2 x i64> @llvm.mips.ldi.d(i32 -3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_maxi_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_maxi_s_d:
+; CHECK: max_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 -202)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_maxi_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_maxi_u_d:
+; CHECK: max_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 -2)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_mini_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_mini_s_d:
+; CHECK: min_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 -202)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_mini_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_mini_u_d:
+; CHECK: min_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 -2)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_sldi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_sldi_d:
+; CHECK: ldi.b $w[[R0:[0-9]]], 0
+; MSA32: st.w $w[[R0]], 0($4)
+; MSA64: st.d $w[[R0]], 0($4)
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 -1)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_slli_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_slli_d:
+; CHECK: sll.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 -3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srai_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srai_d:
+; CHECK: sra.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 -3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srari_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srari_d:
+; CHECK: ldi.b $w[[R0:[0-9]]], 0
+; MSA32: st.w $w[[R0]], 0($4)
+; MSA64: st.d $w[[R0]], 0($4)
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 -3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srli_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srli_d:
+; CHECK: srl.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 -3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @neg_srlri_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: neg_srlri_d:
+; CHECK: ldi.b $w[[R0:[0-9]]], 0
+; MSA32: st.w $w[[R0]], 0($4)
+; MSA64: st.d $w[[R0]], 0($4)
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 -3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ldi.h(i32)
+declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32)
+declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.ldi.w(i32)
+declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32)
+declare <2 x i64> @llvm.mips.ldi.d(i32)
+declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clti.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.mini.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.splati.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srlri.d(<2 x i64>, i32)
+declare <16 x i8> @llvm.mips.ldi.b(i32)
+declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32)
+declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32)
+declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32)
+declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32)
+declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32)
+declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32)
+declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32)
+declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32)
+declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32)
Index: test/CodeGen/Mips/msa/immediates.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/msa/immediates.ll
@@ -0,0 +1,1107 @@
+; RUN: llc -march=mips -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,MSA32
+; RUN: llc -march=mips64 -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,MSA64
+
+; Test that the immediate intrinsics don't crash LLVM.
+
+; Some of the intrinsics lower to equivalent forms.
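+; For example, bclri_b below is checked for andi.b rather than bclri.b:
+; clearing bit 3 of every byte is equivalent to an AND with the inverted
+; bit mask ~(1 << 3) = 0xf7, so the backend is free to select andi.b.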
+
+define void @addvi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: addvi_b:
+; CHECK: addvi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @andi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: andi_b:
+; CHECK: andi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @bclri_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bclri_b:
+; CHECK: andi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @binsli_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: binsli_b:
+; CHECK: binsli.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @binsri_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: binsri_b:
+; CHECK: binsri.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @bmnzi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bmnzi_b:
+; CHECK: bmnzi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @bmzi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bmzi_b:
+; CHECK: bmnzi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @bnegi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bnegi_b:
+; CHECK: bnegi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @bseli_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bseli_b:
+; CHECK: bseli.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @bseti_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: bseti_b:
+; CHECK: bseti.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 5)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @clei_s_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: clei_s_b:
+; CHECK: clei_s.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 12)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @clei_u_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: clei_u_b:
+; CHECK: clei_u.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @clti_s_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: clti_s_b:
+; CHECK: clti_s.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 15)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @clti_u_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: clti_u_b:
+; CHECK: clti_u.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @ldi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: ldi_b:
+; CHECK: ldi.b
+ %r = call <16 x i8> @llvm.mips.ldi.b(i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_s_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_s_b:
+; CHECK: maxi_s.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_u_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_u_b:
+; CHECK: maxi_u.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @mini_s_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: mini_s_b:
+; CHECK: mini_s.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @mini_u_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: mini_u_b:
+; CHECK: mini_u.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @nori_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: nori_b:
+; CHECK: nori.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @ori_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: ori_b:
+; CHECK: ori.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 25)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @sldi_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: sldi_b:
+; CHECK: sldi.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @slli_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: slli_b:
+; CHECK: slli.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @splati_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: splati_b:
+; CHECK: splati.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @srai_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: srai_b:
+; CHECK: srai.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @srari_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: srari_b:
+; CHECK: srari.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @srli_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: srli_b:
+; CHECK: srli.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @srlri_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: srlri_b:
+; CHECK: srlri.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 3)
+ store <16 x i8> %r, <16 x i8> * %ptr, align 16
+ ret void
+}
+
+define void @addvi_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: addvi_w:
+; CHECK: addvi.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @bclri_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: bclri_w:
+; CHECK: bclri.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @binsli_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: binsli_w:
+; CHECK: binsli.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @binsri_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: binsri_w:
+; CHECK: binsri.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @bnegi_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: bnegi_w:
+; CHECK: bnegi.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @bseti_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: bseti_w:
+; CHECK: bseti.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @clei_s_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: clei_s_w:
+; CHECK: clei_s.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 14)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @clei_u_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: clei_u_w:
+; CHECK: clei_u.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @clti_s_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: clti_s_w:
+; CHECK: clti_s.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 15)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @clti_u_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: clti_u_w:
+; CHECK: clti_u.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 25)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_s_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_s_w:
+; CHECK: maxi_s.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 2)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_u_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_u_w:
+; CHECK: maxi_u.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 2)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @mini_s_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: mini_s_w:
+; CHECK: mini_s.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 2)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @mini_u_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: mini_u_w:
+; CHECK: mini_u.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 2)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @ldi_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: ldi_w:
+; CHECK: ldi.w
+ %r = call <4 x i32> @llvm.mips.ldi.w(i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @sldi_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: sldi_w:
+; CHECK: sldi.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 2)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @slli_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: slli_w:
+; CHECK: slli.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @splati_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: splati_w:
+; CHECK: splati.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @srai_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: srai_w:
+; CHECK: srai.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @srari_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: srari_w:
+; CHECK: srari.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @srli_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: srli_w:
+; CHECK: srli.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @srlri_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: srlri_w:
+; CHECK: srlri.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 3)
+ store <4 x i32> %r, <4 x i32> * %ptr, align 16
+ ret void
+}
+
+define void @addvi_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: addvi_h:
+; CHECK: addvi.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 25)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @bclri_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: bclri_h:
+; CHECK: bclri.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 8)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @binsli_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: binsli_h:
+; CHECK: binsli.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 8)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @binsri_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: binsri_h:
+; CHECK: binsri.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 15)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @bnegi_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: bnegi_h:
+; CHECK: bnegi.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 14)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @bseti_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: bseti_h:
+; CHECK: bseti.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 15)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @clei_s_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: clei_s_h:
+; CHECK: clei_s.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 13)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @clei_u_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: clei_u_h:
+; CHECK: clei_u.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 25)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @clti_s_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: clti_s_h:
+; CHECK: clti_s.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 15)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @clti_u_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: clti_u_h:
+; CHECK: clti_u.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 25)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_s_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_s_h:
+; CHECK: maxi_s.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 2)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_u_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_u_h:
+; CHECK: maxi_u.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 2)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @mini_s_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: mini_s_h:
+; CHECK: mini_s.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 2)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @mini_u_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: mini_u_h:
+; CHECK: mini_u.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 2)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @ldi_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: ldi_h:
+; CHECK: ldi.h
+ %r = call <8 x i16> @llvm.mips.ldi.h(i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @sldi_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: sldi_h:
+; CHECK: sldi.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @slli_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: slli_h:
+; CHECK: slli.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @splati_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: splati_h:
+; CHECK: splati.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @srai_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: srai_h:
+; CHECK: srai.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @srari_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: srari_h:
+; CHECK: srari.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @srli_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: srli_h:
+; CHECK: srli.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define void @srlri_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: srlri_h:
+; CHECK: srlri.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 3)
+ store <8 x i16> %r, <8 x i16> * %ptr, align 16
+ ret void
+}
+
+define i32 @copy_s_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: copy_s_b:
+; CHECK: copy_s.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 1)
+ ret i32 %r
+}
+define i32 @copy_s_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: copy_s_h:
+; CHECK: copy_s.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 1)
+ ret i32 %r
+}
+define i32 @copy_s_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: copy_s_w:
+; CHECK: copy_s.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 1)
+ ret i32 %r
+}
+define i32 @copy_u_b(<16 x i8> * %ptr) {
+entry:
+; CHECK-LABEL: copy_u_b:
+; CHECK: copy_u.b
+ %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 1)
+ ret i32 %r
+}
+define i32 @copy_u_h(<8 x i16> * %ptr) {
+entry:
+; CHECK-LABEL: copy_u_h:
+; CHECK: copy_u.h
+ %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 1)
+ ret i32 %r
+}
+define i32 @copy_u_w(<4 x i32> * %ptr) {
+entry:
+; CHECK-LABEL: copy_u_w:
+; MSA32: copy_s.w
+; MSA64: copy_u.w
+ %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+ %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 1)
+ ret i32 %r
+}
+
+define i64 @copy_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: copy_s_d:
+; MSA32: copy_s.w
+; MSA32: copy_s.w
+; MSA64: copy_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 1)
+ ret i64 %r
+}
+
+define i64 @copy_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: copy_u_d:
+; MSA32: copy_s.w
+; MSA32: copy_s.w
+; MSA64: copy_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 1)
+ ret i64 %r
+}
+
+define void @addvi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: addvi_d:
+; CHECK: addvi.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @bclri_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: bclri_d:
+; CHECK: and.v
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @binsli_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: binsli_d:
+; CHECK: bsel.v
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @binsri_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: binsri_d:
+; CHECK: bsel.v
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @bnegi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: bnegi_d:
+; CHECK: xor.v
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @bseti_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: bseti_d:
+; CHECK: or.v
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @clei_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: clei_s_d:
+; CHECK: clei_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 15)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @clei_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: clei_u_d:
+; CHECK: clei_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @clti_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: clti_s_d:
+; CHECK: clti_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 15)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @clti_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: clti_u_d:
+; CHECK: clti_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 25)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @ldi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: ldi_d:
+; CHECK: ldi.d
+ %r = call <2 x i64> @llvm.mips.ldi.d(i32 3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_s_d:
+; CHECK: maxi_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 2)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @maxi_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: maxi_u_d:
+; CHECK: maxi_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 2)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @mini_s_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: mini_s_d:
+; CHECK: mini_s.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 2)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @mini_u_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: mini_u_d:
+; CHECK: mini_u.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 2)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @sldi_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: sldi_d:
+; CHECK: sldi.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @slli_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: slli_d:
+; CHECK: slli.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @srai_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: srai_d:
+; CHECK: srai.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @srari_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: srari_d:
+; CHECK: srari.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @srli_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: srli_d:
+; CHECK: srli.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+define void @srlri_d(<2 x i64> * %ptr) {
+entry:
+; CHECK-LABEL: srlri_d:
+; CHECK: srlri.d
+ %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+ %r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 3)
+ store <2 x i64> %r, <2 x i64> * %ptr, align 16
+ ret void
+}
+
+declare <8 x i16> @llvm.mips.ldi.h(i32)
+declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32)
+declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32)
+declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.ldi.w(i32)
+declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32)
+declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32)
+declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32)
+declare <2 x i64> @llvm.mips.ldi.d(i32)
+declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.clti.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.mini.u.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32)
+declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.splati.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32)
+declare <2 x i64> @llvm.mips.srlri.d(<2 x i64>, i32)
+declare <16 x i8> @llvm.mips.ldi.b(i32)
+declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32)
+declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32)
+declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32)
+declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32)
+declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32)
+declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32)
+declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32)
+declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32)
+declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32)
+declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32)
+declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32)