Index: lib/Target/Mips/MipsSEISelDAGToDAG.cpp
===================================================================
--- lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -692,7 +692,7 @@
     // as the original value.
     if (ImmValue == ~(~ImmValue & ~(~ImmValue + 1))) {
-      Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), SDLoc(N),
+      Imm = CurDAG->getTargetConstant(ImmValue.countPopulation() - 1, SDLoc(N),
                                       EltTy);
       return true;
     }
@@ -724,7 +724,7 @@
     // Extract the run of set bits starting with bit zero, and test that the
     // result is the same as the original value
     if (ImmValue == (ImmValue & ~(ImmValue + 1))) {
-      Imm = CurDAG->getTargetConstant(ImmValue.countPopulation(), SDLoc(N),
+      Imm = CurDAG->getTargetConstant(ImmValue.countPopulation() - 1, SDLoc(N),
                                       EltTy);
       return true;
     }
Index: lib/Target/Mips/MipsSEISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsSEISelLowering.cpp
+++ lib/Target/Mips/MipsSEISelLowering.cpp
@@ -1643,7 +1643,7 @@
     if (Op->getConstantOperandVal(3) >= EltTy.getSizeInBits())
       report_fatal_error("Immediate out of range");
     APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
-                                       Op->getConstantOperandVal(3));
+                                       Op->getConstantOperandVal(3) + 1);
     return DAG.getNode(ISD::VSELECT, DL, VecTy,
                        DAG.getConstant(Mask, DL, VecTy, true), Op->getOperand(2),
                        Op->getOperand(1));
@@ -1658,7 +1658,7 @@
     if (Op->getConstantOperandVal(3) >= EltTy.getSizeInBits())
       report_fatal_error("Immediate out of range");
     APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
-                                      Op->getConstantOperandVal(3));
+                                      Op->getConstantOperandVal(3) + 1);
     return DAG.getNode(ISD::VSELECT, DL, VecTy,
                        DAG.getConstant(Mask, DL, VecTy, true), Op->getOperand(2),
                        Op->getOperand(1));
Index: test/CodeGen/Mips/msa/bitwise.ll
===================================================================
--- test/CodeGen/Mips/msa/bitwise.ll
+++ test/CodeGen/Mips/msa/bitwise.ll
@@ -1099,7 +1099,7 @@
                     i8 63, i8 63, i8 63, i8 63, i8 63, i8 63, i8 63, i8 63>
   %5 = or <16 x i8> %3, %4
-  ; CHECK-DAG: binsli.b [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsli.b [[R2]], [[R1]], 1
   store <16 x i8> %5, <16 x i8>* %c
   ; CHECK-DAG: st.b [[R2]], 0($4)
@@ -1119,7 +1119,7 @@
   %4 = and <8 x i16> %2,
   %5 = or <8 x i16> %3, %4
-  ; CHECK-DAG: binsli.h [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsli.h [[R2]], [[R1]], 1
   store <8 x i16> %5, <8 x i16>* %c
   ; CHECK-DAG: st.h [[R2]], 0($4)
@@ -1137,7 +1137,7 @@
   %3 = and <4 x i32> %1,
   %4 = and <4 x i32> %2,
   %5 = or <4 x i32> %3, %4
-  ; CHECK-DAG: binsli.w [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsli.w [[R2]], [[R1]], 1
   store <4 x i32> %5, <4 x i32>* %c
   ; CHECK-DAG: st.w [[R2]], 0($4)
@@ -1159,7 +1159,7 @@
   ; issue. If the mask doesn't fit within a 10-bit immediate, it gets
   ; legalized into a constant pool. We should add a test to cover the
   ; other cases once they correctly select binsli.d.
-  ; CHECK-DAG: binsli.d [[R2]], [[R1]], 61
+  ; CHECK-DAG: binsli.d [[R2]], [[R1]], 60
   store <2 x i64> %5, <2 x i64>* %c
   ; CHECK-DAG: st.d [[R2]], 0($4)
@@ -1181,7 +1181,7 @@
                     i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252, i8 252>
   %5 = or <16 x i8> %3, %4
-  ; CHECK-DAG: binsri.b [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsri.b [[R2]], [[R1]], 1
   store <16 x i8> %5, <16 x i8>* %c
   ; CHECK-DAG: st.b [[R2]], 0($4)
@@ -1201,7 +1201,7 @@
   %4 = and <8 x i16> %2,
   %5 = or <8 x i16> %3, %4
-  ; CHECK-DAG: binsri.h [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsri.h [[R2]], [[R1]], 1
   store <8 x i16> %5, <8 x i16>* %c
   ; CHECK-DAG: st.h [[R2]], 0($4)
@@ -1219,7 +1219,7 @@
   %3 = and <4 x i32> %1,
   %4 = and <4 x i32> %2,
   %5 = or <4 x i32> %3, %4
-  ; CHECK-DAG: binsri.w [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsri.w [[R2]], [[R1]], 1
   store <4 x i32> %5, <4 x i32>* %c
   ; CHECK-DAG: st.w [[R2]], 0($4)
@@ -1237,7 +1237,7 @@
   %3 = and <2 x i64> %1,
   %4 = and <2 x i64> %2,
   %5 = or <2 x i64> %3, %4
-  ; CHECK-DAG: binsri.d [[R2]], [[R1]], 2
+  ; CHECK-DAG: binsri.d [[R2]], [[R1]], 1
   store <2 x i64> %5, <2 x i64>* %c
   ; CHECK-DAG: st.d [[R2]], 0($4)
Index: test/CodeGen/Mips/msa/bmzi_bmnzi.ll
===================================================================
--- test/CodeGen/Mips/msa/bmzi_bmnzi.ll
+++ test/CodeGen/Mips/msa/bmzi_bmnzi.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=mipsel -mattr=+msa,+fp64 -relocation-model=pic < %s | FileCheck %s
+
+@llvm_mips_bmnzi_b_ARG1 = global <16 x i8> , align 16
+@llvm_mips_bmnzi_b_ARG2 = global <16 x i8> zeroinitializer, align 16
+@llvm_mips_bmnzi_b_RES = global <16 x i8> zeroinitializer, align 16
+
+define void @llvm_mips_bmnzi_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  %3 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
+  store <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  %4 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
+  store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  ret void
+}
+; CHECK: llvm_mips_bmnzi_b_test:
+; CHECK: lw [[R0:\$[0-9]+]], %got(llvm_mips_bmnzi_b_RES)(
+; CHECK: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG1)(
+; CHECK: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG2)(
+; CHECK: ld.b [[R3:\$w[0-9]+]], 0([[R2]])
+; CHECK: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK: move.v [[R5:\$w[0-9]+]], [[R4]]
+; CHECK: binsli.b [[R5]], [[R3]], 3
+; CHECK: binsri.b [[R5]], [[R3]], 3
+; CHECK: bmnzi.b [[R4]], [[R3]], 170
+
+define void @llvm_mips_bmzi_b_test() nounwind {
+entry:
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+  %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
+  store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  %3 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
+  store <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  %4 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
+  store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  ret void
+}
+; CHECK: llvm_mips_bmzi_b_test:
+; CHECK: lw [[R0:\$[0-9]+]], %got(llvm_mips_bmnzi_b_RES)(
+; CHECK: lw [[R1:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG2)(
+; CHECK: lw [[R2:\$[0-9]+]], %got(llvm_mips_bmnzi_b_ARG1)(
+; CHECK: ld.b [[R3:\$w[0-9]+]], 0([[R2]])
+; CHECK: ld.b [[R4:\$w[0-9]+]], 0([[R1]])
+; CHECK: move.v [[R5:\$w[0-9]+]], [[R4]]
+; CHECK: binsli.b [[R5]], [[R3]], 3
+; CHECK: binsri.b [[R5]], [[R3]], 3
+; bmnzi.b is the same as bmzi.b with ws and wd_in swapped
+; CHECK: bmnzi.b [[R4]], [[R3]], 170
+
+declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind
+declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind
Index: test/CodeGen/Mips/msa/i5-b.ll
===================================================================
--- test/CodeGen/Mips/msa/i5-b.ll
+++ test/CodeGen/Mips/msa/i5-b.ll
@@ -89,7 +89,7 @@
 entry:
   %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1
   %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2
-  %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+  %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 6)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
   ret void
 }
@@ -101,7 +101,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_b_ARG2)(
 ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsli.b [[R3]], [[R4]], 7
+; CHECK-DAG: binsli.b [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_b_RES)(
 ; CHECK-DAG: st.b [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsli_b_test
@@ -114,7 +114,7 @@
 entry:
   %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG1
   %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG2
-  %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7)
+  %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 6)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES
   ret void
 }
@@ -126,7 +126,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_h_ARG2)(
 ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsli.h [[R3]], [[R4]], 7
+; CHECK-DAG: binsli.h [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_h_RES)(
 ; CHECK-DAG: st.h [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsli_h_test
@@ -139,7 +139,7 @@
 entry:
   %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG1
   %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG2
-  %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7)
+  %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 6)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES
   ret void
 }
@@ -151,7 +151,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_w_ARG2)(
 ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsli.w [[R3]], [[R4]], 7
+; CHECK-DAG: binsli.w [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_w_RES)(
 ; CHECK-DAG: st.w [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsli_w_test
@@ -168,7 +168,7 @@
   ; issue. If the mask doesn't fit within a 10-bit immediate, it gets
   ; legalized into a constant pool. We should add a test to cover the
   ; other cases once they correctly select binsli.d.
-  %2 = tail call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %0, <2 x i64> %1, i32 61)
+  %2 = tail call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %0, <2 x i64> %1, i32 60)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_binsli_d_RES
   ret void
 }
@@ -180,7 +180,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsli_d_ARG2)(
 ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsli.d [[R3]], [[R4]], 61
+; CHECK-DAG: binsli.d [[R3]], [[R4]], 60
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsli_d_RES)(
 ; CHECK-DAG: st.d [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsli_d_test
@@ -193,7 +193,7 @@
 entry:
   %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1
   %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2
-  %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7)
+  %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 6)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
   ret void
 }
@@ -205,7 +205,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_b_ARG2)(
 ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.b [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsri.b [[R3]], [[R4]], 7
+; CHECK-DAG: binsri.b [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_b_RES)(
 ; CHECK-DAG: st.b [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsri_b_test
@@ -218,7 +218,7 @@
 entry:
   %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG1
   %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG2
-  %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7)
+  %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 6)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES
   ret void
 }
@@ -230,7 +230,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_h_ARG2)(
 ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.h [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsri.h [[R3]], [[R4]], 7
+; CHECK-DAG: binsri.h [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_h_RES)(
 ; CHECK-DAG: st.h [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsri_h_test
@@ -243,7 +243,7 @@
 entry:
   %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG1
   %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG2
-  %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7)
+  %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 6)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES
   ret void
 }
@@ -255,7 +255,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_w_ARG2)(
 ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.w [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsri.w [[R3]], [[R4]], 7
+; CHECK-DAG: binsri.w [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_w_RES)(
 ; CHECK-DAG: st.w [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsri_w_test
@@ -268,7 +268,7 @@
 entry:
   %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG1
   %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG2
-  %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7)
+  %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 6)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES
   ret void
 }
@@ -280,7 +280,7 @@
 ; CHECK-DAG: lw [[R2:\$[0-9]+]], %got(llvm_mips_binsri_d_ARG2)(
 ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[R1]])
 ; CHECK-DAG: ld.d [[R4:\$w[0-9]+]], 0([[R2]])
-; CHECK-DAG: binsri.d [[R3]], [[R4]], 7
+; CHECK-DAG: binsri.d [[R3]], [[R4]], 6
 ; CHECK-DAG: lw [[R5:\$[0-9]+]], %got(llvm_mips_binsri_d_RES)(
 ; CHECK-DAG: st.d [[R3]], 0([[R5]])
 ; CHECK: .size llvm_mips_binsri_d_test
Index: test/CodeGen/Mips/msa/immediates.ll
===================================================================
--- test/CodeGen/Mips/msa/immediates.ll
+++ test/CodeGen/Mips/msa/immediates.ll
@@ -44,7 +44,7 @@
 ; CHECK: binsli.b
   %a = load <16 x i8>, <16 x i8> * %ptr, align 16
   %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
-  %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %b, i32 3)
+  %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %b, i32 2)
   store <16 x i8> %r, <16 x i8> * %ptr, align 16
   ret void
 }
@@ -55,7 +55,7 @@
 ; CHECK: binsri.b
   %a = load <16 x i8>, <16 x i8> * %ptr, align 16
   %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
-  %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %b, i32 5)
+  %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %b, i32 4)
   store <16 x i8> %r, <16 x i8> * %ptr, align 16
   ret void
 }
@@ -352,7 +352,7 @@
 ; CHECK: binsli.w
   %a = load <4 x i32>, <4 x i32> * %ptr, align 16
   %b = load <4 x i32>, <4 x i32> * %ptr2, align 16
-  %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %b, i32 25)
+  %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %b, i32 24)
   store <4 x i32> %r, <4 x i32> * %ptr, align 16
   ret void
 }
@@ -363,7 +363,7 @@
 ; CHECK: binsri.w
   %a = load <4 x i32>, <4 x i32> * %ptr, align 16
   %b = load <4 x i32>, <4 x i32> * %ptr2, align 16
-  %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %b, i32 25)
+  %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %b, i32 24)
   store <4 x i32> %r, <4 x i32> * %ptr, align 16
   ret void
 }
@@ -605,7 +605,7 @@
 ; CHECK: binsli.h
   %a = load <8 x i16>, <8 x i16> * %ptr, align 16
   %b = load <8 x i16>, <8 x i16> * %ptr2, align 16
-  %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %b, i32 8)
+  %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %b, i32 7)
   store <8 x i16> %r, <8 x i16> * %ptr, align 16
   ret void
 }
@@ -616,7 +616,7 @@
 ; CHECK: binsri.h
   %a = load <8 x i16>, <8 x i16> * %ptr, align 16
   %b = load <8 x i16>, <8 x i16> * %ptr2, align 16
-  %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 15)
+  %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 14)
   store <8 x i16> %r, <8 x i16> * %ptr, align 16
   ret void
 }
@@ -933,7 +933,7 @@
 ; CHECK: bsel.v
   %a = load <2 x i64>, <2 x i64> * %ptr, align 16
   %b = load <2 x i64>, <2 x i64> * %ptr2, align 16
-  %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %b, i32 4)
+  %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %b, i32 3)
   store <2 x i64> %r, <2 x i64> * %ptr, align 16
   ret void
 }
@@ -944,7 +944,7 @@
 ; CHECK: binsri.d
   %a = load <2 x i64>, <2 x i64> * %ptr, align 16
   %b = load <2 x i64>, <2 x i64> * %ptr2, align 16
-  %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %b, i32 5)
+  %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %b, i32 4)
   store <2 x i64> %r, <2 x i64> * %ptr, align 16
   ret void
 }
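
For reference, the mapping this patch corrects is that an MSA binsli/binsri immediate of m inserts m + 1 bits, so a contiguous mask of N set bits must be encoded as N - 1, and an immediate must be decoded back into a mask of imm + 1 bits when lowering the intrinsics. The standalone C++ sketch below mirrors that mapping under those assumptions; the helper names (lowRunToImmediate, immediateToLowMask) are illustrative only and are not LLVM APIs.

#include <cassert>
#include <cstdint>
#include <optional>

// Selection direction: a mask that is a contiguous run of set bits starting
// at bit zero (e.g. 0x3f) encodes as popcount(mask) - 1, because the
// instruction inserts imm + 1 low-order bits.
std::optional<unsigned> lowRunToImmediate(uint64_t Mask) {
  // A run of set bits starting at bit zero satisfies Mask == Mask & ~(Mask + 1).
  if (Mask != 0 && Mask == (Mask & ~(Mask + 1)))
    return __builtin_popcountll(Mask) - 1;
  return std::nullopt; // Not a contiguous low-bit mask.
}

// Lowering direction: immediate m selects the m + 1 low-order bits of each
// element (the binsli case is symmetric, taking the high-order bits instead).
uint64_t immediateToLowMask(unsigned Imm, unsigned EltBits) {
  assert(Imm < EltBits && "Immediate out of range");
  unsigned NumBits = Imm + 1;
  return NumBits == 64 ? ~0ULL : ((1ULL << NumBits) - 1);
}

int main() {
  assert(lowRunToImmediate(0x3f) == 5u);        // six set bits -> immediate 5
  assert(immediateToLowMask(5, 8) == 0x3f);     // immediate 5 -> six-bit mask
  assert(!lowRunToImmediate(0x05).has_value()); // 0b101 is not contiguous
  return 0;
}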