diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4621,13 +4621,11 @@
   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
 
-  // If this is a sign-extended 32-bit constant, we can truncate it and rely
-  // on the instruction to sign-extend since SEW>XLEN.
-  if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
-    if (isInt<32>(CVal->getSExtValue())) {
-      ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
-      return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
-    }
+  // If this is a sign-extended 32-bit value, we can truncate it and rely on the
+  // instruction to sign-extend since SEW>XLEN.
+  if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
+    ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
+    return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
   }
 
   switch (IntNo) {
@@ -4749,8 +4747,6 @@
   }
 
   // We need to convert the scalar to a splat vector.
-  // FIXME: Can we implicitly truncate the scalar if it is known to
-  // be sign extended?
   SDValue VL = getVLOperand(Op);
   assert(VL.getValueType() == XLenVT);
   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll
@@ -1881,6 +1881,49 @@
   ret <vscale x 1 x i64> %a
 }
 
+define <vscale x 1 x i64> @intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV32-NEXT:    vadd.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %ext = sext i32 %1 to i64
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %ext,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32* %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lw a0, 0(a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0
+; CHECK-NEXT:    ret
+entry:
+  %load = load i32, i32* %1
+  %ext = sext i32 %load to i64
+  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> %0,
+    i64 %ext,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
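
Note (not part of the patch): the rewritten check relies on SelectionDAG::ComputeNumSignBits reporting more than 32 sign bits for the i64 scalar, which is exactly the condition under which truncating to i32 and letting the SEW=64 instruction re-sign-extend reproduces the original value. The standalone C++ sketch below illustrates that round-trip property; countSignBits is a hypothetical stand-in for ComputeNumSignBits, not LLVM code.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for SelectionDAG::ComputeNumSignBits: the number of
// leading bits that are copies of the sign bit, including the sign bit itself.
static unsigned countSignBits(int64_t V) {
  unsigned N = 1;
  while (N < 64 && ((V >> (63 - N)) & 1) == ((V >> 63) & 1))
    ++N;
  return N;
}

int main() {
  const int64_t Values[] = {-1, 42, INT32_MIN, INT32_MAX,
                            int64_t(INT32_MAX) + 1}; // last one has only 32 sign bits
  for (int64_t V : Values) {
    if (countSignBits(V) > 32) {
      // Mirrors the lowering: ISD::TRUNCATE the scalar to i32, then the
      // vadd.vx instruction sign-extends the XLEN-wide operand up to SEW=64.
      int32_t Narrow = static_cast<int32_t>(V);
      int64_t Widened = Narrow; // implicit sign extension back to 64 bits
      assert(Widened == V && "truncate + sign-extend must round-trip");
    }
  }
  return 0;
}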