diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -178,6 +178,7 @@
 
 // Floating point registers
 let RegAltNameIndices = [ABIRegAltName] in {
+  let CostPerUse = [0, 1] in {
   def F0_H  : RISCVReg16<0, "f0", ["ft0"]>, DwarfRegNum<[32]>;
   def F1_H  : RISCVReg16<1, "f1", ["ft1"]>, DwarfRegNum<[33]>;
   def F2_H  : RISCVReg16<2, "f2", ["ft2"]>, DwarfRegNum<[34]>;
@@ -186,6 +187,7 @@
   def F5_H  : RISCVReg16<5, "f5", ["ft5"]>, DwarfRegNum<[37]>;
   def F6_H  : RISCVReg16<6, "f6", ["ft6"]>, DwarfRegNum<[38]>;
   def F7_H  : RISCVReg16<7, "f7", ["ft7"]>, DwarfRegNum<[39]>;
+  }
   def F8_H  : RISCVReg16<8, "f8", ["fs0"]>, DwarfRegNum<[40]>;
   def F9_H  : RISCVReg16<9, "f9", ["fs1"]>, DwarfRegNum<[41]>;
   def F10_H : RISCVReg16<10,"f10", ["fa0"]>, DwarfRegNum<[42]>;
@@ -194,6 +196,7 @@
   def F13_H : RISCVReg16<13,"f13", ["fa3"]>, DwarfRegNum<[45]>;
   def F14_H : RISCVReg16<14,"f14", ["fa4"]>, DwarfRegNum<[46]>;
   def F15_H : RISCVReg16<15,"f15", ["fa5"]>, DwarfRegNum<[47]>;
+  let CostPerUse = [0, 1] in {
   def F16_H : RISCVReg16<16,"f16", ["fa6"]>, DwarfRegNum<[48]>;
   def F17_H : RISCVReg16<17,"f17", ["fa7"]>, DwarfRegNum<[49]>;
   def F18_H : RISCVReg16<18,"f18", ["fs2"]>, DwarfRegNum<[50]>;
@@ -210,16 +213,32 @@
   def F29_H : RISCVReg16<29,"f29", ["ft9"]>, DwarfRegNum<[61]>;
   def F30_H : RISCVReg16<30,"f30", ["ft10"]>, DwarfRegNum<[62]>;
   def F31_H : RISCVReg16<31,"f31", ["ft11"]>, DwarfRegNum<[63]>;
+  }
 
-  foreach Index = 0-31 in {
-    def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
-                     DwarfRegNum<[!add(Index, 32)]>;
+  foreach Index = 0-7 in {
+    let CostPerUse = [0, 1] in {
+      def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
+                       DwarfRegNum<[!add(Index, 32)]>;
+      def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
+                       DwarfRegNum<[!add(Index, 32)]>;
+    }
   }
 
-  foreach Index = 0-31 in {
+  foreach Index = 8-15 in {
+    def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
+                     DwarfRegNum<[!add(Index, 32)]>;
     def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
                      DwarfRegNum<[!add(Index, 32)]>;
   }
+
+  foreach Index = 16-31 in {
+    let CostPerUse = [0, 1] in {
+      def F#Index#_F : RISCVReg32<!cast<RISCVReg16>("F"#Index#"_H")>,
+                       DwarfRegNum<[!add(Index, 32)]>;
+      def F#Index#_D : RISCVReg64<!cast<RISCVReg32>("F"#Index#"_F")>,
+                       DwarfRegNum<[!add(Index, 32)]>;
+    }
+  }
 }
 
 // The order of registers represents the preferred allocation sequence,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -448,7 +448,7 @@
 ; CHECK-NEXT:    vsetvli a4, a0, e32, m8, ta, mu
 ; CHECK-NEXT:    beqz a4, .LBB8_3
 ; CHECK-NEXT:  # %bb.1: # %for.body.preheader
-; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    fmv.w.x fs1, a1
 ; CHECK-NEXT:  .LBB8_2: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vle32.v v8, (a2)
@@ -456,7 +456,7 @@
 ; CHECK-NEXT:    slli a1, a4, 2
 ; CHECK-NEXT:    add a2, a2, a1
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m8, tu, mu
-; CHECK-NEXT:    vfmacc.vf v16, ft0, v8
+; CHECK-NEXT:    vfmacc.vf v16, fs1, v8
 ; CHECK-NEXT:    vse32.v v16, (a3)
 ; CHECK-NEXT:    sub a0, a0, a4
 ; CHECK-NEXT:    vsetvli a4, a0, e32, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -193,9 +193,9 @@
 define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test10:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    fmv.d.x fs1, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    vfmv.s.f v8, fs1
 ; CHECK-NEXT:    ret
 entry:
   %x = tail call i64 @llvm.riscv.vsetvlimax(i64 3, i64 0)
@@ -207,9 +207,9 @@
 define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test11:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    fmv.d.x fs1, a0
 ; CHECK-NEXT:    vsetivli a0, 6, e64, m1, tu, mu
-; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    vfmv.s.f v8, fs1
 ; CHECK-NEXT:    ret
 entry:
   %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
@@ -221,10 +221,10 @@
 define <vscale x 1 x double> @test12(<vscale x 1 x double> %a, double %b, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: test12:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    fmv.d.x ft0, a0
+; CHECK-NEXT:    fmv.d.x fs1, a0
 ; CHECK-NEXT:    vsetivli zero, 9, e64, m1, tu, mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT:    vfmv.s.f v8, ft0
+; CHECK-NEXT:    vfmv.s.f v8, fs1
 ; CHECK-NEXT:    ret
 entry:
   %x = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(