diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoB.td
@@ -816,12 +816,17 @@
           (CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
 def : Pat<(riscv_selectcc GPR:$rs2, (XLenVT 0), (XLenVT 17), GPR:$rs3, GPR:$rs1),
           (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshl GPR:$rs1, GPR:$rs2, GPR:$rs3),
+
+// fshl and fshr concatenate their operands in the same order. fsl and fsr
+// instructions use different orders. fshl will return its first operand for
+// a shift of zero, fshr will return its second operand. fsl and fsr both
+// return $rs1, so the patterns need different operand orders.
+def : Pat<(fshl GPR:$rs1, GPR:$rs3, GPR:$rs2),
           (FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshr GPR:$rs1, GPR:$rs2, GPR:$rs3),
+def : Pat<(fshr GPR:$rs3, GPR:$rs1, GPR:$rs2),
           (FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(fshr GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt),
-          (FSRI GPR:$rs1, GPR:$rs2, uimmlog2xlen:$shamt)>;
+def : Pat<(fshr GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
+          (FSRI GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt)>;
 } // Predicates = [HasStdExtZbt]
 
 let Predicates = [HasStdExtZbb] in {
@@ -1020,20 +1025,20 @@
 
 let Predicates = [HasStdExtZbt, IsRV64] in {
 def : Pat<(sext_inreg (fshl (assertsexti32 GPR:$rs1),
-                            (shl (assertsexti32 GPR:$rs2), (i64 32)),
-                            (and (assertsexti32 GPR:$rs3), (i64 31))),
+                            (shl (assertsexti32 GPR:$rs3), (i64 32)),
+                            (and (assertsexti32 GPR:$rs2), (i64 31))),
                       i32),
           (FSLW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs1),
-                            (shl (assertsexti32 GPR:$rs2), (i64 32)),
-                            (or (assertsexti32 GPR:$rs3), (i64 32))),
+def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs3),
+                            (shl (assertsexti32 GPR:$rs1), (i64 32)),
+                            (or (assertsexti32 GPR:$rs2), (i64 32))),
                       i32),
           (FSRW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
-def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs1),
-                            (shl (assertsexti32 GPR:$rs2), (i64 32)),
+def : Pat<(sext_inreg (fshr (assertsexti32 GPR:$rs3),
+                            (shl (assertsexti32 GPR:$rs1), (i64 32)),
                             uimm6gt32:$shamt), i32),
-          (FSRIW GPR:$rs1, GPR:$rs2, (ImmSub32 uimm6gt32:$shamt))>;
+          (FSRIW GPR:$rs1, GPR:$rs3, (ImmSub32 uimm6gt32:$shamt))>;
 } // Predicates = [HasStdExtZbt, IsRV64]
 
 let Predicates = [HasStdExtZbb, IsRV64] in {
diff --git a/llvm/test/CodeGen/RISCV/rv32Zbb.ll b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv32Zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbb.ll
@@ -246,7 +246,7 @@
 ; RV32IB-LABEL: sloi_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a2, zero, 1
-; RV32IB-NEXT:    fsl a1, a1, a2, a0
+; RV32IB-NEXT:    fsl a1, a1, a0, a2
 ; RV32IB-NEXT:    sloi a0, a0, 1
 ; RV32IB-NEXT:    ret
 ;
@@ -298,7 +298,7 @@
 ; RV32IB-LABEL: sroi_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a2, zero, 31
-; RV32IB-NEXT:    fsl a0, a1, a2, a0
+; RV32IB-NEXT:    fsl a0, a1, a0, a2
 ; RV32IB-NEXT:    sroi a1, a1, 1
 ; RV32IB-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rv32Zbbp.ll b/llvm/test/CodeGen/RISCV/rv32Zbbp.ll
--- a/llvm/test/CodeGen/RISCV/rv32Zbbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbbp.ll
@@ -694,8 +694,8 @@
 ; RV32IB-LABEL: rori_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a3, zero, 31
-; RV32IB-NEXT:    fsl a2, a1, a3, a0
-; RV32IB-NEXT:    fsl a1, a0, a3, a1
+; RV32IB-NEXT:    fsl a2, a1, a0, a3
+; RV32IB-NEXT:    fsl a1, a0, a1, a3
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
@@ -739,8 +739,8 @@
 ; RV32IB-LABEL: rori_i64_fshr:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a3, zero, 1
-; RV32IB-NEXT:    fsl a2, a0, a3, a1
-; RV32IB-NEXT:    fsl a1, a1, a3, a0
+; RV32IB-NEXT:    fsl a2, a0, a1, a3
+; RV32IB-NEXT:    fsl a1, a1, a0, a3
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rv32Zbt.ll b/llvm/test/CodeGen/RISCV/rv32Zbt.ll
--- a/llvm/test/CodeGen/RISCV/rv32Zbt.ll
+++ b/llvm/test/CodeGen/RISCV/rv32Zbt.ll
@@ -131,12 +131,12 @@
 ;
 ; RV32IB-LABEL: fshl_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsl a0, a0, a2, a1
+; RV32IB-NEXT:    fsl a0, a0, a1, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshl_i32:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    fsl a0, a0, a2, a1
+; RV32IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -227,7 +227,7 @@
 ; RV32IB-NEXT:    mv t0, zero
 ; RV32IB-NEXT:    bgez a1, .LBB5_8
 ; RV32IB-NEXT:  .LBB5_5:
-; RV32IB-NEXT:    fsl a1, a3, a6, a2
+; RV32IB-NEXT:    fsl a1, a3, a2, a6
 ; RV32IB-NEXT:    srl a1, a1, t1
 ; RV32IB-NEXT:    sub a2, a6, a5
 ; RV32IB-NEXT:    slli a3, t3, 1
@@ -275,7 +275,7 @@
 ; RV32IBT-NEXT:    mv t0, zero
 ; RV32IBT-NEXT:    bgez a5, .LBB5_8
 ; RV32IBT-NEXT:  .LBB5_5:
-; RV32IBT-NEXT:    fsl a2, a3, a6, a2
+; RV32IBT-NEXT:    fsl a2, a3, a2, a6
 ; RV32IBT-NEXT:    srl a1, a2, a1
 ; RV32IBT-NEXT:    sub a2, a6, t3
 ; RV32IBT-NEXT:    slli a3, t2, 1
@@ -315,12 +315,12 @@
 ;
 ; RV32IB-LABEL: fshr_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsr a0, a0, a2, a1
+; RV32IB-NEXT:    fsr a0, a1, a0, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshr_i32:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    fsr a0, a0, a2, a1
+; RV32IBT-NEXT:    fsr a0, a1, a0, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -414,7 +414,7 @@
 ; RV32IB-NEXT:    bgez a5, .LBB7_8
 ; RV32IB-NEXT:  .LBB7_5:
 ; RV32IB-NEXT:    addi a5, zero, 1
-; RV32IB-NEXT:    fsl a1, a1, a5, a0
+; RV32IB-NEXT:    fsl a1, a1, a0, a5
 ; RV32IB-NEXT:    sll a1, a1, t1
 ; RV32IB-NEXT:    sub a2, a6, a2
 ; RV32IB-NEXT:    lui a5, 524288
@@ -452,7 +452,7 @@
 ; RV32IBT-NEXT:    j .LBB7_3
 ; RV32IBT-NEXT:  .LBB7_2:
 ; RV32IBT-NEXT:    addi a5, zero, 1
-; RV32IBT-NEXT:    fsl a1, a1, a5, a0
+; RV32IBT-NEXT:    fsl a1, a1, a0, a5
 ; RV32IBT-NEXT:    sll a1, a1, a7
 ; RV32IBT-NEXT:    lui a5, 524288
 ; RV32IBT-NEXT:    addi a5, a5, -1
@@ -503,12 +503,12 @@
 ;
 ; RV32IB-LABEL: fshri_i32:
 ; RV32IB:       # %bb.0:
-; RV32IB-NEXT:    fsri a0, a0, a1, 5
+; RV32IB-NEXT:    fsri a0, a1, a0, 5
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshri_i32:
 ; RV32IBT:       # %bb.0:
-; RV32IBT-NEXT:    fsri a0, a0, a1, 5
+; RV32IBT-NEXT:    fsri a0, a1, a0, 5
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
   ret i32 %1
@@ -529,16 +529,16 @@
 ; RV32IB-LABEL: fshri_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a1, zero, 27
-; RV32IB-NEXT:    fsl a2, a3, a1, a2
-; RV32IB-NEXT:    fsl a1, a0, a1, a3
+; RV32IB-NEXT:    fsl a2, a3, a2, a1
+; RV32IB-NEXT:    fsl a1, a0, a3, a1
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshri_i64:
 ; RV32IBT:       # %bb.0:
 ; RV32IBT-NEXT:    addi a1, zero, 27
-; RV32IBT-NEXT:    fsl a2, a3, a1, a2
-; RV32IBT-NEXT:    fsl a1, a0, a1, a3
+; RV32IBT-NEXT:    fsl a2, a3, a2, a1
+; RV32IBT-NEXT:    fsl a1, a0, a3, a1
 ; RV32IBT-NEXT:    mv a0, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
@@ -556,13 +556,13 @@
 ; RV32IB-LABEL: fshli_i32:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a2, zero, 5
-; RV32IB-NEXT:    fsl a0, a0, a2, a1
+; RV32IB-NEXT:    fsl a0, a0, a1, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshli_i32:
 ; RV32IBT:       # %bb.0:
 ; RV32IBT-NEXT:    addi a2, zero, 5
-; RV32IBT-NEXT:    fsl a0, a0, a2, a1
+; RV32IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
   ret i32 %1
@@ -583,16 +583,16 @@
 ; RV32IB-LABEL: fshli_i64:
 ; RV32IB:       # %bb.0:
 ; RV32IB-NEXT:    addi a4, zero, 5
-; RV32IB-NEXT:    fsl a2, a0, a4, a3
-; RV32IB-NEXT:    fsl a1, a1, a4, a0
+; RV32IB-NEXT:    fsl a2, a0, a3, a4
+; RV32IB-NEXT:    fsl a1, a1, a0, a4
 ; RV32IB-NEXT:    mv a0, a2
 ; RV32IB-NEXT:    ret
 ;
 ; RV32IBT-LABEL: fshli_i64:
 ; RV32IBT:       # %bb.0:
 ; RV32IBT-NEXT:    addi a4, zero, 5
-; RV32IBT-NEXT:    fsl a2, a0, a4, a3
-; RV32IBT-NEXT:    fsl a1, a1, a4, a0
+; RV32IBT-NEXT:    fsl a2, a0, a3, a4
+; RV32IBT-NEXT:    fsl a1, a1, a0, a4
 ; RV32IBT-NEXT:    mv a0, a2
 ; RV32IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)
diff --git a/llvm/test/CodeGen/RISCV/rv64Zbt.ll b/llvm/test/CodeGen/RISCV/rv64Zbt.ll
--- a/llvm/test/CodeGen/RISCV/rv64Zbt.ll
+++ b/llvm/test/CodeGen/RISCV/rv64Zbt.ll
@@ -120,12 +120,12 @@
 ;
 ; RV64IB-LABEL: fshl_i32:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fslw a0, a0, a2, a1
+; RV64IB-NEXT:    fslw a0, a0, a1, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshl_i32:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fslw a0, a0, a2, a1
+; RV64IBT-NEXT:    fslw a0, a0, a1, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -145,12 +145,12 @@
 ;
 ; RV64IB-LABEL: fshl_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    fsl a0, a0, a1, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshl_i64:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsl a0, a0, a2, a1
+; RV64IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
   ret i64 %1
@@ -172,12 +172,12 @@
 ;
 ; RV64IB-LABEL: fshr_i32:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsrw a0, a0, a2, a1
+; RV64IB-NEXT:    fsrw a0, a1, a0, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshr_i32:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsrw a0, a0, a2, a1
+; RV64IBT-NEXT:    fsrw a0, a1, a0, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
   ret i32 %1
@@ -197,12 +197,12 @@
 ;
 ; RV64IB-LABEL: fshr_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsr a0, a0, a2, a1
+; RV64IB-NEXT:    fsr a0, a1, a0, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshr_i64:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsr a0, a0, a2, a1
+; RV64IBT-NEXT:    fsr a0, a1, a0, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
   ret i64 %1
@@ -219,12 +219,12 @@
 ;
 ; RV64IB-LABEL: fshri_i32:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsriw a0, a0, a1, 5
+; RV64IB-NEXT:    fsriw a0, a1, a0, 5
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshri_i32:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsriw a0, a0, a1, 5
+; RV64IBT-NEXT:    fsriw a0, a1, a0, 5
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
   ret i32 %1
@@ -240,12 +240,12 @@
 ;
 ; RV64IB-LABEL: fshri_i64:
 ; RV64IB:       # %bb.0:
-; RV64IB-NEXT:    fsri a0, a0, a1, 5
+; RV64IB-NEXT:    fsri a0, a1, a0, 5
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshri_i64:
 ; RV64IBT:       # %bb.0:
-; RV64IBT-NEXT:    fsri a0, a0, a1, 5
+; RV64IBT-NEXT:    fsri a0, a1, a0, 5
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
   ret i64 %1
@@ -264,7 +264,7 @@
 ; RV64IB:       # %bb.0:
 ; RV64IB-NEXT:    slli a1, a1, 32
 ; RV64IB-NEXT:    addi a2, zero, 5
-; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    fsl a0, a0, a1, a2
 ; RV64IB-NEXT:    sext.w a0, a0
 ; RV64IB-NEXT:    ret
 ;
@@ -272,7 +272,7 @@
 ; RV64IBT:       # %bb.0:
 ; RV64IBT-NEXT:    slli a1, a1, 32
 ; RV64IBT-NEXT:    addi a2, zero, 5
-; RV64IBT-NEXT:    fsl a0, a0, a2, a1
+; RV64IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV64IBT-NEXT:    sext.w a0, a0
 ; RV64IBT-NEXT:    ret
   %1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
@@ -290,13 +290,13 @@
 ; RV64IB-LABEL: fshli_i64:
 ; RV64IB:       # %bb.0:
 ; RV64IB-NEXT:    addi a2, zero, 5
-; RV64IB-NEXT:    fsl a0, a0, a2, a1
+; RV64IB-NEXT:    fsl a0, a0, a1, a2
 ; RV64IB-NEXT:    ret
 ;
 ; RV64IBT-LABEL: fshli_i64:
 ; RV64IBT:       # %bb.0:
 ; RV64IBT-NEXT:    addi a2, zero, 5
-; RV64IBT-NEXT:    fsl a0, a0, a2, a1
+; RV64IBT-NEXT:    fsl a0, a0, a1, a2
 ; RV64IBT-NEXT:    ret
   %1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)
   ret i64 %1
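
To make the operand-order note in the RISCVInstrInfoB.td comment above concrete, here is a minimal standalone C sketch. It is illustrative only and not part of the patch; the helper names fshl32, fshr32, fsl32, and fsr32 are invented for this example. fshl32/fshr32 follow the LangRef semantics of llvm.fshl/llvm.fshr, and fsl32/fsr32 model the Zbt fsl/fsr instructions for shift amounts below XLEN (the instructions' shamt >= XLEN operand-swap case is not modeled). The assertions check the mapping the corrected patterns use: fshl's first operand stays in rs1, while fshr's second operand must move into rs1, because both instructions return rs1 for a zero shift amount.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* llvm.fshl.i32(a, b, c): high 32 bits of ((a:b) << (c % 32));
   returns a when c % 32 == 0. */
static uint32_t fshl32(uint32_t a, uint32_t b, uint32_t c) {
  uint32_t s = c % 32;
  return s ? (a << s) | (b >> (32 - s)) : a;
}

/* llvm.fshr.i32(a, b, c): low 32 bits of ((a:b) >> (c % 32));
   returns b when c % 32 == 0. */
static uint32_t fshr32(uint32_t a, uint32_t b, uint32_t c) {
  uint32_t s = c % 32;
  return s ? (b >> s) | (a << (32 - s)) : b;
}

/* Zbt fsl rd, rs1, rs3, rs2: left funnel shift of the pair (rs1:rs3);
   returns rs1 for a zero shift amount. */
static uint32_t fsl32(uint32_t rs1, uint32_t rs3, uint32_t rs2) {
  uint32_t s = rs2 % 32; /* shamt >= XLEN swap case not modeled */
  return s ? (rs1 << s) | (rs3 >> (32 - s)) : rs1;
}

/* Zbt fsr rd, rs1, rs3, rs2: right funnel shift of the pair (rs3:rs1);
   also returns rs1 for a zero shift amount. */
static uint32_t fsr32(uint32_t rs1, uint32_t rs3, uint32_t rs2) {
  uint32_t s = rs2 % 32; /* shamt >= XLEN swap case not modeled */
  return s ? (rs1 >> s) | (rs3 << (32 - s)) : rs1;
}

int main(void) {
  uint32_t a = 0xdeadbeef, b = 0x12345678;
  for (uint32_t c = 0; c < 32; c++) {
    /* fshl(a, b, c) == fsl with rs1 = a: the first operands line up. */
    assert(fshl32(a, b, c) == fsl32(a, b, c));
    /* fshr(a, b, c) == fsr with rs1 = b: fshr yields its second
       operand for c == 0, so that operand must land in rs1. */
    assert(fshr32(a, b, c) == fsr32(b, a, c));
  }
  puts("operand orders agree");
  return 0;
}

The same zero-shift argument explains the FSRI and FSRIW pattern changes: the source pattern now matches fshr with $rs3 first and $rs1 second, so the operand that fshr returns for a zero shift amount is the one placed in the instruction's rs1 slot.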