diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -65,6 +65,8 @@
   bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
   bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override;
 
+  bool enableShrinkWrapping(const MachineFunction &MF) const override;
+
   bool isSupportedStackID(TargetStackID::Value ID) const override;
   TargetStackID::Value getStackIDForScalableVectors() const override;
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1087,6 +1087,14 @@
   return true;
 }
 
+bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
+  // Keep the conventional code flow when not optimizing.
+  if (MF.getFunction().hasOptNone())
+    return false;
+
+  return true;
+}
+
 bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
   MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
   const MachineFunction *MF = MBB.getParent();
diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -137,11 +137,11 @@
 define i8 @test_cttz_i8(i8 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i8:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: andi a1, a0, 255
 ; RV32I-NEXT: beqz a1, .LBB3_2
 ; RV32I-NEXT: # %bb.1: # %cond.false
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: addi a1, a0, -1
 ; RV32I-NEXT: not a0, a0
 ; RV32I-NEXT: and a0, a0, a1
@@ -165,21 +165,20 @@
 ; RV32I-NEXT: addi a1, a1, 257
 ; RV32I-NEXT: call __mulsi3@plt
 ; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: j .LBB3_3
-; RV32I-NEXT: .LBB3_2:
-; RV32I-NEXT: addi a0, zero, 8
-; RV32I-NEXT: .LBB3_3: # %cond.end
 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: ret
+; RV32I-NEXT: .LBB3_2:
+; RV32I-NEXT: addi a0, zero, 8
+; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: test_cttz_i8:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: andi a1, a0, 255
 ; RV64I-NEXT: beqz a1, .LBB3_2
 ; RV64I-NEXT: # %bb.1: # %cond.false
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT: addi a1, a0, -1
 ; RV64I-NEXT: not a0, a0
 ; RV64I-NEXT: and a0, a0, a1
@@ -225,12 +224,11 @@
 ; RV64I-NEXT: addi a1, a1, 257
 ; RV64I-NEXT: call __muldi3@plt
 ; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: j .LBB3_3
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: addi a0, zero, 8
-; RV64I-NEXT: .LBB3_3: # %cond.end
 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT: addi sp, sp, 16
+; RV64I-NEXT: ret
+; RV64I-NEXT: .LBB3_2:
+; RV64I-NEXT: addi a0, zero, 8
 ; RV64I-NEXT: ret
   %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
   ret i8 %tmp
@@ -239,13 +237,13 @@
 define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i16:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: addi sp, sp, -16
-; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT: lui a1, 16
 ; RV32I-NEXT: addi a1, a1, -1
 ; RV32I-NEXT: and a1, a0, a1
 ; RV32I-NEXT: beqz a1, .LBB4_2
 ; RV32I-NEXT: # %bb.1: # %cond.false
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) #
4-byte Folded Spill ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 @@ -269,23 +267,22 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 -; RV32I-NEXT: j .LBB4_3 -; RV32I-NEXT: .LBB4_2: -; RV32I-NEXT: addi a0, zero, 16 -; RV32I-NEXT: .LBB4_3: # %cond.end ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret +; RV32I-NEXT: .LBB4_2: +; RV32I-NEXT: addi a0, zero, 16 +; RV32I-NEXT: ret ; ; RV64I-LABEL: test_cttz_i16: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: lui a1, 16 ; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: and a1, a0, a1 ; RV64I-NEXT: beqz a1, .LBB4_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: addi a1, a0, -1 ; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: and a0, a0, a1 @@ -331,12 +328,11 @@ ; RV64I-NEXT: addi a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 -; RV64I-NEXT: j .LBB4_3 -; RV64I-NEXT: .LBB4_2: -; RV64I-NEXT: addi a0, zero, 16 -; RV64I-NEXT: .LBB4_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB4_2: +; RV64I-NEXT: addi a0, zero, 16 ; RV64I-NEXT: ret %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false) ret i16 %tmp @@ -345,10 +341,10 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: test_cttz_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: beqz a0, .LBB5_2 ; RV32I-NEXT: # %bb.1: # %cond.false +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 @@ -372,21 +368,20 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 -; RV32I-NEXT: j .LBB5_3 -; RV32I-NEXT: .LBB5_2: -; RV32I-NEXT: addi a0, zero, 32 -; RV32I-NEXT: .LBB5_3: # %cond.end ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret +; RV32I-NEXT: .LBB5_2: +; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: ret ; ; RV64I-LABEL: test_cttz_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a1, a0 ; RV64I-NEXT: beqz a1, .LBB5_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: addi a1, a0, -1 ; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: and a0, a0, a1 @@ -432,12 +427,11 @@ ; RV64I-NEXT: addi a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 -; RV64I-NEXT: j .LBB5_3 -; RV64I-NEXT: .LBB5_2: -; RV64I-NEXT: addi a0, zero, 32 -; RV64I-NEXT: .LBB5_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB5_2: +; RV64I-NEXT: addi a0, zero, 32 ; RV64I-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) ret i32 %tmp @@ -446,10 +440,10 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: test_ctlz_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: beqz a0, .LBB6_2 ; RV32I-NEXT: # %bb.1: # %cond.false +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: srli a1, a0, 1 ; RV32I-NEXT: or a0, a0, a1 ; 
RV32I-NEXT: srli a1, a0, 2 @@ -481,21 +475,20 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 -; RV32I-NEXT: j .LBB6_3 -; RV32I-NEXT: .LBB6_2: -; RV32I-NEXT: addi a0, zero, 32 -; RV32I-NEXT: .LBB6_3: # %cond.end ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret +; RV32I-NEXT: .LBB6_2: +; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: ret ; ; RV64I-LABEL: test_ctlz_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sext.w a1, a0 ; RV64I-NEXT: beqz a1, .LBB6_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: srliw a1, a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 @@ -554,12 +547,11 @@ ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 ; RV64I-NEXT: addi a0, a0, -32 -; RV64I-NEXT: j .LBB6_3 -; RV64I-NEXT: .LBB6_2: -; RV64I-NEXT: addi a0, zero, 32 -; RV64I-NEXT: .LBB6_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB6_2: +; RV64I-NEXT: addi a0, zero, 32 ; RV64I-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) ret i32 %tmp @@ -640,10 +632,10 @@ ; ; RV64I-LABEL: test_cttz_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: beqz a0, .LBB7_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: addi a1, a0, -1 ; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: and a0, a0, a1 @@ -689,12 +681,11 @@ ; RV64I-NEXT: addi a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 -; RV64I-NEXT: j .LBB7_3 -; RV64I-NEXT: .LBB7_2: -; RV64I-NEXT: addi a0, zero, 64 -; RV64I-NEXT: .LBB7_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret +; RV64I-NEXT: .LBB7_2: +; RV64I-NEXT: addi a0, zero, 64 ; RV64I-NEXT: ret %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false) ret i64 %tmp diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll @@ -10,28 +10,24 @@ define void @br_fcmp_false(double %a, double %b) nounwind { ; RV32IFD-LABEL: br_fcmp_false: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IFD-NEXT: addi a0, zero, 1 ; RV32IFD-NEXT: bnez a0, .LBB0_2 ; RV32IFD-NEXT: # %bb.1: # %if.then -; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB0_2: # %if.else +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IFD-NEXT: call abort@plt ; ; RV64IFD-LABEL: br_fcmp_false: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: addi a0, zero, 1 ; RV64IFD-NEXT: bnez a0, .LBB0_2 ; RV64IFD-NEXT: # %bb.1: # %if.then -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB0_2: # %if.else +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp false double %a, %b br i1 %1, label %if.then, label %if.else @@ -64,17 +60,15 @@ ; ; RV64IFD-LABEL: 
br_fcmp_oeq: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: feq.d a0, ft1, ft0 ; RV64IFD-NEXT: bnez a0, .LBB1_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB1_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp oeq double %a, %b br i1 %1, label %if.then, label %if.else @@ -110,17 +104,15 @@ ; ; RV64IFD-LABEL: br_fcmp_oeq_alt: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: feq.d a0, ft1, ft0 ; RV64IFD-NEXT: bnez a0, .LBB2_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB2_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp oeq double %a, %b br i1 %1, label %if.then, label %if.else @@ -153,17 +145,15 @@ ; ; RV64IFD-LABEL: br_fcmp_ogt: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: flt.d a0, ft1, ft0 ; RV64IFD-NEXT: bnez a0, .LBB3_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB3_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ogt double %a, %b br i1 %1, label %if.then, label %if.else @@ -196,17 +186,15 @@ ; ; RV64IFD-LABEL: br_fcmp_oge: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: fle.d a0, ft1, ft0 ; RV64IFD-NEXT: bnez a0, .LBB4_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB4_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp oge double %a, %b br i1 %1, label %if.then, label %if.else @@ -239,17 +227,15 @@ ; ; RV64IFD-LABEL: br_fcmp_olt: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: flt.d a0, ft1, ft0 ; RV64IFD-NEXT: bnez a0, .LBB5_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB5_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp olt double %a, %b br i1 %1, label %if.then, label %if.else @@ -282,17 +268,15 @@ ; ; RV64IFD-LABEL: br_fcmp_ole: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: fle.d a0, ft1, ft0 ; RV64IFD-NEXT: bnez a0, .LBB6_2 ; RV64IFD-NEXT: # 
%bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB6_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ole double %a, %b br i1 %1, label %if.then, label %if.else @@ -327,8 +311,6 @@ ; ; RV64IFD-LABEL: br_fcmp_one: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: flt.d a0, ft1, ft0 @@ -336,10 +318,10 @@ ; RV64IFD-NEXT: or a0, a1, a0 ; RV64IFD-NEXT: bnez a0, .LBB7_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB7_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp one double %a, %b br i1 %1, label %if.then, label %if.else @@ -374,8 +356,6 @@ ; ; RV64IFD-LABEL: br_fcmp_ord: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: feq.d a0, ft1, ft1 @@ -383,10 +363,10 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: bnez a0, .LBB8_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB8_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ord double %a, %b br i1 %1, label %if.then, label %if.else @@ -421,8 +401,6 @@ ; ; RV64IFD-LABEL: br_fcmp_ueq: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: flt.d a0, ft1, ft0 @@ -430,10 +408,10 @@ ; RV64IFD-NEXT: or a0, a1, a0 ; RV64IFD-NEXT: beqz a0, .LBB9_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB9_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ueq double %a, %b br i1 %1, label %if.then, label %if.else @@ -466,17 +444,15 @@ ; ; RV64IFD-LABEL: br_fcmp_ugt: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: fle.d a0, ft1, ft0 ; RV64IFD-NEXT: beqz a0, .LBB10_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB10_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ugt double %a, %b br i1 %1, label %if.then, label %if.else @@ -509,17 +485,15 @@ ; ; RV64IFD-LABEL: br_fcmp_uge: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: flt.d a0, ft1, ft0 ; RV64IFD-NEXT: beqz a0, .LBB11_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: 
ret ; RV64IFD-NEXT: .LBB11_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp uge double %a, %b br i1 %1, label %if.then, label %if.else @@ -552,17 +526,15 @@ ; ; RV64IFD-LABEL: br_fcmp_ult: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: fle.d a0, ft1, ft0 ; RV64IFD-NEXT: beqz a0, .LBB12_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB12_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ult double %a, %b br i1 %1, label %if.then, label %if.else @@ -595,17 +567,15 @@ ; ; RV64IFD-LABEL: br_fcmp_ule: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: flt.d a0, ft1, ft0 ; RV64IFD-NEXT: beqz a0, .LBB13_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB13_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp ule double %a, %b br i1 %1, label %if.then, label %if.else @@ -638,17 +608,15 @@ ; ; RV64IFD-LABEL: br_fcmp_une: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 ; RV64IFD-NEXT: feq.d a0, ft1, ft0 ; RV64IFD-NEXT: beqz a0, .LBB14_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB14_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp une double %a, %b br i1 %1, label %if.then, label %if.else @@ -684,8 +652,6 @@ ; ; RV64IFD-LABEL: br_fcmp_uno: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: feq.d a0, ft1, ft1 @@ -693,10 +659,10 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: beqz a0, .LBB15_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB15_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp uno double %a, %b br i1 %1, label %if.then, label %if.else @@ -710,28 +676,24 @@ define void @br_fcmp_true(double %a, double %b) nounwind { ; RV32IFD-LABEL: br_fcmp_true: ; RV32IFD: # %bb.0: -; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IFD-NEXT: addi a0, zero, 1 ; RV32IFD-NEXT: bnez a0, .LBB16_2 ; RV32IFD-NEXT: # %bb.1: # %if.else -; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; RV32IFD-NEXT: .LBB16_2: # %if.then +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IFD-NEXT: call abort@plt ; ; RV64IFD-LABEL: br_fcmp_true: ; RV64IFD: # %bb.0: -; 
RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: addi a0, zero, 1 ; RV64IFD-NEXT: bnez a0, .LBB16_2 ; RV64IFD-NEXT: # %bb.1: # %if.else -; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; RV64IFD-NEXT: .LBB16_2: # %if.then +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt %1 = fcmp true double %a, %b br i1 %1, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll --- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll +++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll @@ -35,12 +35,12 @@ ; ; RV64IFD-LABEL: func: ; RV64IFD: # %bb.0: # %entry -; RV64IFD-NEXT: addi sp, sp, -16 -; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: sext.w a2, a1 ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: beqz a2, .LBB0_2 ; RV64IFD-NEXT: # %bb.1: # %if.else +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: addiw a1, a1, -1 ; RV64IFD-NEXT: fmv.x.d a0, ft0 ; RV64IFD-NEXT: fsd ft0, 0(sp) # 8-byte Folded Spill @@ -48,11 +48,13 @@ ; RV64IFD-NEXT: fmv.d.x ft0, a0 ; RV64IFD-NEXT: fld ft1, 0(sp) # 8-byte Folded Reload ; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 -; RV64IFD-NEXT: .LBB0_2: # %return ; RV64IFD-NEXT: fmv.x.d a0, ft0 ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret +; RV64IFD-NEXT: .LBB0_2: # %return +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret entry: %cmp = icmp eq i32 %n, 0 br i1 %cmp, label %return, label %if.else diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll @@ -11,28 +11,24 @@ define void @br_fcmp_false(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_false: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: addi a0, zero, 1 ; RV32IF-NEXT: bnez a0, .LBB0_2 ; RV32IF-NEXT: # %bb.1: # %if.then -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB0_2: # %if.else +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_false: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: addi a0, zero, 1 ; RV64IF-NEXT: bnez a0, .LBB0_2 ; RV64IF-NEXT: # %bb.1: # %if.then -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB0_2: # %if.else +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp false float %a, %b br i1 %1, label %if.then, label %if.else @@ -46,32 +42,28 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_oeq: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: feq.s a0, ft1, ft0 ; RV32IF-NEXT: bnez a0, .LBB1_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB1_2: # 
%if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_oeq: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: feq.s a0, ft1, ft0 ; RV64IF-NEXT: bnez a0, .LBB1_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB1_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp oeq float %a, %b br i1 %1, label %if.then, label %if.else @@ -88,32 +80,28 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_oeq_alt: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: feq.s a0, ft1, ft0 ; RV32IF-NEXT: bnez a0, .LBB2_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB2_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_oeq_alt: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: feq.s a0, ft1, ft0 ; RV64IF-NEXT: bnez a0, .LBB2_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB2_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp oeq float %a, %b br i1 %1, label %if.then, label %if.else @@ -127,32 +115,28 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ogt: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: bnez a0, .LBB3_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB3_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ogt: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: bnez a0, .LBB3_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB3_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ogt float %a, %b br i1 %1, label %if.then, label %if.else @@ -166,32 +150,28 @@ define void @br_fcmp_oge(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_oge: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; 
RV32IF-NEXT: bnez a0, .LBB4_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB4_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_oge: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: fle.s a0, ft1, ft0 ; RV64IF-NEXT: bnez a0, .LBB4_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB4_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp oge float %a, %b br i1 %1, label %if.then, label %if.else @@ -205,32 +185,28 @@ define void @br_fcmp_olt(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_olt: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: bnez a0, .LBB5_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB5_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_olt: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: bnez a0, .LBB5_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB5_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp olt float %a, %b br i1 %1, label %if.then, label %if.else @@ -244,32 +220,28 @@ define void @br_fcmp_ole(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ole: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: bnez a0, .LBB6_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB6_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ole: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: fle.s a0, ft1, ft0 ; RV64IF-NEXT: bnez a0, .LBB6_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB6_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ole float %a, %b br i1 %1, label %if.then, label %if.else @@ -283,8 +255,6 @@ define void @br_fcmp_one(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_one: ; RV32IF: # %bb.0: -; 
RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 @@ -292,16 +262,14 @@ ; RV32IF-NEXT: or a0, a1, a0 ; RV32IF-NEXT: bnez a0, .LBB7_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB7_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_one: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 @@ -309,10 +277,10 @@ ; RV64IF-NEXT: or a0, a1, a0 ; RV64IF-NEXT: bnez a0, .LBB7_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB7_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp one float %a, %b br i1 %1, label %if.then, label %if.else @@ -326,8 +294,6 @@ define void @br_fcmp_ord(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ord: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: feq.s a0, ft1, ft1 @@ -335,16 +301,14 @@ ; RV32IF-NEXT: and a0, a1, a0 ; RV32IF-NEXT: bnez a0, .LBB8_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB8_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ord: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: feq.s a0, ft1, ft1 @@ -352,10 +316,10 @@ ; RV64IF-NEXT: and a0, a1, a0 ; RV64IF-NEXT: bnez a0, .LBB8_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB8_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ord float %a, %b br i1 %1, label %if.then, label %if.else @@ -369,8 +333,6 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ueq: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 @@ -378,16 +340,14 @@ ; RV32IF-NEXT: or a0, a1, a0 ; RV32IF-NEXT: beqz a0, .LBB9_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB9_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ueq: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 @@ -395,10 +355,10 @@ ; RV64IF-NEXT: or a0, a1, a0 ; RV64IF-NEXT: beqz a0, .LBB9_2 
; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB9_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ueq float %a, %b br i1 %1, label %if.then, label %if.else @@ -412,32 +372,28 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ugt: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB10_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB10_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ugt: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: fle.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB10_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB10_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ugt float %a, %b br i1 %1, label %if.then, label %if.else @@ -451,32 +407,28 @@ define void @br_fcmp_uge(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_uge: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB11_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB11_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_uge: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB11_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB11_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp uge float %a, %b br i1 %1, label %if.then, label %if.else @@ -490,32 +442,28 @@ define void @br_fcmp_ult(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ult: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: fle.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB12_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB12_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ult: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi 
sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: fle.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB12_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB12_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ult float %a, %b br i1 %1, label %if.then, label %if.else @@ -529,32 +477,28 @@ define void @br_fcmp_ule(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_ule: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB13_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB13_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_ule: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB13_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB13_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp ule float %a, %b br i1 %1, label %if.then, label %if.else @@ -568,32 +512,28 @@ define void @br_fcmp_une(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_une: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: feq.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB14_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB14_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_une: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: feq.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB14_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB14_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp une float %a, %b br i1 %1, label %if.then, label %if.else @@ -608,8 +548,6 @@ ; TODO: sltiu+bne -> beq ; RV32IF-LABEL: br_fcmp_uno: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: feq.s a0, ft1, ft1 @@ -617,16 +555,14 @@ ; RV32IF-NEXT: and a0, a1, a0 ; RV32IF-NEXT: beqz a0, .LBB15_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; 
RV32IF-NEXT: .LBB15_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_uno: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: feq.s a0, ft1, ft1 @@ -634,10 +570,10 @@ ; RV64IF-NEXT: and a0, a1, a0 ; RV64IF-NEXT: beqz a0, .LBB15_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB15_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp uno float %a, %b br i1 %1, label %if.then, label %if.else @@ -651,28 +587,24 @@ define void @br_fcmp_true(float %a, float %b) nounwind { ; RV32IF-LABEL: br_fcmp_true: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: addi a0, zero, 1 ; RV32IF-NEXT: bnez a0, .LBB16_2 ; RV32IF-NEXT: # %bb.1: # %if.else -; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; RV32IF-NEXT: .LBB16_2: # %if.then +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: call abort@plt ; ; RV64IF-LABEL: br_fcmp_true: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: addi a0, zero, 1 ; RV64IF-NEXT: bnez a0, .LBB16_2 ; RV64IF-NEXT: # %bb.1: # %if.else -; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IF-NEXT: addi sp, sp, 16 ; RV64IF-NEXT: ret ; RV64IF-NEXT: .LBB16_2: # %if.then +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IF-NEXT: call abort@plt %1 = fcmp true float %a, %b br i1 %1, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll --- a/llvm/test/CodeGen/RISCV/frame-info.ll +++ b/llvm/test/CodeGen/RISCV/frame-info.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 < %s | FileCheck -check-prefix=RV32 %s -; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=RV64 %s +; RUN: llc -mtriple=riscv32 < %s \ +; RUN: | FileCheck -check-prefix=RV32 %s +; RUN: llc -mtriple=riscv64 < %s \ +; RUN: | FileCheck -check-prefix=RV64 %s ; RUN: llc -mtriple=riscv32 -frame-pointer=all -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32-WITHFP %s ; RUN: llc -mtriple=riscv64 -frame-pointer=all -verify-machineinstrs < %s \ @@ -144,17 +146,15 @@ define void @branch_and_tail_call(i1 %a) { ; RV32-LABEL: branch_and_tail_call: ; RV32: # %bb.0: -; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: andi a0, a0, 1 ; RV32-NEXT: beqz a0, .LBB2_2 ; RV32-NEXT: # %bb.1: # %blue_pill -; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: tail callee1@plt ; RV32-NEXT: .LBB2_2: # %red_pill +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: .cfi_offset ra, -4 ; RV32-NEXT: call callee2@plt ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NEXT: addi sp, sp, 16 @@ -162,17 +162,15 @@ ; ; RV64-LABEL: branch_and_tail_call: ; RV64: # %bb.0: 
-; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: .cfi_def_cfa_offset 16 -; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: andi a0, a0, 1 ; RV64-NEXT: beqz a0, .LBB2_2 ; RV64-NEXT: # %bb.1: # %blue_pill -; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: tail callee1@plt ; RV64-NEXT: .LBB2_2: # %red_pill +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: .cfi_offset ra, -8 ; RV64-NEXT: call callee2@plt ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NEXT: addi sp, sp, 16 @@ -180,6 +178,11 @@ ; ; RV32-WITHFP-LABEL: branch_and_tail_call: ; RV32-WITHFP: # %bb.0: +; RV32-WITHFP-NEXT: andi a0, a0, 1 +; RV32-WITHFP-NEXT: beqz a0, .LBB2_2 +; RV32-WITHFP-NEXT: # %bb.1: # %blue_pill +; RV32-WITHFP-NEXT: tail callee1@plt +; RV32-WITHFP-NEXT: .LBB2_2: # %red_pill ; RV32-WITHFP-NEXT: addi sp, sp, -16 ; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill @@ -188,14 +191,6 @@ ; RV32-WITHFP-NEXT: .cfi_offset s0, -8 ; RV32-WITHFP-NEXT: addi s0, sp, 16 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0 -; RV32-WITHFP-NEXT: andi a0, a0, 1 -; RV32-WITHFP-NEXT: beqz a0, .LBB2_2 -; RV32-WITHFP-NEXT: # %bb.1: # %blue_pill -; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32-WITHFP-NEXT: addi sp, sp, 16 -; RV32-WITHFP-NEXT: tail callee1@plt -; RV32-WITHFP-NEXT: .LBB2_2: # %red_pill ; RV32-WITHFP-NEXT: call callee2@plt ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -204,6 +199,11 @@ ; ; RV64-WITHFP-LABEL: branch_and_tail_call: ; RV64-WITHFP: # %bb.0: +; RV64-WITHFP-NEXT: andi a0, a0, 1 +; RV64-WITHFP-NEXT: beqz a0, .LBB2_2 +; RV64-WITHFP-NEXT: # %bb.1: # %blue_pill +; RV64-WITHFP-NEXT: tail callee1@plt +; RV64-WITHFP-NEXT: .LBB2_2: # %red_pill ; RV64-WITHFP-NEXT: addi sp, sp, -16 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 16 ; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill @@ -212,14 +212,6 @@ ; RV64-WITHFP-NEXT: .cfi_offset s0, -16 ; RV64-WITHFP-NEXT: addi s0, sp, 16 ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0 -; RV64-WITHFP-NEXT: andi a0, a0, 1 -; RV64-WITHFP-NEXT: beqz a0, .LBB2_2 -; RV64-WITHFP-NEXT: # %bb.1: # %blue_pill -; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload -; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64-WITHFP-NEXT: addi sp, sp, 16 -; RV64-WITHFP-NEXT: tail callee1@plt -; RV64-WITHFP-NEXT: .LBB2_2: # %red_pill ; RV64-WITHFP-NEXT: call callee2@plt ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll @@ -11,28 +11,24 @@ define void @br_fcmp_false(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_false: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: addi a0, zero, 1 ; RV32IZFH-NEXT: bnez a0, .LBB0_2 ; RV32IZFH-NEXT: # %bb.1: # %if.then -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB0_2: # %if.else +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; 
RV64IZFH-LABEL: br_fcmp_false: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: addi a0, zero, 1 ; RV64IZFH-NEXT: bnez a0, .LBB0_2 ; RV64IZFH-NEXT: # %bb.1: # %if.then -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB0_2: # %if.else +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp false half %a, %b br i1 %1, label %if.then, label %if.else @@ -46,28 +42,24 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_oeq: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: feq.h a0, fa0, fa1 ; RV32IZFH-NEXT: bnez a0, .LBB1_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB1_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_oeq: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: feq.h a0, fa0, fa1 ; RV64IZFH-NEXT: bnez a0, .LBB1_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB1_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp oeq half %a, %b br i1 %1, label %if.then, label %if.else @@ -84,28 +76,24 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_oeq_alt: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: feq.h a0, fa0, fa1 ; RV32IZFH-NEXT: bnez a0, .LBB2_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB2_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_oeq_alt: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: feq.h a0, fa0, fa1 ; RV64IZFH-NEXT: bnez a0, .LBB2_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB2_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp oeq half %a, %b br i1 %1, label %if.then, label %if.else @@ -119,28 +107,24 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ogt: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: flt.h a0, fa1, fa0 ; RV32IZFH-NEXT: bnez a0, .LBB3_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB3_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: 
br_fcmp_ogt: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: flt.h a0, fa1, fa0 ; RV64IZFH-NEXT: bnez a0, .LBB3_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB3_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ogt half %a, %b br i1 %1, label %if.then, label %if.else @@ -154,28 +138,24 @@ define void @br_fcmp_oge(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_oge: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fle.h a0, fa1, fa0 ; RV32IZFH-NEXT: bnez a0, .LBB4_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB4_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_oge: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: fle.h a0, fa1, fa0 ; RV64IZFH-NEXT: bnez a0, .LBB4_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB4_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp oge half %a, %b br i1 %1, label %if.then, label %if.else @@ -189,28 +169,24 @@ define void @br_fcmp_olt(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_olt: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: flt.h a0, fa0, fa1 ; RV32IZFH-NEXT: bnez a0, .LBB5_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB5_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_olt: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: flt.h a0, fa0, fa1 ; RV64IZFH-NEXT: bnez a0, .LBB5_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB5_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp olt half %a, %b br i1 %1, label %if.then, label %if.else @@ -224,28 +200,24 @@ define void @br_fcmp_ole(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ole: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fle.h a0, fa0, fa1 ; RV32IZFH-NEXT: bnez a0, .LBB6_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB6_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_ole: ; RV64IZFH: # 
%bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: fle.h a0, fa0, fa1 ; RV64IZFH-NEXT: bnez a0, .LBB6_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB6_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ole half %a, %b br i1 %1, label %if.then, label %if.else @@ -259,32 +231,28 @@ define void @br_fcmp_one(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_one: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: flt.h a0, fa0, fa1 ; RV32IZFH-NEXT: flt.h a1, fa1, fa0 ; RV32IZFH-NEXT: or a0, a1, a0 ; RV32IZFH-NEXT: bnez a0, .LBB7_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB7_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_one: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: flt.h a0, fa0, fa1 ; RV64IZFH-NEXT: flt.h a1, fa1, fa0 ; RV64IZFH-NEXT: or a0, a1, a0 ; RV64IZFH-NEXT: bnez a0, .LBB7_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB7_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp one half %a, %b br i1 %1, label %if.then, label %if.else @@ -298,32 +266,28 @@ define void @br_fcmp_ord(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ord: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: feq.h a0, fa1, fa1 ; RV32IZFH-NEXT: feq.h a1, fa0, fa0 ; RV32IZFH-NEXT: and a0, a1, a0 ; RV32IZFH-NEXT: bnez a0, .LBB8_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB8_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_ord: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: feq.h a0, fa1, fa1 ; RV64IZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: bnez a0, .LBB8_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB8_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ord half %a, %b br i1 %1, label %if.then, label %if.else @@ -337,32 +301,28 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ueq: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: flt.h a0, fa0, fa1 ; RV32IZFH-NEXT: flt.h a1, fa1, fa0 ; RV32IZFH-NEXT: or a0, a1, a0 ; RV32IZFH-NEXT: beqz a0, .LBB9_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; 
RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB9_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_ueq: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: flt.h a0, fa0, fa1 ; RV64IZFH-NEXT: flt.h a1, fa1, fa0 ; RV64IZFH-NEXT: or a0, a1, a0 ; RV64IZFH-NEXT: beqz a0, .LBB9_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB9_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ueq half %a, %b br i1 %1, label %if.then, label %if.else @@ -376,28 +336,24 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ugt: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fle.h a0, fa0, fa1 ; RV32IZFH-NEXT: beqz a0, .LBB10_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB10_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_ugt: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: fle.h a0, fa0, fa1 ; RV64IZFH-NEXT: beqz a0, .LBB10_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB10_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ugt half %a, %b br i1 %1, label %if.then, label %if.else @@ -411,28 +367,24 @@ define void @br_fcmp_uge(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_uge: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: flt.h a0, fa0, fa1 ; RV32IZFH-NEXT: beqz a0, .LBB11_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB11_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_uge: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: flt.h a0, fa0, fa1 ; RV64IZFH-NEXT: beqz a0, .LBB11_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB11_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp uge half %a, %b br i1 %1, label %if.then, label %if.else @@ -446,28 +398,24 @@ define void @br_fcmp_ult(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ult: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fle.h a0, fa1, fa0 ; RV32IZFH-NEXT: beqz a0, 
.LBB12_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB12_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_ult: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: fle.h a0, fa1, fa0 ; RV64IZFH-NEXT: beqz a0, .LBB12_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB12_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ult half %a, %b br i1 %1, label %if.then, label %if.else @@ -481,28 +429,24 @@ define void @br_fcmp_ule(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_ule: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: flt.h a0, fa1, fa0 ; RV32IZFH-NEXT: beqz a0, .LBB13_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB13_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_ule: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: flt.h a0, fa1, fa0 ; RV64IZFH-NEXT: beqz a0, .LBB13_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB13_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp ule half %a, %b br i1 %1, label %if.then, label %if.else @@ -516,28 +460,24 @@ define void @br_fcmp_une(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_une: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: feq.h a0, fa0, fa1 ; RV32IZFH-NEXT: beqz a0, .LBB14_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB14_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_une: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: feq.h a0, fa0, fa1 ; RV64IZFH-NEXT: beqz a0, .LBB14_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB14_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp une half %a, %b br i1 %1, label %if.then, label %if.else @@ -552,32 +492,28 @@ ; TODO: sltiu+bne -> beq ; RV32IZFH-LABEL: br_fcmp_uno: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: feq.h a0, fa1, fa1 ; RV32IZFH-NEXT: feq.h a1, fa0, fa0 ; RV32IZFH-NEXT: and a0, a1, a0 ; 
RV32IZFH-NEXT: beqz a0, .LBB15_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB15_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_uno: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: feq.h a0, fa1, fa1 ; RV64IZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: beqz a0, .LBB15_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB15_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp uno half %a, %b br i1 %1, label %if.then, label %if.else @@ -591,28 +527,24 @@ define void @br_fcmp_true(half %a, half %b) nounwind { ; RV32IZFH-LABEL: br_fcmp_true: ; RV32IZFH: # %bb.0: -; RV32IZFH-NEXT: addi sp, sp, -16 -; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: addi a0, zero, 1 ; RV32IZFH-NEXT: bnez a0, .LBB16_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else -; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32IZFH-NEXT: addi sp, sp, 16 ; RV32IZFH-NEXT: ret ; RV32IZFH-NEXT: .LBB16_2: # %if.then +; RV32IZFH-NEXT: addi sp, sp, -16 +; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: call abort@plt ; ; RV64IZFH-LABEL: br_fcmp_true: ; RV64IZFH: # %bb.0: -; RV64IZFH-NEXT: addi sp, sp, -16 -; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: addi a0, zero, 1 ; RV64IZFH-NEXT: bnez a0, .LBB16_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else -; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; RV64IZFH-NEXT: .LBB16_2: # %if.then +; RV64IZFH-NEXT: addi sp, sp, -16 +; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt %1 = fcmp true half %a, %b br i1 %1, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -11,10 +11,10 @@ define i32 @ctlz_i32(i32 %a) nounwind { ; RV32I-LABEL: ctlz_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: beqz a0, .LBB0_2 ; RV32I-NEXT: # %bb.1: # %cond.false +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: srli a1, a0, 1 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: srli a1, a0, 2 @@ -46,13 +46,12 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 -; RV32I-NEXT: j .LBB0_3 -; RV32I-NEXT: .LBB0_2: -; RV32I-NEXT: addi a0, zero, 32 -; RV32I-NEXT: .LBB0_3: # %cond.end ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret +; RV32I-NEXT: .LBB0_2: +; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: ret ; ; RV32B-LABEL: ctlz_i32: ; RV32B: # %bb.0: @@ -188,10 +187,10 @@ define i32 @cttz_i32(i32 %a) nounwind { ; RV32I-LABEL: cttz_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: addi sp, sp, -16 -; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: beqz a0, .LBB2_2 ; RV32I-NEXT: # %bb.1: # %cond.false +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; 
RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 @@ -215,13 +214,12 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: call __mulsi3@plt ; RV32I-NEXT: srli a0, a0, 24 -; RV32I-NEXT: j .LBB2_3 -; RV32I-NEXT: .LBB2_2: -; RV32I-NEXT: addi a0, zero, 32 -; RV32I-NEXT: .LBB2_3: # %cond.end ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret +; RV32I-NEXT: .LBB2_2: +; RV32I-NEXT: addi a0, zero, 32 +; RV32I-NEXT: ret ; ; RV32B-LABEL: cttz_i32: ; RV32B: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -11,10 +11,10 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: ctlz_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: beqz a0, .LBB0_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: srliw a1, a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 @@ -73,13 +73,12 @@ ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 ; RV64I-NEXT: addi a0, a0, -32 -; RV64I-NEXT: j .LBB0_3 -; RV64I-NEXT: .LBB0_2: -; RV64I-NEXT: addi a0, zero, 32 -; RV64I-NEXT: .LBB0_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; RV64I-NEXT: .LBB0_2: +; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: ret ; ; RV64B-LABEL: ctlz_i32: ; RV64B: # %bb.0: @@ -97,10 +96,10 @@ define signext i32 @log2_i32(i32 signext %a) nounwind { ; RV64I-LABEL: log2_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: beqz a0, .LBB1_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: srliw a1, a0, 1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 @@ -159,14 +158,14 @@ ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 ; RV64I-NEXT: addi a0, a0, -32 +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: j .LBB1_3 ; RV64I-NEXT: .LBB1_2: ; RV64I-NEXT: addi a0, zero, 32 ; RV64I-NEXT: .LBB1_3: # %cond.end ; RV64I-NEXT: addi a1, zero, 31 ; RV64I-NEXT: sub a0, a1, a0 -; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret ; ; RV64B-LABEL: log2_i32: @@ -388,13 +387,13 @@ define i32 @ctlz_lshr_i32(i32 signext %a) { ; RV64I-LABEL: ctlz_lshr_i32: ; RV64I: # %bb.0: +; RV64I-NEXT: srliw a0, a0, 1 +; RV64I-NEXT: beqz a0, .LBB4_2 +; RV64I-NEXT: # %bb.1: # %cond.false ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: .cfi_def_cfa_offset 16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: .cfi_offset ra, -8 -; RV64I-NEXT: srliw a0, a0, 1 -; RV64I-NEXT: beqz a0, .LBB4_2 -; RV64I-NEXT: # %bb.1: # %cond.false ; RV64I-NEXT: srli a1, a0, 1 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: srli a1, a0, 2 @@ -451,13 +450,12 @@ ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 ; RV64I-NEXT: addi a0, a0, -32 -; RV64I-NEXT: j .LBB4_3 -; RV64I-NEXT: .LBB4_2: -; RV64I-NEXT: addi a0, zero, 32 -; RV64I-NEXT: .LBB4_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; RV64I-NEXT: .LBB4_2: +; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: ret ; ; RV64B-LABEL: ctlz_lshr_i32: ; RV64B: # %bb.0: @@ -480,10 
+478,10 @@ define i64 @ctlz_i64(i64 %a) nounwind { ; RV64I-LABEL: ctlz_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: beqz a0, .LBB5_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: srli a1, a0, 1 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: srli a1, a0, 2 @@ -539,13 +537,12 @@ ; RV64I-NEXT: addi a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 -; RV64I-NEXT: j .LBB5_3 -; RV64I-NEXT: .LBB5_2: -; RV64I-NEXT: addi a0, zero, 64 -; RV64I-NEXT: .LBB5_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; RV64I-NEXT: .LBB5_2: +; RV64I-NEXT: addi a0, zero, 64 +; RV64I-NEXT: ret ; ; RV64B-LABEL: ctlz_i64: ; RV64B: # %bb.0: @@ -565,10 +562,10 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind { ; RV64I-LABEL: cttz_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: beqz a0, .LBB6_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: addi a1, a0, -1 ; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: and a0, a0, a1 @@ -614,13 +611,12 @@ ; RV64I-NEXT: addi a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 -; RV64I-NEXT: j .LBB6_3 -; RV64I-NEXT: .LBB6_2: -; RV64I-NEXT: addi a0, zero, 32 -; RV64I-NEXT: .LBB6_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; RV64I-NEXT: .LBB6_2: +; RV64I-NEXT: addi a0, zero, 32 +; RV64I-NEXT: ret ; ; RV64B-LABEL: cttz_i32: ; RV64B: # %bb.0: @@ -878,10 +874,10 @@ define i64 @cttz_i64(i64 %a) nounwind { ; RV64I-LABEL: cttz_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: addi sp, sp, -16 -; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: beqz a0, .LBB10_2 ; RV64I-NEXT: # %bb.1: # %cond.false +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: addi a1, a0, -1 ; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: and a0, a0, a1 @@ -927,13 +923,12 @@ ; RV64I-NEXT: addi a1, a1, 257 ; RV64I-NEXT: call __muldi3@plt ; RV64I-NEXT: srli a0, a0, 56 -; RV64I-NEXT: j .LBB10_3 -; RV64I-NEXT: .LBB10_2: -; RV64I-NEXT: addi a0, zero, 64 -; RV64I-NEXT: .LBB10_3: # %cond.end ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64I-NEXT: addi sp, sp, 16 ; RV64I-NEXT: ret +; RV64I-NEXT: .LBB10_2: +; RV64I-NEXT: addi a0, zero, 64 +; RV64I-NEXT: ret ; ; RV64B-LABEL: cttz_i64: ; RV64B: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll --- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll +++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll @@ -1,26 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple riscv32 < %s | FileCheck %s -check-prefix=RV32I-NOSW -; RUN: llc -mtriple riscv32 -enable-shrink-wrap < %s | FileCheck %s -check-prefix=RV32I-SW -; RUN: llc -mtriple riscv32 -enable-shrink-wrap -mattr=+save-restore < %s \ -; RUN: | FileCheck %s -check-prefix=RV32I-SW-SR - +; RUN: llc -mtriple riscv32 < %s \ +; RUN: | FileCheck %s -check-prefix=RV32I-SW +; RUN: llc -mtriple riscv32 -mattr=+save-restore < %s \ +; RUN: | FileCheck %s -check-prefix=RV32I-SW-SR +; RUN: llc -mtriple riscv64 < %s \ +; RUN: | FileCheck %s -check-prefix=RV64I-SW declare void @abort() define void @eliminate_restore(i32 %n) nounwind { -; 
RV32I-NOSW-LABEL: eliminate_restore: -; RV32I-NOSW: # %bb.0: -; RV32I-NOSW-NEXT: addi sp, sp, -16 -; RV32I-NOSW-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NOSW-NEXT: addi a1, zero, 32 -; RV32I-NOSW-NEXT: bgeu a1, a0, .LBB0_2 -; RV32I-NOSW-NEXT: # %bb.1: # %if.end -; RV32I-NOSW-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NOSW-NEXT: addi sp, sp, 16 -; RV32I-NOSW-NEXT: ret -; RV32I-NOSW-NEXT: .LBB0_2: # %if.then -; RV32I-NOSW-NEXT: call abort@plt -; ; RV32I-SW-LABEL: eliminate_restore: ; RV32I-SW: # %bb.0: ; RV32I-SW-NEXT: addi a1, zero, 32 @@ -41,6 +29,18 @@ ; RV32I-SW-SR-NEXT: .LBB0_2: # %if.then ; RV32I-SW-SR-NEXT: call t0, __riscv_save_0 ; RV32I-SW-SR-NEXT: call abort@plt +; +; RV64I-SW-LABEL: eliminate_restore: +; RV64I-SW: # %bb.0: +; RV64I-SW-NEXT: sext.w a0, a0 +; RV64I-SW-NEXT: addi a1, zero, 32 +; RV64I-SW-NEXT: bgeu a1, a0, .LBB0_2 +; RV64I-SW-NEXT: # %bb.1: # %if.end +; RV64I-SW-NEXT: ret +; RV64I-SW-NEXT: .LBB0_2: # %if.then +; RV64I-SW-NEXT: addi sp, sp, -16 +; RV64I-SW-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-SW-NEXT: call abort@plt %cmp = icmp ule i32 %n, 32 br i1 %cmp, label %if.then, label %if.end @@ -55,27 +55,6 @@ declare void @notdead(i8*) define void @conditional_alloca(i32 %n) nounwind { -; RV32I-NOSW-LABEL: conditional_alloca: -; RV32I-NOSW: # %bb.0: -; RV32I-NOSW-NEXT: addi sp, sp, -16 -; RV32I-NOSW-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NOSW-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32I-NOSW-NEXT: addi s0, sp, 16 -; RV32I-NOSW-NEXT: addi a1, zero, 32 -; RV32I-NOSW-NEXT: bltu a1, a0, .LBB1_2 -; RV32I-NOSW-NEXT: # %bb.1: # %if.then -; RV32I-NOSW-NEXT: addi a0, a0, 15 -; RV32I-NOSW-NEXT: andi a0, a0, -16 -; RV32I-NOSW-NEXT: sub a0, sp, a0 -; RV32I-NOSW-NEXT: mv sp, a0 -; RV32I-NOSW-NEXT: call notdead@plt -; RV32I-NOSW-NEXT: .LBB1_2: # %if.end -; RV32I-NOSW-NEXT: addi sp, s0, -16 -; RV32I-NOSW-NEXT: lw s0, 8(sp) # 4-byte Folded Reload -; RV32I-NOSW-NEXT: lw ra, 12(sp) # 4-byte Folded Reload -; RV32I-NOSW-NEXT: addi sp, sp, 16 -; RV32I-NOSW-NEXT: ret -; ; RV32I-SW-LABEL: conditional_alloca: ; RV32I-SW: # %bb.0: ; RV32I-SW-NEXT: addi a1, zero, 32 @@ -113,6 +92,30 @@ ; RV32I-SW-SR-NEXT: tail __riscv_restore_1 ; RV32I-SW-SR-NEXT: .LBB1_2: # %if.end ; RV32I-SW-SR-NEXT: ret +; +; RV64I-SW-LABEL: conditional_alloca: +; RV64I-SW: # %bb.0: +; RV64I-SW-NEXT: sext.w a1, a0 +; RV64I-SW-NEXT: addi a2, zero, 32 +; RV64I-SW-NEXT: bltu a2, a1, .LBB1_2 +; RV64I-SW-NEXT: # %bb.1: # %if.then +; RV64I-SW-NEXT: addi sp, sp, -16 +; RV64I-SW-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-SW-NEXT: sd s0, 0(sp) # 8-byte Folded Spill +; RV64I-SW-NEXT: addi s0, sp, 16 +; RV64I-SW-NEXT: slli a0, a0, 32 +; RV64I-SW-NEXT: srli a0, a0, 32 +; RV64I-SW-NEXT: addi a0, a0, 15 +; RV64I-SW-NEXT: andi a0, a0, -16 +; RV64I-SW-NEXT: sub a0, sp, a0 +; RV64I-SW-NEXT: mv sp, a0 +; RV64I-SW-NEXT: call notdead@plt +; RV64I-SW-NEXT: addi sp, s0, -16 +; RV64I-SW-NEXT: ld s0, 0(sp) # 8-byte Folded Reload +; RV64I-SW-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-SW-NEXT: addi sp, sp, 16 +; RV64I-SW-NEXT: .LBB1_2: # %if.end +; RV64I-SW-NEXT: ret %cmp = icmp ule i32 %n, 32 br i1 %cmp, label %if.then, label %if.end