Index: lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
===================================================================
--- lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
+++ lib/Target/RISCV/MCTargetDesc/RISCVMCAsmInfo.cpp
@@ -21,6 +21,7 @@
   CommentString = "#";
   AlignmentIsInBytes = false;
   SupportsDebugInformation = true;
+  ExceptionsType = ExceptionHandling::DwarfCFI;
   Data16bitsDirective = "\t.half\t";
   Data32bitsDirective = "\t.word\t";
 }
Index: lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
===================================================================
--- lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
+++ lib/Target/RISCV/MCTargetDesc/RISCVMCTargetDesc.cpp
@@ -50,7 +50,13 @@
 
 static MCAsmInfo *createRISCVMCAsmInfo(const MCRegisterInfo &MRI,
                                        const Triple &TT) {
-  return new RISCVMCAsmInfo(TT);
+  MCAsmInfo *MAI = new RISCVMCAsmInfo(TT);
+
+  unsigned SP = MRI.getDwarfRegNum(RISCV::X2, true);
+  MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, SP, 0);
+  MAI->addInitialFrameState(Inst);
+
+  return MAI;
 }
 
 static MCSubtargetInfo *createRISCVMCSubtargetInfo(const Triple &TT,
Index: lib/Target/RISCV/RISCVFrameLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVFrameLowering.cpp
+++ lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -18,6 +18,7 @@
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/MC/MCDwarf.h"
 
 using namespace llvm;
 
@@ -96,6 +97,8 @@
 
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  const RISCVRegisterInfo *RI = STI.getRegisterInfo();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   MachineBasicBlock::iterator MBBI = MBB.begin();
 
   unsigned FPReg = getFPReg(STI);
@@ -119,6 +122,12 @@
   // Allocate space on the stack if necessary.
   adjustReg(MBB, MBBI, DL, SPReg, SPReg, -StackSize, MachineInstr::FrameSetup);
 
+  // Emit ".cfi_def_cfa_offset StackSize"
+  unsigned CFIIndex = MF.addFrameInst(
+      MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex);
+
   // The frame pointer is callee-saved, and code has been generated for us to
   // save it to the stack. We need to skip over the storing of callee-saved
   // registers as the frame pointer must be modified after it has been saved
@@ -128,10 +137,30 @@
   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
   std::advance(MBBI, CSI.size());
 
+  // Iterate over the list of callee-saved registers and emit .cfi_offset
+  // directives.
+  for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
+                                                    E = CSI.end();
+       I != E; ++I) {
+    int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
+    unsigned Reg = I->getReg();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
+        nullptr, RI->getDwarfRegNum(Reg, true), Offset));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
+  }
+
   // Generate new FP.
-  if (hasFP(MF))
+  if (hasFP(MF)) {
     adjustReg(MBB, MBBI, DL, FPReg, SPReg,
               StackSize - RVFI->getVarArgsSaveSize(), MachineInstr::FrameSetup);
+
+    // Emit ".cfi_def_cfa $fp, 0"
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
+        nullptr, RI->getDwarfRegNum(FPReg, true), 0));
+    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
+  }
 }
 
 void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
@@ -141,6 +170,7 @@
   MachineFrameInfo &MFI = MF.getFrameInfo();
   auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
   DebugLoc DL = MBBI->getDebugLoc();
+  const RISCVInstrInfo *TII = STI.getInstrInfo();
   unsigned FPReg = getFPReg(STI);
   unsigned SPReg = getSPReg(STI);
 
@@ -161,8 +191,25 @@
               MachineInstr::FrameDestroy);
   }
 
+  if (hasFP(MF)) {
+    // If there is a frame pointer, readjust the CFA to ($sp + FPOffset)
+    // before restoring callee-saved registers: ".cfi_def_cfa $sp, FPOffset".
+    uint64_t FPOffset = StackSize - RVFI->getVarArgsSaveSize();
+    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
+        nullptr, RI->getDwarfRegNum(SPReg, true), -FPOffset));
+    BuildMI(MBB, LastFrameDestroy, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+        .addCFIIndex(CFIIndex);
+  }
+
   // Deallocate stack
   adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackSize, MachineInstr::FrameDestroy);
+
+  // After restoring $sp, the CFA is ($sp + 0) again.
+  // Emit ".cfi_def_cfa_offset 0"
+  unsigned CFIIndex =
+      MF.addFrameInst(MCCFIInstruction::createDefCfaOffset(nullptr, 0));
+  BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(CFIIndex);
 }
 
 int RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF,
Index: test/CodeGen/RISCV/addc-adde-sube-subc.ll
===================================================================
--- test/CodeGen/RISCV/addc-adde-sube-subc.ll
+++ test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -12,6 +12,7 @@
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a1, a1, a0
 ; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = add i64 %a, %b
   ret i64 %1
@@ -24,6 +25,7 @@
 ; RV32I-NEXT:    sltu a3, a0, a2
 ; RV32I-NEXT:    sub a1, a1, a3
 ; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = sub i64 %a, %b
   ret i64 %1
Index: test/CodeGen/RISCV/alu64.ll
===================================================================
--- test/CodeGen/RISCV/alu64.ll
+++ test/CodeGen/RISCV/alu64.ll
@@ -379,11 +379,13 @@
 ; RV64I-LABEL: addiw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addiw a0, a0, 123
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: addiw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a0, a0, 123
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = add i32 %a, 123
   ret i32 %1
@@ -393,11 +395,13 @@
 ; RV64I-LABEL: slliw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slliw a0, a0, 17
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: slliw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = shl i32 %a, 17
   ret i32 %1
@@ -407,11 +411,13 @@
 ; RV64I-LABEL: srliw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srliw a0, a0, 8
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: srliw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = lshr i32 %a, 8
   ret i32 %1
@@ -421,11 +427,13 @@
 ; RV64I-LABEL: sraiw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraiw a0, a0, 9
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sraiw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a0, a0, 9
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = ashr i32 %a, 9
   ret i32 %1
@@ -435,10 +443,12 @@
 ; RV64I-LABEL: sextw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sextw:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   ret i32 %a
 }
@@ -447,11 +457,13 @@
 ; RV64I-LABEL: addw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: addw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = add i32 %a, %b
   ret i32 %1
@@ -461,11 +473,13 @@
 ; RV64I-LABEL: subw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: subw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = sub i32 %a, %b
   ret i32 %1
@@ -475,11 +489,13 @@
 ; RV64I-LABEL: sllw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sllw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sllw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sll a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = shl i32 %a, %b
   ret i32 %1
@@ -489,11 +505,13 @@
 ; RV64I-LABEL: srlw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    srlw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: srlw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srl a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = lshr i32 %a, %b
   ret i32 %1
@@ -503,11 +521,13 @@
 ; RV64I-LABEL: sraw:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sraw a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
 ;
 ; RV32I-LABEL: sraw:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sra a0, a0, a2
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = trunc i64 %a to i32
   %2 = ashr i32 %1, %b
Index: test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
===================================================================
--- test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
+++ test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
@@ -7,7 +7,6 @@
 ; higher bits were masked to zero for the comparison.
 
 define i1 @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 signext %cmp,
-                                       i32 signext %val) {
 ; RV64IA-LABEL: cmpxchg_i32_seq_cst_seq_cst:
 ; RV64IA:       # %bb.0: # %entry
 ; RV64IA-NEXT:  .LBB0_1: # %entry
@@ -21,7 +20,9 @@
 ; RV64IA-NEXT:  .LBB0_3: # %entry
 ; RV64IA-NEXT:    xor a0, a3, a1
 ; RV64IA-NEXT:    seqz a0, a0
+; RV64IA-NEXT:    .cfi_def_cfa_offset 0
 ; RV64IA-NEXT:    ret
+                                       i32 signext %val) {
 entry:
   %0 = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
   %1 = extractvalue { i32, i1 } %0, 1
Index: test/CodeGen/RISCV/bare-select.ll
===================================================================
--- test/CodeGen/RISCV/bare-select.ll
+++ test/CodeGen/RISCV/bare-select.ll
@@ -11,6 +11,7 @@
 ; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = select i1 %a, i32 %b, i32 %c
   ret i32 %1
@@ -25,6 +26,7 @@
 ; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:  .LBB1_2:
 ; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %1 = select i1 %a, float %b, float %c
   ret float %1
Index: test/CodeGen/RISCV/branch-relaxation.ll
===================================================================
--- test/CodeGen/RISCV/branch-relaxation.ll
+++ test/CodeGen/RISCV/branch-relaxation.ll
@@ -14,6 +14,7 @@
 ; CHECK-NEXT:    .space 4096
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:  .LBB0_2: # %tail
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   br i1 %a, label %iftrue, label %tail
 
@@ -40,11 +41,13 @@
 ; CHECK-NEXT:    .space 1048576
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2: # %jmp
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    addi a0, zero, 1
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   br i1 %a, label %iftrue, label %jmp
 
Index: test/CodeGen/RISCV/branch.ll
===================================================================
--- test/CodeGen/RISCV/branch.ll
+++ test/CodeGen/RISCV/branch.ll
@@ -41,6 +41,7 @@
 ; RV32I-NEXT:  # %bb.11: # %test12
 ; RV32I-NEXT:    lw a0, 0(a1)
 ; RV32I-NEXT:  .LBB0_12: # %end
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %val1 = load volatile i32, i32* %b
   %tst1 = icmp eq i32 %val1, %a
Index: test/CodeGen/RISCV/frame-info.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/frame-info.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s
+
+define void @foo(i32 signext %size) {
+; CHECK: .cfi_startproc
+; CHECK: .cfi_def_cfa_offset 16
+; CHECK: .cfi_offset ra, -4
+; CHECK: .cfi_offset s0, -8
+; CHECK: .cfi_def_cfa s0, 0
+entry:
+  %0 = alloca i8, i32 %size, align 16
+  call void @bar(i8* nonnull %0) #2
+  ret void
+; CHECK: .cfi_def_cfa sp, 16
+; CHECK: .cfi_def_cfa_offset 0
+}
+
+declare void @bar(i8*)
Index: test/CodeGen/RISCV/get-setcc-result-type.ll
===================================================================
--- test/CodeGen/RISCV/get-setcc-result-type.ll
+++ test/CodeGen/RISCV/get-setcc-result-type.ll
@@ -21,6 +21,7 @@
 ; RV32I-NEXT:    seqz a1, a1
 ; RV32I-NEXT:    neg a1, a1
 ; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 entry:
   %0 = load <4 x i32>, <4 x i32>* %p, align 16
Index: test/CodeGen/RISCV/hoist-global-addr-base.ll
===================================================================
--- test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -16,6 +16,7 @@
 ; CHECK-NEXT:    sw a1, 164(a0)
 ; CHECK-NEXT:    addi a1, zero, 10
 ; CHECK-NEXT:    sw a1, 160(a0)
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   store i32 10, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4
@@ -35,6 +36,7 @@
 ; CHECK-NEXT:    addi a1, zero, 10
 ; CHECK-NEXT:    sw a1, 160(a0)
 ; CHECK-NEXT:  .LBB1_2: # %if.end
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 2), align 4
@@ -62,6 +64,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(g+73568)
 ; CHECK-NEXT:    addi a0, a0, %lo(g+73568)
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   ret i8* getelementptr inbounds ([1048576 x i8], [1048576 x i8]* @g, i32 0, i32 73568)
 }
@@ -77,6 +80,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    lui a0, %hi(g+524288)
 ; CHECK-NEXT:    addi a0, a0, %lo(g+524288)
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
   ret i8* getelementptr inbounds ([1048576 x i8], [1048576 x i8]* @g, i32 0, i32 524288)
 }
@@ -86,6 +90,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui a0, %hi(s+16572)
 ; CHECK-NEXT:    addi a0, a0, %lo(s+16572)
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   ret i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 5)
@@ -96,6 +101,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui a0, %hi(s+160)
 ; CHECK-NEXT:    addi a0, a0, %lo(s+160)
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   ret i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1)
@@ -111,9 +117,11 @@
 ; CHECK-NEXT:    beqz a1, .LBB6_2
 ; CHECK-NEXT:  # %bb.1: # %if.end
 ; CHECK-NEXT:    addi a0, a0, 168
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB6_2: # %if.then
 ; CHECK-NEXT:    addi a0, a0, 160
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 2), align 4
@@ -162,6 +170,7 @@
 ; CHECK-NEXT:    lui a0, %hi(s+160)
 ; CHECK-NEXT:    addi a1, zero, 10
 ; CHECK-NEXT:    sw a1, %lo(s+160)(a0)
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    ret
 entry:
   store i32 10, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4
Index: test/CodeGen/RISCV/inline-asm.ll
===================================================================
--- test/CodeGen/RISCV/inline-asm.ll
+++ test/CodeGen/RISCV/inline-asm.ll
@@ -14,6 +14,7 @@
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: constraint_r:
@@ -23,6 +24,7 @@
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    add a0, a0, a1
 ; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = load i32, i32* @gi
   %2 = tail call i32 asm "add $0, $1, $2", "=r,r,r"(i32 %a, i32 %1)
@@ -35,6 +37,7 @@
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    addi a0, a0, 113
 ; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: constraint_i:
@@ -42,6 +45,7 @@
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    addi a0, a0, 113
 ; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = load i32, i32* @gi
   %2 = tail call i32 asm "addi $0, $1, $2", "=r,r,i"(i32 %a, i32 113)
@@ -53,12 +57,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: constraint_m:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   call void asm sideeffect "", "=*m"(i32* %a)
   ret void
@@ -70,6 +76,7 @@
 ; RV32I-NEXT:    #APP
 ; RV32I-NEXT:    lw a0, 0(a0)
 ; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: constraint_m2:
@@ -77,6 +84,7 @@
 ; RV64I-NEXT:    #APP
 ; RV64I-NEXT:    lw a0, 0(a0)
 ; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = tail call i32 asm "lw $0, $1", "=r,*m"(i32* %a) nounwind
   ret i32 %1
Index: test/CodeGen/RISCV/jumptable.ll
===================================================================
--- test/CodeGen/RISCV/jumptable.ll
+++ test/CodeGen/RISCV/jumptable.ll
@@ -33,6 +33,7 @@
 ; RV32I-NEXT:  .LBB0_9: # %exit
 ; RV32I-NEXT:    sw a0, 0(a1)
 ; RV32I-NEXT:  .LBB0_10: # %exit
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 entry:
   switch i32 %in, label %exit [
Index: test/CodeGen/RISCV/legalize-fneg.ll
===================================================================
--- test/CodeGen/RISCV/legalize-fneg.ll
+++ test/CodeGen/RISCV/legalize-fneg.ll
@@ -11,6 +11,7 @@
 ; RV32-NEXT:    lui a2, 524288
 ; RV32-NEXT:    xor a1, a1, a2
 ; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test1:
@@ -20,6 +21,7 @@
 ; RV64-NEXT:    lw a1, 0(a1)
 ; RV64-NEXT:    xor a1, a1, a2
 ; RV64-NEXT:    sw a1, 0(a0)
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %0 = load float, float* %b
@@ -37,6 +39,7 @@
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:    xor a1, a2, a1
 ; RV32-NEXT:    sw a1, 4(a0)
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test2:
@@ -46,6 +49,7 @@
 ; RV64-NEXT:    ld a1, 0(a1)
 ; RV64-NEXT:    xor a1, a1, a2
 ; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %0 = load double, double* %b
@@ -67,6 +71,7 @@
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:    xor a1, a2, a1
 ; RV32-NEXT:    sw a1, 12(a0)
+; RV32-NEXT:    .cfi_def_cfa_offset 0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test3:
@@ -78,6 +83,7 @@
 ; RV64-NEXT:    slli a1, a1, 63
 ; RV64-NEXT:    xor a1, a2, a1
 ; RV64-NEXT:    sd a1, 8(a0)
+; RV64-NEXT:    .cfi_def_cfa_offset 0
 ; RV64-NEXT:    ret
 entry:
   %0 = load fp128, fp128* %b
Index: test/CodeGen/RISCV/rotl-rotr.ll
===================================================================
--- test/CodeGen/RISCV/rotl-rotr.ll
+++ test/CodeGen/RISCV/rotl-rotr.ll
@@ -13,6 +13,7 @@
 ; RV32I-NEXT:    sll a1, a0, a1
 ; RV32I-NEXT:    srl a0, a0, a2
 ; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %z = sub i32 32, %y
   %b = shl i32 %x, %y
@@ -29,6 +30,7 @@
 ; RV32I-NEXT:    srl a1, a0, a1
 ; RV32I-NEXT:    sll a0, a0, a2
 ; RV32I-NEXT:    or a0, a1, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %z = sub i32 32, %y
   %b = lshr i32 %x, %y
Index: test/CodeGen/RISCV/rv64i-tricky-shifts.ll
===================================================================
--- test/CodeGen/RISCV/rv64i-tricky-shifts.ll
+++ test/CodeGen/RISCV/rv64i-tricky-shifts.ll
@@ -12,6 +12,7 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sll a0, a0, a1
 ; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = shl i64 %a, %b
   %2 = shl i64 %1, 32
@@ -25,6 +26,7 @@
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    srl a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = and i64 %a, 4294967295
   %2 = lshr i64 %1, %b
@@ -36,6 +38,7 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
 ; RV64I-NEXT:    sra a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = shl i64 %a, 32
   %2 = ashr i64 %1, 32
Index: test/CodeGen/RISCV/select-cc.ll
===================================================================
--- test/CodeGen/RISCV/select-cc.ll
+++ test/CodeGen/RISCV/select-cc.ll
@@ -55,6 +55,7 @@
 ; RV32I-NEXT:  # %bb.19:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB0_20:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
   %val1 = load volatile i32, i32* %b
   %tst1 = icmp eq i32 %a, %val1
Index: test/CodeGen/RISCV/sext-zext-trunc.ll
===================================================================
--- test/CodeGen/RISCV/sext-zext-trunc.ll
+++ test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -9,12 +9,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i1_to_i8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i1 %a to i8
   ret i8 %1
@@ -25,12 +27,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i1_to_i16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i1 %a to i16
   ret i16 %1
@@ -41,12 +45,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i1_to_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i1 %a to i32
   ret i32 %1
@@ -58,12 +64,14 @@
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    neg a0, a0
 ; RV32I-NEXT:    mv a1, a0
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i1_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
 ; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i1 %a to i64
   ret i64 %1
@@ -74,12 +82,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i8_to_i16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i8 %a to i16
   ret i16 %1
@@ -90,12 +100,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 24
 ; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i8_to_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i8 %a to i32
   ret i32 %1
@@ -107,12 +119,14 @@
 ; RV32I-NEXT:    slli a1, a0, 24
 ; RV32I-NEXT:    srai a0, a1, 24
 ; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i8_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 56
 ; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i8 %a to i64
   ret i64 %1
@@ -123,12 +137,14 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i16_to_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i16 %a to i32
   ret i32 %1
@@ -140,12 +156,14 @@
 ; RV32I-NEXT:    slli a1, a0, 16
 ; RV32I-NEXT:    srai a0, a1, 16
 ; RV32I-NEXT:    srai a1, a1, 31
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i16_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i16 %a to i64
   ret i64 %1
@@ -155,11 +173,13 @@
 ; RV32I-LABEL: sext_i32_to_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    srai a1, a0, 31
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sext_i32_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = sext i32 %a to i64
   ret i64 %1
@@ -169,11 +189,13 @@
 ; RV32I-LABEL: zext_i1_to_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i1_to_i8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i1 %a to i8
   ret i8 %1
@@ -183,11 +205,13 @@
 ; RV32I-LABEL: zext_i1_to_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i1_to_i16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i1 %a to i16
   ret i16 %1
@@ -197,11 +221,13 @@
 ; RV32I-LABEL: zext_i1_to_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i1_to_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i1 %a to i32
   ret i32 %1
@@ -212,11 +238,13 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 1
 ; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i1_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i1 %a to i64
   ret i64 %1
@@ -226,11 +254,13 @@
 ; RV32I-LABEL: zext_i8_to_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i8_to_i16:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i8 %a to i16
   ret i16 %1
@@ -240,11 +270,13 @@
 ; RV32I-LABEL: zext_i8_to_i32:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i8_to_i32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i8 %a to i32
   ret i32 %1
@@ -255,11 +287,13 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    andi a0, a0, 255
 ; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i8_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i8 %a to i64
   ret i64 %1
@@ -271,6 +305,7 @@
 ; RV32I-NEXT:    lui a1, 16
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i16_to_i32:
@@ -278,6 +313,7 @@
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i16 %a to i32
   ret i32 %1
@@ -290,6 +326,7 @@
 ; RV32I-NEXT:    addi a1, a1, -1
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i16_to_i64:
@@ -297,6 +334,7 @@
 ; RV64I-NEXT:    lui a1, 16
 ; RV64I-NEXT:    addiw a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i16 %a to i64
   ret i64 %1
@@ -306,12 +344,14 @@
 ; RV32I-LABEL: zext_i32_to_i64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i32_to_i64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = zext i32 %a to i64
   ret i64 %1
@@ -320,10 +360,12 @@
 define i1 @trunc_i8_to_i1(i8 %a) {
 ; RV32I-LABEL: trunc_i8_to_i1:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i8_to_i1:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i8 %a to i1
   ret i1 %1
@@ -332,10 +374,12 @@
 define i1 @trunc_i16_to_i1(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i1:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i16_to_i1:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i16 %a to i1
   ret i1 %1
@@ -344,10 +388,12 @@
 define i1 @trunc_i32_to_i1(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i1:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i32_to_i1:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i32 %a to i1
   ret i1 %1
@@ -356,10 +402,12 @@
 define i1 @trunc_i64_to_i1(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i1:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i1:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i1
   ret i1 %1
@@ -368,10 +416,12 @@
 define i8 @trunc_i16_to_i8(i16 %a) {
 ; RV32I-LABEL: trunc_i16_to_i8:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i16_to_i8:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i16 %a to i8
   ret i8 %1
@@ -380,10 +430,12 @@
 define i8 @trunc_i32_to_i8(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i8:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i32_to_i8:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i32 %a to i8
   ret i8 %1
@@ -392,10 +444,12 @@
 define i8 @trunc_i64_to_i8(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i8:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i8:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i8
   ret i8 %1
@@ -404,10 +458,12 @@
 define i16 @trunc_i32_to_i16(i32 %a) {
 ; RV32I-LABEL: trunc_i32_to_i16:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i32_to_i16:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i32 %a to i16
   ret i16 %1
@@ -416,10 +472,12 @@
 define i16 @trunc_i64_to_i16(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i16:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i16:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i16
   ret i16 %1
@@ -428,10 +486,12 @@
 define i32 @trunc_i64_to_i32(i64 %a) {
 ; RV32I-LABEL: trunc_i64_to_i32:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: trunc_i64_to_i32:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    .cfi_def_cfa_offset 0
 ; RV64I-NEXT:    ret
   %1 = trunc i64 %a to i32
   ret i32 %1
Index: test/CodeGen/RISCV/tail-calls.ll
===================================================================
--- test/CodeGen/RISCV/tail-calls.ll
+++ test/CodeGen/RISCV/tail-calls.ll
@@ -35,10 +35,12 @@
 ; CHECK: lui a0, %hi(callee_indirect2)
 ; CHECK-NEXT: addi a5, a0, %lo(callee_indirect2)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: jr a5
 
 ; CHECK: lui a0, %hi(callee_indirect1)
 ; CHECK-NEXT: addi a5, a0, %lo(callee_indirect1)
+; CHECK-NEXT: .cfi_def_cfa_offset 0
 ; CHECK-NEXT: jr a5
 
 entry:
   %tobool = icmp eq i32 %a, 0
Index: test/CodeGen/RISCV/zext-with-load-is-free.ll
===================================================================
--- test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -20,9 +20,11 @@
 ; RV32I-NEXT:    bne a0, a1, .LBB0_3
 ; RV32I-NEXT:  # %bb.2: # %if.end
 ; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB0_3: # %if.then
 ; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 entry:
   %0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1
@@ -56,9 +58,11 @@
 ; RV32I-NEXT:    bne a0, a1, .LBB1_3
 ; RV32I-NEXT:  # %bb.2: # %if.end
 ; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB1_3: # %if.then
 ; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:    .cfi_def_cfa_offset 0
 ; RV32I-NEXT:    ret
 entry:
   %0 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 0), align 2