Index: lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
===================================================================
--- lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
+++ lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
@@ -29,12 +29,10 @@
 #define PRINT_ALIAS_INSTR
 #include "RISCVGenAsmWriter.inc"

-// Alias instruction emission is disabled by default. A subsequent patch will
-// change this default and fix all affected tests.
 static cl::opt<bool>
 NoAliases("riscv-no-aliases",
           cl::desc("Disable the emission of assembler pseudo instructions"),
-          cl::init(true),
+          cl::init(false),
           cl::Hidden);

 void RISCVInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
Index: test/CodeGen/RISCV/addc-adde-sube-subc.ll
===================================================================
--- test/CodeGen/RISCV/addc-adde-sube-subc.ll
+++ test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -15,11 +15,11 @@
 ; RV32I-NEXT: add a2, a0, a2
 ; RV32I-NEXT: sltu a0, a2, a0
 ; RV32I-NEXT: add a1, a1, a0
-; RV32I-NEXT: addi a0, a2, 0
+; RV32I-NEXT: mv a0, a2
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: ret
   %1 = add i64 %a, %b
   ret i64 %1
 }
@@ -38,7 +38,7 @@
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: ret
   %1 = sub i64 %a, %b
   ret i64 %1
 }
Index: test/CodeGen/RISCV/alloca.ll
===================================================================
--- test/CodeGen/RISCV/alloca.ll
+++ test/CodeGen/RISCV/alloca.ll
@@ -17,15 +17,15 @@
 ; RV32I-NEXT: addi a0, a0, 15
 ; RV32I-NEXT: andi a0, a0, -16
 ; RV32I-NEXT: sub a0, sp, a0
-; RV32I-NEXT: addi sp, a0, 0
+; RV32I-NEXT: mv sp, a0
 ; RV32I-NEXT: lui a1, %hi(notdead)
 ; RV32I-NEXT: addi a1, a1, %lo(notdead)
-; RV32I-NEXT: jalr ra, a1, 0
+; RV32I-NEXT: jalr a1
 ; RV32I-NEXT: addi sp, s0, -16
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: ret
   %1 = alloca i8, i32 %n
   call void @notdead(i8* %1)
   ret void
@@ -42,21 +42,21 @@
 ; RV32I-NEXT: sw s0, 8(sp)
 ; RV32I-NEXT: sw s1, 4(sp)
 ; RV32I-NEXT: addi s0, sp, 16
-; RV32I-NEXT: addi s1, sp, 0
+; RV32I-NEXT: mv s1, sp
 ; RV32I-NEXT: addi a0, a0, 15
 ; RV32I-NEXT: andi a0, a0, -16
 ; RV32I-NEXT: sub a0, sp, a0
-; RV32I-NEXT: addi sp, a0, 0
+; RV32I-NEXT: mv sp, a0
 ; RV32I-NEXT: lui a1, %hi(notdead)
 ; RV32I-NEXT: addi a1, a1, %lo(notdead)
-; RV32I-NEXT: jalr ra, a1, 0
-; RV32I-NEXT: addi sp, s1, 0
+; RV32I-NEXT: jalr a1
+; RV32I-NEXT: mv sp, s1
 ; RV32I-NEXT: addi sp, s0, -16
 ; RV32I-NEXT: lw s1, 4(sp)
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: ret
   %sp = call i8* @llvm.stacksave()
   %addr = alloca i8, i32 %n
   call void @notdead(i8* %addr)
Index: test/CodeGen/RISCV/alu32.ll
===================================================================
--- test/CodeGen/RISCV/alu32.ll
+++ test/CodeGen/RISCV/alu32.ll
@@ -19,7 +19,7 @@
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: ret
   %1 = add i32 %a, 1
   ret i32 %1
 }
@@ -35,7 +35,7 @@
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero, ra, 0
+; RV32I-NEXT: ret
   %1 = icmp slt i32 %a, 2
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -52,7 +52,7 @@
 ; RV32I-NEXT: lw s0, 8(sp)
 ; RV32I-NEXT: lw ra, 12(sp)
 ; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: jalr zero,
ra, 0 +; RV32I-NEXT: ret %1 = icmp ult i32 %a, 3 %2 = zext i1 %1 to i32 ret i32 %2 @@ -69,7 +69,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = xor i32 %a, 4 ret i32 %1 } @@ -85,7 +85,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = or i32 %a, 5 ret i32 %1 } @@ -101,7 +101,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = and i32 %a, 6 ret i32 %1 } @@ -117,7 +117,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = shl i32 %a, 7 ret i32 %1 } @@ -133,7 +133,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = lshr i32 %a, 8 ret i32 %1 } @@ -149,7 +149,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = ashr i32 %a, 9 ret i32 %1 } @@ -167,7 +167,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = add i32 %a, %b ret i32 %1 } @@ -183,7 +183,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sub i32 %a, %b ret i32 %1 } @@ -199,7 +199,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = shl i32 %a, %b ret i32 %1 } @@ -215,7 +215,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp slt i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -232,7 +232,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp ult i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -249,7 +249,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = xor i32 %a, %b ret i32 %1 } @@ -265,7 +265,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = lshr i32 %a, %b ret i32 %1 } @@ -281,7 +281,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = ashr i32 %a, %b ret i32 %1 } @@ -297,7 +297,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = or i32 %a, %b ret i32 %1 } @@ -313,7 +313,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = and i32 %a, %b ret i32 %1 } Index: test/CodeGen/RISCV/bare-select.ll =================================================================== --- test/CodeGen/RISCV/bare-select.ll +++ test/CodeGen/RISCV/bare-select.ll @@ -10,15 +10,15 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: bne a0, zero, .LBB0_2 +; RV32I-NEXT: bnez a0, .LBB0_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi 
a1, a2, 0 +; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: .LBB0_2: -; RV32I-NEXT: addi a0, a1, 0 +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = select i1 %a, i32 %b, i32 %c ret i32 %1 } Index: test/CodeGen/RISCV/blockaddress.ll =================================================================== --- test/CodeGen/RISCV/blockaddress.ll +++ test/CodeGen/RISCV/blockaddress.ll @@ -17,13 +17,13 @@ ; RV32I-NEXT: addi a1, a1, %lo(.Ltmp0) ; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: lw a0, 0(a0) -; RV32I-NEXT: jalr zero, a0, 0 +; RV32I-NEXT: jr a0 ; RV32I-NEXT: .Ltmp0: # Block address taken ; RV32I-NEXT: .LBB0_1: # %block ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret store volatile i8* blockaddress(@test_blockaddress, %block), i8** @addr %val = load volatile i8*, i8** @addr indirectbr i8* %val, [label %block] Index: test/CodeGen/RISCV/branch.ll =================================================================== --- test/CodeGen/RISCV/branch.ll +++ test/CodeGen/RISCV/branch.ll @@ -11,55 +11,55 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: beq a3, a0, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_1 +; RV32I-NEXT: j .LBB0_1 ; RV32I-NEXT: .LBB0_1: # %test2 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bne a3, a0, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_2 +; RV32I-NEXT: j .LBB0_2 ; RV32I-NEXT: .LBB0_2: # %test3 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: blt a3, a0, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_3 +; RV32I-NEXT: j .LBB0_3 ; RV32I-NEXT: .LBB0_3: # %test4 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bge a3, a0, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_4 +; RV32I-NEXT: j .LBB0_4 ; RV32I-NEXT: .LBB0_4: # %test5 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bltu a3, a0, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_5 +; RV32I-NEXT: j .LBB0_5 ; RV32I-NEXT: .LBB0_5: # %test6 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bgeu a3, a0, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_6 +; RV32I-NEXT: j .LBB0_6 ; RV32I-NEXT: .LBB0_6: # %test7 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: blt a0, a3, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_7 +; RV32I-NEXT: j .LBB0_7 ; RV32I-NEXT: .LBB0_7: # %test8 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bge a0, a3, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_8 +; RV32I-NEXT: j .LBB0_8 ; RV32I-NEXT: .LBB0_8: # %test9 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bltu a0, a3, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_9 +; RV32I-NEXT: j .LBB0_9 ; RV32I-NEXT: .LBB0_9: # %test10 ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bgeu a0, a3, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_10 +; RV32I-NEXT: j .LBB0_10 ; RV32I-NEXT: .LBB0_10: # %test11 ; RV32I-NEXT: lw a0, 0(a1) ; RV32I-NEXT: andi a0, a2, 1 -; RV32I-NEXT: bne a0, zero, .LBB0_12 -; RV32I-NEXT: jal zero, .LBB0_11 +; RV32I-NEXT: bnez a0, .LBB0_12 +; RV32I-NEXT: j .LBB0_11 ; RV32I-NEXT: .LBB0_11: # %test12 ; RV32I-NEXT: lw a0, 0(a1) ; RV32I-NEXT: .LBB0_12: # %end ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %val1 = load volatile i32, i32* %b %tst1 = icmp eq i32 %val1, %a Index: test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll =================================================================== --- test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -20,7 +20,7 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, 4080 
-; RV32I-NEXT: addi a1, a1, 0 +; RV32I-NEXT: mv a1, a1 ; RV32I-NEXT: slli a2, a0, 8 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: slli a0, a0, 24 @@ -29,7 +29,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i16 @llvm.bswap.i16(i16 %a) ret i16 %tmp } @@ -48,7 +48,7 @@ ; RV32I-NEXT: srli a2, a0, 24 ; RV32I-NEXT: or a1, a1, a2 ; RV32I-NEXT: lui a2, 4080 -; RV32I-NEXT: addi a2, a2, 0 +; RV32I-NEXT: mv a2, a2 ; RV32I-NEXT: slli a3, a0, 8 ; RV32I-NEXT: and a2, a3, a2 ; RV32I-NEXT: slli a0, a0, 24 @@ -57,7 +57,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i32 @llvm.bswap.i32(i32 %a) ret i32 %tmp } @@ -76,7 +76,7 @@ ; RV32I-NEXT: srli a4, a1, 24 ; RV32I-NEXT: or a2, a2, a4 ; RV32I-NEXT: lui a4, 4080 -; RV32I-NEXT: addi a4, a4, 0 +; RV32I-NEXT: mv a4, a4 ; RV32I-NEXT: slli a5, a1, 8 ; RV32I-NEXT: and a5, a5, a4 ; RV32I-NEXT: slli a1, a1, 24 @@ -91,11 +91,11 @@ ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: or a1, a0, a1 -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i64 @llvm.bswap.i64(i64 %a) ret i64 %tmp } @@ -107,14 +107,14 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a1, a0, 0 +; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: addi a0, zero, 8 ; RV32I-NEXT: andi a2, a1, 255 -; RV32I-NEXT: beq a2, zero, .LBB3_2 -; RV32I-NEXT: jal zero, .LBB3_1 +; RV32I-NEXT: beqz a2, .LBB3_2 +; RV32I-NEXT: j .LBB3_1 ; RV32I-NEXT: .LBB3_1: # %cond.false ; RV32I-NEXT: addi a0, a1, -1 -; RV32I-NEXT: xori a1, a1, -1 +; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 @@ -136,13 +136,13 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: .LBB3_2: # %cond.end ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false) ret i8 %tmp } @@ -154,16 +154,16 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a1, a0, 0 +; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: addi a0, zero, 16 ; RV32I-NEXT: lui a2, 16 ; RV32I-NEXT: addi a2, a2, -1 ; RV32I-NEXT: and a2, a1, a2 -; RV32I-NEXT: beq a2, zero, .LBB4_2 -; RV32I-NEXT: jal zero, .LBB4_1 +; RV32I-NEXT: beqz a2, .LBB4_2 +; RV32I-NEXT: j .LBB4_1 ; RV32I-NEXT: .LBB4_1: # %cond.false ; RV32I-NEXT: addi a0, a1, -1 -; RV32I-NEXT: xori a1, a1, -1 +; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 @@ -185,13 +185,13 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: .LBB4_2: # %cond.end ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false) ret i16 %tmp } @@ -203,13 +203,13 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 
8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a1, a0, 0 +; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: addi a0, zero, 32 -; RV32I-NEXT: beq a1, zero, .LBB5_2 -; RV32I-NEXT: jal zero, .LBB5_1 +; RV32I-NEXT: beqz a1, .LBB5_2 +; RV32I-NEXT: j .LBB5_1 ; RV32I-NEXT: .LBB5_1: # %cond.false ; RV32I-NEXT: addi a0, a1, -1 -; RV32I-NEXT: xori a1, a1, -1 +; RV32I-NEXT: not a1, a1 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 @@ -231,13 +231,13 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: .LBB5_2: # %cond.end ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false) ret i32 %tmp } @@ -249,10 +249,10 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a1, a0, 0 +; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: addi a0, zero, 32 -; RV32I-NEXT: beq a1, zero, .LBB6_2 -; RV32I-NEXT: jal zero, .LBB6_1 +; RV32I-NEXT: beqz a1, .LBB6_2 +; RV32I-NEXT: j .LBB6_1 ; RV32I-NEXT: .LBB6_1: # %cond.false ; RV32I-NEXT: srli a0, a1, 1 ; RV32I-NEXT: or a0, a1, a0 @@ -266,7 +266,7 @@ ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 -; RV32I-NEXT: xori a0, a0, -1 +; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: srli a2, a0, 1 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 @@ -285,13 +285,13 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: .LBB6_2: # %cond.end ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false) ret i32 %tmp } @@ -310,10 +310,10 @@ ; RV32I-NEXT: sw s6, 16(sp) ; RV32I-NEXT: sw s7, 12(sp) ; RV32I-NEXT: addi s0, sp, 48 -; RV32I-NEXT: addi s1, a1, 0 -; RV32I-NEXT: addi s2, a0, 0 +; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: addi a0, s2, -1 -; RV32I-NEXT: xori a1, s2, -1 +; RV32I-NEXT: not a1, s2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi s4, a1, 1365 @@ -335,10 +335,10 @@ ; RV32I-NEXT: lui a1, 61681 ; RV32I-NEXT: addi s7, a1, -241 ; RV32I-NEXT: and a0, a0, s7 -; RV32I-NEXT: addi a1, s3, 0 -; RV32I-NEXT: jalr ra, s6, 0 +; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: jalr s6 ; RV32I-NEXT: addi a1, s1, -1 -; RV32I-NEXT: xori a2, s1, -1 +; RV32I-NEXT: not a2, s1 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: srli a2, a1, 1 ; RV32I-NEXT: and a2, a2, s4 @@ -351,16 +351,16 @@ ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: and a1, a1, s7 ; RV32I-NEXT: srli s1, a0, 24 -; RV32I-NEXT: addi a0, a1, 0 -; RV32I-NEXT: addi a1, s3, 0 -; RV32I-NEXT: jalr ra, s6, 0 -; RV32I-NEXT: bne s2, zero, .LBB7_2 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: jalr s6 +; RV32I-NEXT: bnez s2, .LBB7_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: addi s1, a0, 32 ; RV32I-NEXT: .LBB7_2: -; RV32I-NEXT: addi a0, s1, 0 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s7, 12(sp) ; RV32I-NEXT: lw s6, 16(sp) ; RV32I-NEXT: lw s5, 20(sp) @@ -371,7 +371,7 @@ ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: 
addi sp, sp, 48 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false) ret i64 %tmp } @@ -384,7 +384,7 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: addi a1, a0, -1 -; RV32I-NEXT: xori a0, a0, -1 +; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 @@ -406,12 +406,12 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true) ret i8 %tmp } @@ -424,7 +424,7 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: addi a1, a0, -1 -; RV32I-NEXT: xori a0, a0, -1 +; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 @@ -446,12 +446,12 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true) ret i16 %tmp } @@ -464,7 +464,7 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: addi a1, a0, -1 -; RV32I-NEXT: xori a0, a0, -1 +; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi a1, a1, 1365 @@ -486,12 +486,12 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true) ret i32 %tmp } @@ -510,10 +510,10 @@ ; RV32I-NEXT: sw s6, 16(sp) ; RV32I-NEXT: sw s7, 12(sp) ; RV32I-NEXT: addi s0, sp, 48 -; RV32I-NEXT: addi s1, a1, 0 -; RV32I-NEXT: addi s2, a0, 0 +; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: addi a0, s2, -1 -; RV32I-NEXT: xori a1, s2, -1 +; RV32I-NEXT: not a1, s2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: lui a1, 349525 ; RV32I-NEXT: addi s4, a1, 1365 @@ -535,10 +535,10 @@ ; RV32I-NEXT: lui a1, 61681 ; RV32I-NEXT: addi s7, a1, -241 ; RV32I-NEXT: and a0, a0, s7 -; RV32I-NEXT: addi a1, s3, 0 -; RV32I-NEXT: jalr ra, s6, 0 +; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: jalr s6 ; RV32I-NEXT: addi a1, s1, -1 -; RV32I-NEXT: xori a2, s1, -1 +; RV32I-NEXT: not a2, s1 ; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: srli a2, a1, 1 ; RV32I-NEXT: and a2, a2, s4 @@ -551,16 +551,16 @@ ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: and a1, a1, s7 ; RV32I-NEXT: srli s1, a0, 24 -; RV32I-NEXT: addi a0, a1, 0 -; RV32I-NEXT: addi a1, s3, 0 -; RV32I-NEXT: jalr ra, s6, 0 -; RV32I-NEXT: bne s2, zero, .LBB11_2 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: jalr s6 +; RV32I-NEXT: bnez s2, .LBB11_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: addi s1, a0, 32 ; RV32I-NEXT: .LBB11_2: -; RV32I-NEXT: addi a0, s1, 0 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s7, 12(sp) ; RV32I-NEXT: lw s6, 16(sp) ; RV32I-NEXT: lw s5, 
20(sp) @@ -571,7 +571,7 @@ ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true) ret i64 %tmp } @@ -603,12 +603,12 @@ ; RV32I-NEXT: addi a1, a1, 257 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: srli a0, a0, 24 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @llvm.ctpop.i32(i32 %a) ret i32 %1 } Index: test/CodeGen/RISCV/byval.ll =================================================================== --- test/CodeGen/RISCV/byval.ll +++ test/CodeGen/RISCV/byval.ll @@ -16,7 +16,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret entry: %0 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i32 0, i32 0 %1 = load i32, i32* %0, align 4 @@ -50,11 +50,11 @@ ; RV32I-NEXT: lui a0, %hi(callee) ; RV32I-NEXT: addi a1, a0, %lo(callee) ; RV32I-NEXT: addi a0, s0, -24 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret entry: %call = call i32 @callee(%struct.Foo* byval @foo) ret void Index: test/CodeGen/RISCV/calling-conv-sext-zext.ll =================================================================== --- test/CodeGen/RISCV/calling-conv-sext-zext.ll +++ test/CodeGen/RISCV/calling-conv-sext-zext.ll @@ -12,7 +12,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i8 %a } @@ -27,11 +27,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(receive_uint8) ; RV32I-NEXT: addi a1, a1, %lo(receive_uint8) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret call void @receive_uint8(i8 zeroext %a) ret void } @@ -47,11 +47,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_uint8) ; RV32I-NEXT: addi a0, a0, %lo(return_uint8) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call zeroext i8 @return_uint8() ret i8 %1 } @@ -68,7 +68,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i8 %a } @@ -85,11 +85,11 @@ ; RV32I-NEXT: addi a1, a1, %lo(receive_sint8) ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret call void @receive_sint8(i8 signext %a) ret void @@ -104,13 +104,13 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_uint8) ; RV32I-NEXT: addi a0, a0, %lo(return_uint8) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call zeroext i8 @return_uint8() ret i8 %1 } @@ -125,7 +125,7 
@@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i8 %a to i32 ret i32 %1 } @@ -141,11 +141,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(receive_anyint32) ; RV32I-NEXT: addi a1, a1, %lo(receive_anyint32) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i8 %a to i32 call void @receive_anyint32(i32 signext %1) ret void @@ -160,11 +160,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_uint8) ; RV32I-NEXT: addi a0, a0, %lo(return_uint8) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call zeroext i8 @return_uint8() %2 = zext i8 %1 to i32 ret i32 %2 @@ -181,7 +181,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i8 %a } @@ -195,11 +195,11 @@ ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: lui a1, %hi(receive_uint8) ; RV32I-NEXT: addi a1, a1, %lo(receive_uint8) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret call void @receive_uint8(i8 zeroext %a) ret void } @@ -215,12 +215,12 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_sint8) ; RV32I-NEXT: addi a0, a0, %lo(return_sint8) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call signext i8 @return_sint8() ret i8 %1 } @@ -235,7 +235,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i8 %a } @@ -248,11 +248,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(receive_sint8) ; RV32I-NEXT: addi a1, a1, %lo(receive_sint8) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret call void @receive_sint8(i8 signext %a) ret void } @@ -266,11 +266,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_sint8) ; RV32I-NEXT: addi a0, a0, %lo(return_sint8) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call signext i8 @return_sint8() ret i8 %1 } @@ -285,7 +285,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i8 %a to i32 ret i32 %1 } @@ -299,11 +299,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(receive_anyint32) ; RV32I-NEXT: addi a1, a1, %lo(receive_anyint32) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i8 %a to i32 call void @receive_anyint32(i32 signext %1) ret void @@ -318,11 +318,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_sint8) ; 
RV32I-NEXT: addi a0, a0, %lo(return_sint8) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call signext i8 @return_sint8() %2 = sext i8 %1 to i32 ret i32 %2 @@ -339,7 +339,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i8 ret i8 %1 } @@ -354,11 +354,11 @@ ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: lui a1, %hi(receive_uint8) ; RV32I-NEXT: addi a1, a1, %lo(receive_uint8) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i8 call void @receive_uint8(i8 zeroext %1) ret void @@ -375,12 +375,12 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_anyint32) ; RV32I-NEXT: addi a0, a0, %lo(return_anyint32) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call signext i32 @return_anyint32() %2 = trunc i32 %1 to i8 ret i8 %2 @@ -398,7 +398,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i8 ret i8 %1 } @@ -414,11 +414,11 @@ ; RV32I-NEXT: addi a1, a1, %lo(receive_sint8) ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i8 call void @receive_sint8(i8 signext %1) ret void @@ -433,13 +433,13 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_anyint32) ; RV32I-NEXT: addi a0, a0, %lo(return_anyint32) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: slli a0, a0, 24 ; RV32I-NEXT: srai a0, a0, 24 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call signext i32 @return_anyint32() %2 = trunc i32 %1 to i8 ret i8 %2 @@ -455,7 +455,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i32 %a } @@ -468,11 +468,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(receive_anyint32) ; RV32I-NEXT: addi a1, a1, %lo(receive_anyint32) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret call void @receive_anyint32(i32 signext %a) ret void } @@ -486,11 +486,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(return_anyint32) ; RV32I-NEXT: addi a0, a0, %lo(return_anyint32) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call signext i32 @return_anyint32() ret i32 %1 } Index: test/CodeGen/RISCV/calling-conv.ll =================================================================== --- test/CodeGen/RISCV/calling-conv.ll +++ test/CodeGen/RISCV/calling-conv.ll @@ -19,15 +19,15 @@ ; RV32I-NEXT: sw 
s3, 12(sp) ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: addi s0, sp, 32 -; RV32I-NEXT: addi s1, a4, 0 -; RV32I-NEXT: addi s2, a3, 0 -; RV32I-NEXT: addi s3, a1, 0 -; RV32I-NEXT: addi s4, a0, 0 +; RV32I-NEXT: mv s1, a4 +; RV32I-NEXT: mv s2, a3 +; RV32I-NEXT: mv s3, a1 +; RV32I-NEXT: mv s4, a0 ; RV32I-NEXT: lui a0, %hi(__fixdfsi) ; RV32I-NEXT: addi a2, a0, %lo(__fixdfsi) -; RV32I-NEXT: addi a0, a5, 0 -; RV32I-NEXT: addi a1, a6, 0 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: mv a0, a5 +; RV32I-NEXT: mv a1, a6 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: add a1, s4, s3 ; RV32I-NEXT: add a1, a1, s2 ; RV32I-NEXT: add a1, a1, s1 @@ -39,7 +39,7 @@ ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %b_trunc = trunc i64 %b to i32 %e_fptosi = fptosi double %e to i32 %1 = add i32 %a, %b_trunc @@ -57,20 +57,20 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, 262464 -; RV32I-NEXT: addi a6, a0, 0 +; RV32I-NEXT: mv a6, a0 ; RV32I-NEXT: lui a0, %hi(callee_scalars) ; RV32I-NEXT: addi a7, a0, %lo(callee_scalars) ; RV32I-NEXT: addi a0, zero, 1 ; RV32I-NEXT: addi a1, zero, 2 ; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: addi a4, zero, 4 -; RV32I-NEXT: addi a2, zero, 0 -; RV32I-NEXT: addi a5, zero, 0 -; RV32I-NEXT: jalr ra, a7, 0 +; RV32I-NEXT: mv a2, zero +; RV32I-NEXT: mv a5, zero +; RV32I-NEXT: jalr a7 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @callee_scalars(i32 1, i64 2, i32 3, i32 4, double 5.000000e+00) ret i32 %1 } @@ -100,11 +100,11 @@ ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: xor a0, a0, zero -; RV32I-NEXT: sltiu a0, a0, 1 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %b_bitcast = bitcast fp128 %b to i128 %1 = icmp eq i128 %a, %b_bitcast %2 = zext i1 %1 to i32 @@ -127,17 +127,17 @@ ; RV32I-NEXT: addi a0, zero, 1 ; RV32I-NEXT: sw a0, -24(s0) ; RV32I-NEXT: lui a0, 524272 -; RV32I-NEXT: addi a0, a0, 0 +; RV32I-NEXT: mv a0, a0 ; RV32I-NEXT: sw a0, -36(s0) ; RV32I-NEXT: lui a0, %hi(callee_large_scalars) ; RV32I-NEXT: addi a2, a0, %lo(callee_large_scalars) ; RV32I-NEXT: addi a0, s0, -24 ; RV32I-NEXT: addi a1, s0, -48 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @callee_large_scalars(i128 1, fp128 0xL00000000000000007FFF000000000000) ret i32 %1 } @@ -169,11 +169,11 @@ ; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: xor a0, a0, zero -; RV32I-NEXT: sltiu a0, a0, 1 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %j_bitcast = bitcast fp128 %j to i128 %1 = icmp eq i128 %h, %j_bitcast %2 = zext i1 %1 to i32 @@ -200,7 +200,7 @@ ; RV32I-NEXT: addi a0, zero, 8 ; RV32I-NEXT: sw a0, -24(s0) ; RV32I-NEXT: lui a0, 524272 -; RV32I-NEXT: addi a0, a0, 0 +; RV32I-NEXT: mv a0, a0 ; RV32I-NEXT: sw a0, -36(s0) ; RV32I-NEXT: lui a0, %hi(callee_large_scalars_exhausted_regs) ; RV32I-NEXT: addi t0, a0, %lo(callee_large_scalars_exhausted_regs) @@ -212,11 +212,11 @@ ; RV32I-NEXT: addi a5, zero, 6 ; RV32I-NEXT: addi a6, zero, 7 ; RV32I-NEXT: addi a7, s0, -24 -; RV32I-NEXT: jalr ra, t0, 0 +; 
RV32I-NEXT: jalr t0 ; RV32I-NEXT: lw s0, 56(sp) ; RV32I-NEXT: lw ra, 60(sp) ; RV32I-NEXT: addi sp, sp, 64 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @callee_large_scalars_exhausted_regs( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 8, i32 9, fp128 0xL00000000000000007FFF000000000000) @@ -232,17 +232,17 @@ ; RV32I-NEXT: sw ra, 28(sp) ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: addi s0, sp, 32 -; RV32I-NEXT: addi a2, a1, 0 -; RV32I-NEXT: addi a1, a0, 0 +; RV32I-NEXT: mv a2, a1 +; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: lui a0, %hi(__floatditf) ; RV32I-NEXT: addi a3, a0, %lo(__floatditf) ; RV32I-NEXT: addi a0, s0, -24 -; RV32I-NEXT: jalr ra, a3, 0 +; RV32I-NEXT: jalr a3 ; RV32I-NEXT: lw a0, -24(s0) ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sitofp i64 %a to fp128 %2 = bitcast fp128 %1 to i128 %3 = trunc i128 %2 to i32 @@ -269,7 +269,7 @@ ; RV32I-NEXT: andi a0, a0, 255 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: add a0, a0, a2 -; RV32I-NEXT: sltiu a1, a3, 1 +; RV32I-NEXT: seqz a1, a3 ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: add a0, a0, a5 ; RV32I-NEXT: add a0, a0, a6 @@ -278,7 +278,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %a_ext = zext i8 %a to i32 %b_ext = zext i16 %b to i32 %1 = add i32 %a_ext, %b_ext @@ -311,12 +311,12 @@ ; RV32I-NEXT: addi a5, zero, 5 ; RV32I-NEXT: addi a6, zero, 6 ; RV32I-NEXT: addi a7, zero, 7 -; RV32I-NEXT: addi a4, zero, 0 -; RV32I-NEXT: jalr ra, t0, 0 +; RV32I-NEXT: mv a4, zero +; RV32I-NEXT: jalr t0 ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @callee_many_scalars(i8 1, i16 2, i32 3, i64 4, i32 5, i32 6, i64 7, i32 8) ret i32 %1 } @@ -333,11 +333,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: sltiu a0, a0, 1 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = extractvalue [2 x i32] %a.coerce, 0 %2 = extractvalue [2 x i32] %a.coerce, 1 %3 = icmp eq i32 %1, %2 @@ -356,11 +356,11 @@ ; RV32I-NEXT: addi a2, a0, %lo(callee_small_coerced_struct) ; RV32I-NEXT: addi a0, zero, 1 ; RV32I-NEXT: addi a1, zero, 2 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @callee_small_coerced_struct([2 x i32] [i32 1, i32 2]) ret i32 %1 } @@ -382,7 +382,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 0 %2 = getelementptr inbounds %struct.large, %struct.large* %a, i32 0, i32 3 %3 = load i32, i32* %1 @@ -413,11 +413,11 @@ ; RV32I-NEXT: lui a0, %hi(callee_large_struct) ; RV32I-NEXT: addi a1, a0, %lo(callee_large_struct) ; RV32I-NEXT: addi a0, s0, -40 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %ls = alloca %struct.large, align 4 %1 = bitcast %struct.large* %ls to i8* %a = getelementptr inbounds %struct.large, %struct.large* %ls, i32 0, i32 
0 @@ -456,7 +456,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = bitcast fp128 %c to i128 %2 = trunc i128 %1 to i32 %3 = add i32 %2, %g @@ -516,11 +516,11 @@ ; RV32I-NEXT: addi a4, zero, 13 ; RV32I-NEXT: addi a6, zero, 4 ; RV32I-NEXT: addi a7, zero, 14 -; RV32I-NEXT: jalr ra, t0, 0 +; RV32I-NEXT: jalr t0 ; RV32I-NEXT: lw s0, 56(sp) ; RV32I-NEXT: lw ra, 60(sp) ; RV32I-NEXT: addi sp, sp, 64 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @callee_aligned_stack(i32 1, i32 11, fp128 0xLEB851EB851EB851F400091EB851EB851, i32 12, i32 13, i64 20000000000, i32 14, i32 15, double 2.720000e+00, i32 16, @@ -543,7 +543,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i64 1234567898765 } @@ -556,7 +556,7 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(callee_small_scalar_ret) ; RV32I-NEXT: addi a0, a0, %lo(callee_small_scalar_ret) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: lui a2, 56 ; RV32I-NEXT: addi a2, a2, 580 ; RV32I-NEXT: xor a1, a1, a2 @@ -565,11 +565,11 @@ ; RV32I-NEXT: xor a0, a0, a2 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: xor a0, a0, zero -; RV32I-NEXT: sltiu a0, a0, 1 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i64 @callee_small_scalar_ret() %2 = icmp eq i64 987654321234567, %1 %3 = zext i1 %2 to i32 @@ -586,11 +586,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: addi a0, zero, 1 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret %struct.small { i32 1, i32* null } } @@ -603,12 +603,12 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a0, %hi(callee_small_struct_ret) ; RV32I-NEXT: addi a0, a0, %lo(callee_small_struct_ret) -; RV32I-NEXT: jalr ra, a0, 0 +; RV32I-NEXT: jalr a0 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call %struct.small @callee_small_struct_ret() %2 = extractvalue %struct.small %1, 0 %3 = extractvalue %struct.small %1, 1 @@ -627,7 +627,7 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, 524272 -; RV32I-NEXT: addi a1, a1, 0 +; RV32I-NEXT: mv a1, a1 ; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: sw zero, 8(a0) ; RV32I-NEXT: sw zero, 4(a0) @@ -635,7 +635,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret fp128 0xL00000000000000007FFF000000000000 } @@ -649,11 +649,11 @@ ; RV32I-NEXT: lui a0, %hi(callee_large_scalar_ret) ; RV32I-NEXT: addi a1, a0, %lo(callee_large_scalar_ret) ; RV32I-NEXT: addi a0, s0, -32 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call fp128 @callee_large_scalar_ret() ret void } @@ -678,7 +678,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 0 store i32 1, 
i32* %a, align 4 %b = getelementptr inbounds %struct.large, %struct.large* %agg.result, i32 0, i32 1 @@ -700,14 +700,14 @@ ; RV32I-NEXT: lui a0, %hi(callee_large_struct_ret) ; RV32I-NEXT: addi a1, a0, %lo(callee_large_struct_ret) ; RV32I-NEXT: addi a0, s0, -24 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw a0, -12(s0) ; RV32I-NEXT: lw a1, -24(s0) ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = alloca %struct.large call void @callee_large_struct_ret(%struct.large* sret %1) %2 = getelementptr inbounds %struct.large, %struct.large* %1, i32 0, i32 0 Index: test/CodeGen/RISCV/calls.ll =================================================================== --- test/CodeGen/RISCV/calls.ll +++ test/CodeGen/RISCV/calls.ll @@ -13,11 +13,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(external_function) ; RV32I-NEXT: addi a1, a1, %lo(external_function) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @external_function(i32 %a) ret i32 %1 } @@ -33,7 +33,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = add i32 %a, 1 ret i32 %1 } @@ -47,11 +47,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(defined_function) ; RV32I-NEXT: addi a1, a1, %lo(defined_function) -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @defined_function(i32 %a) ret i32 %1 } @@ -63,13 +63,13 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a2, a0, 0 -; RV32I-NEXT: addi a0, a1, 0 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 %a(i32 %b) ret i32 %1 } @@ -88,7 +88,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = add i32 %a, %b ret i32 %1 } @@ -101,17 +101,17 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: sw s1, 4(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi s1, a0, 0 +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lui a0, %hi(fastcc_function) ; RV32I-NEXT: addi a2, a0, %lo(fastcc_function) -; RV32I-NEXT: addi a0, s1, 0 -; RV32I-NEXT: jalr ra, a2, 0 -; RV32I-NEXT: addi a0, s1, 0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: jalr a2 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: lw s1, 4(sp) ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call fastcc i32 @fastcc_function(i32 %a, i32 %b) ret i32 %a } @@ -126,26 +126,26 @@ ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: addi s0, sp, 32 -; RV32I-NEXT: addi s1, a0, 0 +; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: sw s1, 4(sp) ; RV32I-NEXT: sw s1, 0(sp) ; RV32I-NEXT: lui a0, %hi(external_many_args) ; RV32I-NEXT: addi t0, a0, %lo(external_many_args) -; RV32I-NEXT: addi a0, s1, 0 -; RV32I-NEXT: addi a1, s1, 0 -; RV32I-NEXT: addi a2, s1, 0 -; RV32I-NEXT: addi a3, s1, 0 -; RV32I-NEXT: addi 
a4, s1, 0 -; RV32I-NEXT: addi a5, s1, 0 -; RV32I-NEXT: addi a6, s1, 0 -; RV32I-NEXT: addi a7, s1, 0 -; RV32I-NEXT: jalr ra, t0, 0 -; RV32I-NEXT: addi a0, s1, 0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a4, s1 +; RV32I-NEXT: mv a5, s1 +; RV32I-NEXT: mv a6, s1 +; RV32I-NEXT: mv a7, s1 +; RV32I-NEXT: jalr t0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @external_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a) ret i32 %a @@ -163,7 +163,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %added = add i32 %j, 1 ret i32 %added } @@ -179,18 +179,18 @@ ; RV32I-NEXT: sw a0, 0(sp) ; RV32I-NEXT: lui a1, %hi(defined_many_args) ; RV32I-NEXT: addi t0, a1, %lo(defined_many_args) -; RV32I-NEXT: addi a1, a0, 0 -; RV32I-NEXT: addi a2, a0, 0 -; RV32I-NEXT: addi a3, a0, 0 -; RV32I-NEXT: addi a4, a0, 0 -; RV32I-NEXT: addi a5, a0, 0 -; RV32I-NEXT: addi a6, a0, 0 -; RV32I-NEXT: addi a7, a0, 0 -; RV32I-NEXT: jalr ra, t0, 0 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: mv a3, a0 +; RV32I-NEXT: mv a4, a0 +; RV32I-NEXT: mv a5, a0 +; RV32I-NEXT: mv a6, a0 +; RV32I-NEXT: mv a7, a0 +; RV32I-NEXT: jalr t0 ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = call i32 @defined_many_args(i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a, i32 %a) ret i32 %1 Index: test/CodeGen/RISCV/div.ll =================================================================== --- test/CodeGen/RISCV/div.ll +++ test/CodeGen/RISCV/div.ll @@ -11,11 +11,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a2, %hi(__udivsi3) ; RV32I-NEXT: addi a2, a2, %lo(__udivsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = udiv i32 %a, %b ret i32 %1 } @@ -30,11 +30,11 @@ ; RV32I-NEXT: lui a1, %hi(__udivsi3) ; RV32I-NEXT: addi a2, a1, %lo(__udivsi3) ; RV32I-NEXT: addi a1, zero, 5 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = udiv i32 %a, 5 ret i32 %1 } @@ -50,7 +50,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = udiv i32 %a, 8 ret i32 %1 } @@ -64,11 +64,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a4, %hi(__udivdi3) ; RV32I-NEXT: addi a4, a4, %lo(__udivdi3) -; RV32I-NEXT: jalr ra, a4, 0 +; RV32I-NEXT: jalr a4 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = udiv i64 %a, %b ret i64 %1 } @@ -83,12 +83,12 @@ ; RV32I-NEXT: lui a2, %hi(__udivdi3) ; RV32I-NEXT: addi a4, a2, %lo(__udivdi3) ; RV32I-NEXT: addi a2, zero, 5 -; RV32I-NEXT: addi a3, zero, 0 -; RV32I-NEXT: jalr ra, a4, 0 +; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: jalr a4 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = udiv i64 %a, 5 ret i64 %1 } @@ 
-102,11 +102,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a2, %hi(__divsi3) ; RV32I-NEXT: addi a2, a2, %lo(__divsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sdiv i32 %a, %b ret i32 %1 } @@ -121,11 +121,11 @@ ; RV32I-NEXT: lui a1, %hi(__divsi3) ; RV32I-NEXT: addi a2, a1, %lo(__divsi3) ; RV32I-NEXT: addi a1, zero, 5 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sdiv i32 %a, 5 ret i32 %1 } @@ -144,7 +144,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sdiv i32 %a, 8 ret i32 %1 } @@ -158,11 +158,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a4, %hi(__divdi3) ; RV32I-NEXT: addi a4, a4, %lo(__divdi3) -; RV32I-NEXT: jalr ra, a4, 0 +; RV32I-NEXT: jalr a4 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sdiv i64 %a, %b ret i64 %1 } @@ -177,12 +177,12 @@ ; RV32I-NEXT: lui a2, %hi(__divdi3) ; RV32I-NEXT: addi a4, a2, %lo(__divdi3) ; RV32I-NEXT: addi a2, zero, 5 -; RV32I-NEXT: addi a3, zero, 0 -; RV32I-NEXT: jalr ra, a4, 0 +; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: jalr a4 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sdiv i64 %a, 5 ret i64 %1 } Index: test/CodeGen/RISCV/fp128.ll =================================================================== --- test/CodeGen/RISCV/fp128.ll +++ test/CodeGen/RISCV/fp128.ll @@ -51,13 +51,13 @@ ; RV32I-NEXT: addi a2, a0, %lo(__netf2) ; RV32I-NEXT: addi a0, s0, -24 ; RV32I-NEXT: addi a1, s0, -40 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: xor a0, a0, zero -; RV32I-NEXT: sltu a0, zero, a0 +; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = load fp128, fp128* @x, align 16 %2 = load fp128, fp128* @y, align 16 %cmp = fcmp une fp128 %1, %2 @@ -109,7 +109,7 @@ ; RV32I-NEXT: addi a0, s0, -24 ; RV32I-NEXT: addi a1, s0, -40 ; RV32I-NEXT: addi a2, s0, -56 -; RV32I-NEXT: jalr ra, a3, 0 +; RV32I-NEXT: jalr a3 ; RV32I-NEXT: lw a0, -12(s0) ; RV32I-NEXT: sw a0, -60(s0) ; RV32I-NEXT: lw a0, -16(s0) @@ -121,11 +121,11 @@ ; RV32I-NEXT: lui a0, %hi(__fixtfsi) ; RV32I-NEXT: addi a1, a0, %lo(__fixtfsi) ; RV32I-NEXT: addi a0, s0, -72 -; RV32I-NEXT: jalr ra, a1, 0 +; RV32I-NEXT: jalr a1 ; RV32I-NEXT: lw s0, 72(sp) ; RV32I-NEXT: lw ra, 76(sp) ; RV32I-NEXT: addi sp, sp, 80 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = load fp128, fp128* @x, align 16 %2 = load fp128, fp128* @y, align 16 %3 = fadd fp128 %1, %2 Index: test/CodeGen/RISCV/frame.ll =================================================================== --- test/CodeGen/RISCV/frame.ll +++ test/CodeGen/RISCV/frame.ll @@ -19,12 +19,12 @@ ; RV32I-NEXT: lui a0, %hi(test1) ; RV32I-NEXT: addi a1, a0, %lo(test1) ; RV32I-NEXT: addi a0, s0, -28 -; RV32I-NEXT: jalr ra, a1, 0 -; RV32I-NEXT: addi a0, zero, 0 +; RV32I-NEXT: jalr a1 +; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: lw s0, 24(sp) ; RV32I-NEXT: lw ra, 28(sp) ; RV32I-NEXT: addi sp, sp, 32 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %key = alloca 
%struct.key_t, align 4 %1 = bitcast %struct.key_t* %key to i8* call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 20, i32 4, i1 false) Index: test/CodeGen/RISCV/i32-icmp.ll =================================================================== --- test/CodeGen/RISCV/i32-icmp.ll +++ test/CodeGen/RISCV/i32-icmp.ll @@ -13,11 +13,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: sltiu a0, a0, 1 +; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp eq i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -31,11 +31,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: sltu a0, zero, a0 +; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp ne i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -52,7 +52,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp ugt i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -70,7 +70,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp uge i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -87,7 +87,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp ult i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -105,7 +105,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp ule i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -122,7 +122,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp sgt i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -140,7 +140,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp sge i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -157,7 +157,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp slt i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -175,7 +175,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = icmp sle i32 %a, %b %2 = zext i1 %1 to i32 ret i32 %2 Index: test/CodeGen/RISCV/imm.ll =================================================================== --- test/CodeGen/RISCV/imm.ll +++ test/CodeGen/RISCV/imm.ll @@ -11,11 +11,11 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a0, zero, 0 +; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i32 0 } @@ -30,7 +30,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i32 2047 } @@ -45,7 +45,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i32 -2048 } 
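These test hunks all follow the same mechanical rewrite: with riscv-no-aliases now off by default, the printer emits ret in place of jalr zero, ra, 0, mv in place of addi rd, rs, 0, j in place of jal zero, and so on. The option is hidden rather than removed, so a test that deliberately wants the expanded encodings can still opt back in. The sketch below is hypothetical; the RUN line, check prefix, and function name are illustrative and are not taken from this patch.

; Hypothetical opt-out sketch, assuming the hidden -riscv-no-aliases flag from
; the RISCVInstPrinter.cpp hunk restores the pre-patch expanded output when
; passed to llc.
; RUN: llc -mtriple=riscv32 -riscv-no-aliases -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I-NOALIAS %s
define i32 @pass_through(i32 %a) {
; RV32I-NOALIAS-LABEL: pass_through:
; RV32I-NOALIAS: jalr zero, ra, 0
  ret i32 %a
}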
@@ -61,7 +61,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i32 1735928559 } @@ -77,6 +77,6 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ret i32 -559038737 } Index: test/CodeGen/RISCV/indirectbr.ll =================================================================== --- test/CodeGen/RISCV/indirectbr.ll +++ test/CodeGen/RISCV/indirectbr.ll @@ -9,13 +9,13 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: jalr zero, a0, 0 +; RV32I-NEXT: jr a0 ; RV32I-NEXT: .LBB0_1: # %ret -; RV32I-NEXT: addi a0, zero, 0 +; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret indirectbr i8* %target, [label %test_label] test_label: br label %ret @@ -32,11 +32,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: jalr zero, a0, 1380 ; RV32I-NEXT: .LBB1_1: # %ret -; RV32I-NEXT: addi a0, zero, 0 +; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %target = getelementptr inbounds i8, i8* %a, i32 1380 indirectbr i8* %target, [label %test_label] test_label: Index: test/CodeGen/RISCV/jumptable.ll =================================================================== --- test/CodeGen/RISCV/jumptable.ll +++ test/CodeGen/RISCV/jumptable.ll @@ -11,26 +11,26 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: addi a2, zero, 2 ; RV32I-NEXT: blt a2, a0, .LBB0_3 -; RV32I-NEXT: jal zero, .LBB0_1 +; RV32I-NEXT: j .LBB0_1 ; RV32I-NEXT: .LBB0_1: # %entry ; RV32I-NEXT: addi a3, zero, 1 ; RV32I-NEXT: beq a0, a3, .LBB0_5 -; RV32I-NEXT: jal zero, .LBB0_2 +; RV32I-NEXT: j .LBB0_2 ; RV32I-NEXT: .LBB0_2: # %entry ; RV32I-NEXT: beq a0, a2, .LBB0_6 -; RV32I-NEXT: jal zero, .LBB0_9 +; RV32I-NEXT: j .LBB0_9 ; RV32I-NEXT: .LBB0_6: # %bb2 ; RV32I-NEXT: addi a0, zero, 3 ; RV32I-NEXT: sw a0, 0(a1) -; RV32I-NEXT: jal zero, .LBB0_9 +; RV32I-NEXT: j .LBB0_9 ; RV32I-NEXT: .LBB0_3: # %entry ; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: beq a0, a3, .LBB0_7 -; RV32I-NEXT: jal zero, .LBB0_4 +; RV32I-NEXT: j .LBB0_4 ; RV32I-NEXT: .LBB0_4: # %entry ; RV32I-NEXT: addi a2, zero, 4 ; RV32I-NEXT: beq a0, a2, .LBB0_8 -; RV32I-NEXT: jal zero, .LBB0_9 +; RV32I-NEXT: j .LBB0_9 ; RV32I-NEXT: .LBB0_8: # %bb4 ; RV32I-NEXT: addi a0, zero, 1 ; RV32I-NEXT: sw a0, 0(a1) @@ -38,14 +38,14 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_5: # %bb1 ; RV32I-NEXT: addi a0, zero, 4 ; RV32I-NEXT: sw a0, 0(a1) -; RV32I-NEXT: jal zero, .LBB0_9 +; RV32I-NEXT: j .LBB0_9 ; RV32I-NEXT: .LBB0_7: # %bb3 ; RV32I-NEXT: sw a2, 0(a1) -; RV32I-NEXT: jal zero, .LBB0_9 +; RV32I-NEXT: j .LBB0_9 entry: switch i32 %in, label %exit [ i32 1, label %bb1 Index: test/CodeGen/RISCV/mem.ll =================================================================== --- test/CodeGen/RISCV/mem.ll +++ test/CodeGen/RISCV/mem.ll @@ -16,7 +16,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = getelementptr i8, i8* %a, i32 1 %2 = load i8, i8* %1 %3 = sext i8 %2 to i32 @@ -37,7 +37,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; 
RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = getelementptr i16, i16* %a, i32 2 %2 = load i16, i16* %1 %3 = sext i16 %2 to i32 @@ -58,7 +58,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = getelementptr i32, i32* %a, i32 3 %2 = load i32, i32* %1 %3 = load volatile i32, i32* %a @@ -78,7 +78,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = getelementptr i8, i8* %a, i32 4 %2 = load i8, i8* %1 %3 = zext i8 %2 to i32 @@ -101,7 +101,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = getelementptr i16, i16* %a, i32 5 %2 = load i16, i16* %1 %3 = zext i16 %2 to i32 @@ -125,7 +125,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret store i8 %b, i8* %a %1 = getelementptr i8, i8* %a, i32 6 store i8 %b, i8* %1 @@ -144,7 +144,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret store i16 %b, i16* %a %1 = getelementptr i16, i16* %a, i32 7 store i16 %b, i16* %1 @@ -163,7 +163,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret store i32 %b, i32* %a %1 = getelementptr i32, i32* %a, i32 8 store i32 %b, i32* %1 @@ -185,7 +185,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ; sextload i1 %1 = getelementptr i1, i1* %a, i32 1 %2 = load i1, i1* %1 @@ -214,7 +214,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret ; sextload i1 %1 = getelementptr i1, i1* %a, i32 1 %2 = load i1, i1* %1 @@ -248,11 +248,11 @@ ; RV32I-NEXT: addi a2, a2, %lo(G+36) ; RV32I-NEXT: lw a3, 0(a2) ; RV32I-NEXT: sw a0, 0(a2) -; RV32I-NEXT: addi a0, a1, 0 +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = load volatile i32, i32* @G store i32 %a, i32* @G %2 = getelementptr i32, i32* @G, i32 9 @@ -274,11 +274,11 @@ ; RV32I-NEXT: addi a2, a1, -273 ; RV32I-NEXT: lw a1, 0(a2) ; RV32I-NEXT: sw a0, 0(a2) -; RV32I-NEXT: addi a0, a1, 0 +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = inttoptr i32 3735928559 to i32* %2 = load volatile i32, i32* %1 store i32 %a, i32* %1 Index: test/CodeGen/RISCV/mul.ll =================================================================== --- test/CodeGen/RISCV/mul.ll +++ test/CodeGen/RISCV/mul.ll @@ -11,12 +11,12 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a1, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a1, %lo(__mulsi3) -; RV32I-NEXT: addi a1, a0, 0 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = mul i32 %a, %a ret i32 %1 } @@ -30,11 +30,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a2, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a2, %lo(__mulsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr 
a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = mul i32 %a, %b ret i32 %1 } @@ -49,11 +49,11 @@ ; RV32I-NEXT: lui a1, %hi(__mulsi3) ; RV32I-NEXT: addi a2, a1, %lo(__mulsi3) ; RV32I-NEXT: addi a1, zero, 5 -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = mul i32 %a, 5 ret i32 %1 } @@ -69,7 +69,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = mul i32 %a, 8 ret i32 %1 } @@ -83,11 +83,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a4, %hi(__muldi3) ; RV32I-NEXT: addi a4, a4, %lo(__muldi3) -; RV32I-NEXT: jalr ra, a4, 0 +; RV32I-NEXT: jalr a4 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = mul i64 %a, %b ret i64 %1 } @@ -102,12 +102,12 @@ ; RV32I-NEXT: lui a2, %hi(__muldi3) ; RV32I-NEXT: addi a4, a2, %lo(__muldi3) ; RV32I-NEXT: addi a2, zero, 5 -; RV32I-NEXT: addi a3, zero, 0 -; RV32I-NEXT: jalr ra, a4, 0 +; RV32I-NEXT: mv a3, zero +; RV32I-NEXT: jalr a4 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = mul i64 %a, 5 ret i64 %1 } Index: test/CodeGen/RISCV/rem.ll =================================================================== --- test/CodeGen/RISCV/rem.ll +++ test/CodeGen/RISCV/rem.ll @@ -11,11 +11,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a2, %hi(__umodsi3) ; RV32I-NEXT: addi a2, a2, %lo(__umodsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = urem i32 %a, %b ret i32 %1 } @@ -29,11 +29,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a2, %hi(__modsi3) ; RV32I-NEXT: addi a2, a2, %lo(__modsi3) -; RV32I-NEXT: jalr ra, a2, 0 +; RV32I-NEXT: jalr a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = srem i32 %a, %b ret i32 %1 } Index: test/CodeGen/RISCV/rotl-rotr.ll =================================================================== --- test/CodeGen/RISCV/rotl-rotr.ll +++ test/CodeGen/RISCV/rotl-rotr.ll @@ -20,7 +20,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %z = sub i32 32, %y %b = shl i32 %x, %y %c = lshr i32 %x, %z @@ -43,7 +43,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %z = sub i32 32, %y %b = lshr i32 %x, %y %c = shl i32 %x, %z Index: test/CodeGen/RISCV/select-cc.ll =================================================================== --- test/CodeGen/RISCV/select-cc.ll +++ test/CodeGen/RISCV/select-cc.ll @@ -12,57 +12,57 @@ ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: beq a0, a2, .LBB0_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_2: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bne a0, a2, .LBB0_4 ; RV32I-NEXT: # %bb.3: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_4: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bltu a2, a0, .LBB0_6 ; RV32I-NEXT: # %bb.5: -; 
RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_6: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bgeu a0, a2, .LBB0_8 ; RV32I-NEXT: # %bb.7: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_8: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bltu a0, a2, .LBB0_10 ; RV32I-NEXT: # %bb.9: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_10: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bgeu a2, a0, .LBB0_12 ; RV32I-NEXT: # %bb.11: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_12: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: blt a2, a0, .LBB0_14 ; RV32I-NEXT: # %bb.13: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_14: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: bge a0, a2, .LBB0_16 ; RV32I-NEXT: # %bb.15: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_16: ; RV32I-NEXT: lw a2, 0(a1) ; RV32I-NEXT: blt a0, a2, .LBB0_18 ; RV32I-NEXT: # %bb.17: -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: .LBB0_18: ; RV32I-NEXT: lw a1, 0(a1) ; RV32I-NEXT: bge a1, a0, .LBB0_20 ; RV32I-NEXT: # %bb.19: -; RV32I-NEXT: addi a0, a1, 0 +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: .LBB0_20: ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %val1 = load volatile i32, i32* %b %tst1 = icmp eq i32 %a, %val1 %val2 = select i1 %tst1, i32 %a, i32 %val1 Index: test/CodeGen/RISCV/sext-zext-trunc.ll =================================================================== --- test/CodeGen/RISCV/sext-zext-trunc.ll +++ test/CodeGen/RISCV/sext-zext-trunc.ll @@ -10,11 +10,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: sub a0, zero, a0 +; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i1 %a to i8 ret i8 %1 } @@ -27,11 +27,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: sub a0, zero, a0 +; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i1 %a to i16 ret i16 %1 } @@ -44,11 +44,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: sub a0, zero, a0 +; RV32I-NEXT: neg a0, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i1 %a to i32 ret i32 %1 } @@ -61,12 +61,12 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: sub a0, zero, a0 -; RV32I-NEXT: addi a1, a0, 0 +; RV32I-NEXT: neg a0, a0 +; RV32I-NEXT: mv a1, a0 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i1 %a to i64 ret i64 %1 } @@ -83,7 +83,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i8 %a to i16 ret i16 %1 } @@ -100,7 +100,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i8 %a to i32 ret i32 %1 } @@ -118,7 +118,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 
-; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i8 %a to i64 ret i64 %1 } @@ -135,7 +135,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i16 %a to i32 ret i32 %1 } @@ -153,7 +153,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i16 %a to i64 ret i64 %1 } @@ -169,7 +169,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = sext i32 %a to i64 ret i64 %1 } @@ -185,7 +185,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i1 %a to i8 ret i8 %1 } @@ -201,7 +201,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i1 %a to i16 ret i16 %1 } @@ -217,7 +217,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i1 %a to i32 ret i32 %1 } @@ -230,11 +230,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 1 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i1 %a to i64 ret i64 %1 } @@ -250,7 +250,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i8 %a to i16 ret i16 %1 } @@ -266,7 +266,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i8 %a to i32 ret i32 %1 } @@ -279,11 +279,11 @@ ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: andi a0, a0, 255 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i8 %a to i64 ret i64 %1 } @@ -301,7 +301,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i16 %a to i32 ret i32 %1 } @@ -316,11 +316,11 @@ ; RV32I-NEXT: lui a1, 16 ; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i16 %a to i64 ret i64 %1 } @@ -332,11 +332,11 @@ ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: sw s0, 8(sp) ; RV32I-NEXT: addi s0, sp, 16 -; RV32I-NEXT: addi a1, zero, 0 +; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = zext i32 %a to i64 ret i64 %1 } @@ -354,7 +354,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i8 %a to i1 ret i1 %1 } @@ -369,7 +369,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i16 %a to i1 ret i1 %1 
} @@ -384,7 +384,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i1 ret i1 %1 } @@ -399,7 +399,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i64 %a to i1 ret i1 %1 } @@ -414,7 +414,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i16 %a to i8 ret i8 %1 } @@ -429,7 +429,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i8 ret i8 %1 } @@ -444,7 +444,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i64 %a to i8 ret i8 %1 } @@ -459,7 +459,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i32 %a to i16 ret i16 %1 } @@ -474,7 +474,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i64 %a to i16 ret i16 %1 } @@ -489,7 +489,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = trunc i64 %a to i32 ret i32 %1 } Index: test/CodeGen/RISCV/shifts.ll =================================================================== --- test/CodeGen/RISCV/shifts.ll +++ test/CodeGen/RISCV/shifts.ll @@ -14,11 +14,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a3, %hi(__lshrdi3) ; RV32I-NEXT: addi a3, a3, %lo(__lshrdi3) -; RV32I-NEXT: jalr ra, a3, 0 +; RV32I-NEXT: jalr a3 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = lshr i64 %a, %b ret i64 %1 } @@ -32,11 +32,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a3, %hi(__ashrdi3) ; RV32I-NEXT: addi a3, a3, %lo(__ashrdi3) -; RV32I-NEXT: jalr ra, a3, 0 +; RV32I-NEXT: jalr a3 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = ashr i64 %a, %b ret i64 %1 } @@ -50,11 +50,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lui a3, %hi(__ashldi3) ; RV32I-NEXT: addi a3, a3, %lo(__ashldi3) -; RV32I-NEXT: jalr ra, a3, 0 +; RV32I-NEXT: jalr a3 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = shl i64 %a, %b ret i64 %1 } Index: test/CodeGen/RISCV/wide-mem.ll =================================================================== --- test/CodeGen/RISCV/wide-mem.ll +++ test/CodeGen/RISCV/wide-mem.ll @@ -13,11 +13,11 @@ ; RV32I-NEXT: addi s0, sp, 16 ; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: lw a1, 4(a0) -; RV32I-NEXT: addi a0, a2, 0 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = load i64, i64* %a ret i64 %1 } @@ -42,7 +42,7 @@ ; RV32I-NEXT: lw s0, 8(sp) ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 -; RV32I-NEXT: jalr zero, ra, 0 +; RV32I-NEXT: ret %1 = load i64, i64* @val64 ret i64 %1 } Index: test/MC/RISCV/fixups.s 
=================================================================== --- test/MC/RISCV/fixups.s +++ test/MC/RISCV/fixups.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc -triple riscv32 < %s -show-encoding \ +# RUN: llvm-mc -triple riscv32 -riscv-no-aliases < %s -show-encoding \ # RUN: | FileCheck -check-prefix=CHECK-FIXUP %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ -# RUN: | llvm-objdump -d - | FileCheck -check-prefix=CHECK-INSTR %s +# RUN: | llvm-objdump -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INSTR %s # RUN: llvm-mc -filetype=obj -triple=riscv32 %s \ # RUN: | llvm-readobj -r | FileCheck %s -check-prefix=CHECK-REL Index: test/MC/RISCV/priv-valid.s =================================================================== --- test/MC/RISCV/priv-valid.s +++ test/MC/RISCV/priv-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc %s -triple=riscv32 -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ -# RUN: | llvm-objdump -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \ -# RUN: | llvm-objdump -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: uret # CHECK: encoding: [0x73,0x00,0x20,0x00] Index: test/MC/RISCV/relocations.s =================================================================== --- test/MC/RISCV/relocations.s +++ test/MC/RISCV/relocations.s @@ -1,4 +1,4 @@ -# RUN: llvm-mc -triple riscv32 -mattr=+c < %s -show-encoding \ +# RUN: llvm-mc -triple riscv32 -mattr=+c -riscv-no-aliases < %s -show-encoding \ # RUN: | FileCheck -check-prefix=INSTR -check-prefix=FIXUP %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c < %s \ # RUN: | llvm-readobj -r | FileCheck -check-prefix=RELOC %s Index: test/MC/RISCV/rv32a-valid.s =================================================================== --- test/MC/RISCV/rv32a-valid.s +++ test/MC/RISCV/rv32a-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc %s -triple=riscv32 -mattr=+a -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+a -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -mattr=+a -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+a -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+a < %s \ -# RUN: | llvm-objdump -mattr=+a -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+a -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+a < %s \ -# RUN: | llvm-objdump -mattr=+a -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+a -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: lr.w t0, (t1) # CHECK: encoding: [0xaf,0x22,0x03,0x10] Index: test/MC/RISCV/rv32c-only-valid.s =================================================================== --- test/MC/RISCV/rv32c-only-valid.s +++ test/MC/RISCV/rv32c-only-valid.s @@ -1,6 +1,6 @@ -# RUN: llvm-mc -triple=riscv32 
-mattr=+c -show-encoding < %s \ +# RUN: llvm-mc -triple=riscv32 -mattr=+c -riscv-no-aliases -show-encoding < %s \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c < %s \ +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c -riscv-no-aliases < %s \ # RUN: | llvm-objdump -mattr=+c -d - | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: c.jal 2046 Index: test/MC/RISCV/rv32c-valid.s =================================================================== --- test/MC/RISCV/rv32c-valid.s +++ test/MC/RISCV/rv32c-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc -triple=riscv32 -mattr=+c -show-encoding < %s \ +# RUN: llvm-mc -triple=riscv32 -mattr=+c -riscv-no-aliases -show-encoding < %s \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc -triple=riscv64 -mattr=+c -show-encoding < %s \ +# RUN: llvm-mc -triple=riscv64 -mattr=+c -riscv-no-aliases -show-encoding < %s \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c < %s \ -# RUN: | llvm-objdump -mattr=+c -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+c -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+c < %s \ -# RUN: | llvm-objdump -mattr=+c -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+c -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # TODO: more exhaustive testing of immediate encoding. Index: test/MC/RISCV/rv32d-valid.s =================================================================== --- test/MC/RISCV/rv32d-valid.s +++ test/MC/RISCV/rv32d-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc %s -triple=riscv32 -mattr=+d -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+d -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -mattr=+d -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+d -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+d < %s \ -# RUN: | llvm-objdump -mattr=+d -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+d -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+d < %s \ -# RUN: | llvm-objdump -mattr=+d -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+d -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # Support for the 'D' extension implies support for 'F' # CHECK-INST: fadd.s fs10, fs11, ft8 @@ -44,34 +46,34 @@ # CHECK: encoding: [0xa7,0xb3,0x9b,0x3e] fsd f9, 999(s7) -# CHECK-INST: fmadd.d fa0, fa1, fa2, fa3 +# CHECK-INST: fmadd.d fa0, fa1, fa2, fa3, dyn # CHECK: encoding: [0x43,0xf5,0xc5,0x6a] -fmadd.d f10, f11, f12, f13 -# CHECK-INST: fmsub.d fa4, fa5, fa6, fa7 +fmadd.d f10, f11, f12, f13, dyn +# CHECK-INST: fmsub.d fa4, fa5, fa6, fa7, dyn # CHECK: encoding: [0x47,0xf7,0x07,0x8b] -fmsub.d f14, f15, f16, f17 -# CHECK-INST: fnmsub.d fs2, fs3, fs4, fs5 +fmsub.d f14, f15, f16, f17, dyn +# CHECK-INST: fnmsub.d fs2, fs3, fs4, fs5, dyn # CHECK: encoding: [0x4b,0xf9,0x49,0xab] -fnmsub.d f18, f19, f20, f21 -# CHECK-INST: fnmadd.d fs6, fs7, fs8, fs9 +fnmsub.d f18, f19, f20, f21, dyn +# CHECK-INST: fnmadd.d fs6, fs7, fs8, fs9, dyn # CHECK: encoding: [0x4f,0xfb,0x8b,0xcb] -fnmadd.d f22, f23, f24, f25 +fnmadd.d f22, f23, f24, 
f25, dyn -# CHECK-INST: fadd.d fs10, fs11, ft8 +# CHECK-INST: fadd.d fs10, fs11, ft8, dyn # CHECK: encoding: [0x53,0xfd,0xcd,0x03] -fadd.d f26, f27, f28 -# CHECK-INST: fsub.d ft9, ft10, ft11 +fadd.d f26, f27, f28, dyn +# CHECK-INST: fsub.d ft9, ft10, ft11, dyn # CHECK: encoding: [0xd3,0x7e,0xff,0x0b] -fsub.d f29, f30, f31 -# CHECK-INST: fmul.d ft0, ft1, ft2 +fsub.d f29, f30, f31, dyn +# CHECK-INST: fmul.d ft0, ft1, ft2, dyn # CHECK: encoding: [0x53,0xf0,0x20,0x12] -fmul.d ft0, ft1, ft2 -# CHECK-INST: fdiv.d ft3, ft4, ft5 +fmul.d ft0, ft1, ft2, dyn +# CHECK-INST: fdiv.d ft3, ft4, ft5, dyn # CHECK: encoding: [0xd3,0x71,0x52,0x1a] -fdiv.d ft3, ft4, ft5 -# CHECK-INST: fsqrt.d ft6, ft7 +fdiv.d ft3, ft4, ft5, dyn +# CHECK-INST: fsqrt.d ft6, ft7, dyn # CHECK: encoding: [0x53,0xf3,0x03,0x5a] -fsqrt.d ft6, ft7 +fsqrt.d ft6, ft7, dyn # CHECK-INST: fsgnj.d fs1, fa0, fa1 # CHECK: encoding: [0xd3,0x04,0xb5,0x22] fsgnj.d fs1, fa0, fa1 @@ -88,9 +90,9 @@ # CHECK: encoding: [0x53,0x99,0x49,0x2b] fmax.d fs2, fs3, fs4 -# CHECK-INST: fcvt.s.d fs5, fs6 +# CHECK-INST: fcvt.s.d fs5, fs6, dyn # CHECK: encoding: [0xd3,0x7a,0x1b,0x40] -fcvt.s.d fs5, fs6 +fcvt.s.d fs5, fs6, dyn # CHECK-INST: fcvt.d.s fs7, fs8 # CHECK: encoding: [0xd3,0x0b,0x0c,0x42] fcvt.d.s fs7, fs8 @@ -107,9 +109,9 @@ # CHECK: encoding: [0xd3,0x16,0x0f,0xe2] fclass.d a3, ft10 -# CHECK-INST: fcvt.w.d a4, ft11 +# CHECK-INST: fcvt.w.d a4, ft11, dyn # CHECK: encoding: [0x53,0xf7,0x0f,0xc2] -fcvt.w.d a4, ft11 +fcvt.w.d a4, ft11, dyn # CHECK-INST: fcvt.d.w ft0, a5 # CHECK: encoding: [0x53,0x80,0x07,0xd2] fcvt.d.w ft0, a5 @@ -154,6 +156,6 @@ # CHECK-INST: fcvt.w.d a4, ft11, rmm # CHECK: encoding: [0x53,0xc7,0x0f,0xc2] fcvt.w.d a4, ft11, rmm -# CHECK-INST: fcvt.wu.d a5, ft10 +# CHECK-INST: fcvt.wu.d a5, ft10, dyn # CHECK: encoding: [0xd3,0x77,0x1f,0xc2] fcvt.wu.d a5, ft10, dyn Index: test/MC/RISCV/rv32dc-valid.s =================================================================== --- test/MC/RISCV/rv32dc-valid.s +++ test/MC/RISCV/rv32dc-valid.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc %s -triple=riscv32 -mattr=+c,+d -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+c,+d -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+c,+d < %s \ -# RUN: | llvm-objdump -mattr=+c,+d -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+c,+d -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: c.fldsp fs0, 504(sp) # CHECK: encoding: [0x7e,0x34] Index: test/MC/RISCV/rv32f-valid.s =================================================================== --- test/MC/RISCV/rv32f-valid.s +++ test/MC/RISCV/rv32f-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc %s -triple=riscv32 -mattr=+f -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+f -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -mattr=+f -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+f -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+f < %s \ -# RUN: | llvm-objdump -mattr=+f -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+f -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+f < %s \ -# RUN: | llvm-objdump -mattr=+f -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+f 
-riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: flw ft0, 12(a0) # CHECK: encoding: [0x07,0x20,0xc5,0x00] @@ -39,34 +41,34 @@ # CHECK: encoding: [0xa7,0xa3,0x9b,0x3e] fsw f9, 999(s7) -# CHECK-INST: fmadd.s fa0, fa1, fa2, fa3 +# CHECK-INST: fmadd.s fa0, fa1, fa2, fa3, dyn # CHECK: encoding: [0x43,0xf5,0xc5,0x68] -fmadd.s f10, f11, f12, f13 -# CHECK-INST: fmsub.s fa4, fa5, fa6, fa7 +fmadd.s f10, f11, f12, f13, dyn +# CHECK-INST: fmsub.s fa4, fa5, fa6, fa7, dyn # CHECK: encoding: [0x47,0xf7,0x07,0x89] -fmsub.s f14, f15, f16, f17 -# CHECK-INST: fnmsub.s fs2, fs3, fs4, fs5 +fmsub.s f14, f15, f16, f17, dyn +# CHECK-INST: fnmsub.s fs2, fs3, fs4, fs5, dyn # CHECK: encoding: [0x4b,0xf9,0x49,0xa9] -fnmsub.s f18, f19, f20, f21 -# CHECK-INST: fnmadd.s fs6, fs7, fs8, fs9 +fnmsub.s f18, f19, f20, f21, dyn +# CHECK-INST: fnmadd.s fs6, fs7, fs8, fs9, dyn # CHECK: encoding: [0x4f,0xfb,0x8b,0xc9] -fnmadd.s f22, f23, f24, f25 +fnmadd.s f22, f23, f24, f25, dyn -# CHECK-INST: fadd.s fs10, fs11, ft8 +# CHECK-INST: fadd.s fs10, fs11, ft8, dyn # CHECK: encoding: [0x53,0xfd,0xcd,0x01] -fadd.s f26, f27, f28 -# CHECK-INST: fsub.s ft9, ft10, ft11 +fadd.s f26, f27, f28, dyn +# CHECK-INST: fsub.s ft9, ft10, ft11, dyn # CHECK: encoding: [0xd3,0x7e,0xff,0x09] -fsub.s f29, f30, f31 -# CHECK-INST: fmul.s ft0, ft1, ft2 +fsub.s f29, f30, f31, dyn +# CHECK-INST: fmul.s ft0, ft1, ft2, dyn # CHECK: encoding: [0x53,0xf0,0x20,0x10] -fmul.s ft0, ft1, ft2 -# CHECK-INST: fdiv.s ft3, ft4, ft5 +fmul.s ft0, ft1, ft2, dyn +# CHECK-INST: fdiv.s ft3, ft4, ft5, dyn # CHECK: encoding: [0xd3,0x71,0x52,0x18] -fdiv.s ft3, ft4, ft5 -# CHECK-INST: fsqrt.s ft6, ft7 +fdiv.s ft3, ft4, ft5, dyn +# CHECK-INST: fsqrt.s ft6, ft7, dyn # CHECK: encoding: [0x53,0xf3,0x03,0x58] -fsqrt.s ft6, ft7 +fsqrt.s ft6, ft7, dyn # CHECK-INST: fsgnj.s fs1, fa0, fa1 # CHECK: encoding: [0xd3,0x04,0xb5,0x20] fsgnj.s fs1, fa0, fa1 @@ -82,12 +84,12 @@ # CHECK-INST: fmax.s fs2, fs3, fs4 # CHECK: encoding: [0x53,0x99,0x49,0x29] fmax.s fs2, fs3, fs4 -# CHECK-INST: fcvt.w.s a0, fs5 +# CHECK-INST: fcvt.w.s a0, fs5, dyn # CHECK: encoding: [0x53,0xf5,0x0a,0xc0] -fcvt.w.s a0, fs5 -# CHECK-INST: fcvt.wu.s a1, fs6 +fcvt.w.s a0, fs5, dyn +# CHECK-INST: fcvt.wu.s a1, fs6, dyn # CHECK: encoding: [0xd3,0x75,0x1b,0xc0] -fcvt.wu.s a1, fs6 +fcvt.wu.s a1, fs6, dyn # CHECK-INST: fmv.x.w a2, fs7 # CHECK: encoding: [0x53,0x86,0x0b,0xe0] fmv.x.w a2, fs7 @@ -103,12 +105,12 @@ # CHECK-INST: fclass.s a3, ft10 # CHECK: encoding: [0xd3,0x16,0x0f,0xe0] fclass.s a3, ft10 -# CHECK-INST: fcvt.s.w ft11, a4 +# CHECK-INST: fcvt.s.w ft11, a4, dyn # CHECK: encoding: [0xd3,0x7f,0x07,0xd0] -fcvt.s.w ft11, a4 -# CHECK-INST: fcvt.s.wu ft0, a5 +fcvt.s.w ft11, a4, dyn +# CHECK-INST: fcvt.s.wu ft0, a5, dyn # CHECK: encoding: [0x53,0xf0,0x17,0xd0] -fcvt.s.wu ft0, a5 +fcvt.s.wu ft0, a5, dyn # CHECK-INST: fmv.w.x ft1, a6 # CHECK: encoding: [0xd3,0x00,0x08,0xf0] fmv.w.x ft1, a6 Index: test/MC/RISCV/rv32fc-valid.s =================================================================== --- test/MC/RISCV/rv32fc-valid.s +++ test/MC/RISCV/rv32fc-valid.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc %s -triple=riscv32 -mattr=+c,+f -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+c,+f -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+c,+f < %s \ -# RUN: | llvm-objdump -mattr=+c,+f -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+c,+f -riscv-no-aliases -d - \ +# RUN: | FileCheck 
-check-prefix=CHECK-INST %s # CHECK-INST: c.flwsp fs0, 252(sp) # CHECK: encoding: [0x7e,0x74] Index: test/MC/RISCV/rv32i-aliases-invalid.s =================================================================== --- test/MC/RISCV/rv32i-aliases-invalid.s +++ test/MC/RISCV/rv32i-aliases-invalid.s @@ -1,5 +1,5 @@ # RUN: not llvm-mc %s -triple=riscv32 -riscv-no-aliases 2>&1 | FileCheck %s -# RUN: not llvm-mc %s -triple=riscv32 -riscv-no-aliases=false 2>&1 | FileCheck %s +# RUN: not llvm-mc %s -triple=riscv32 2>&1 | FileCheck %s # TODO ld # TODO sd Index: test/MC/RISCV/rv32i-aliases-valid.s =================================================================== --- test/MC/RISCV/rv32i-aliases-valid.s +++ test/MC/RISCV/rv32i-aliases-valid.s @@ -1,12 +1,12 @@ # RUN: llvm-mc %s -triple=riscv32 -riscv-no-aliases \ # RUN: | FileCheck -check-prefixes=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv32 -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv32 \ # RUN: | FileCheck -check-prefixes=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ -# RUN: | llvm-objdump -d -riscv-no-aliases - \ +# RUN: | llvm-objdump -riscv-no-aliases -d - \ # RUN: | FileCheck -check-prefixes=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ -# RUN: | llvm-objdump -d -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d - \ # RUN: | FileCheck -check-prefixes=CHECK-ALIAS %s # CHECK-INST: csrrs t4, 3202, zero Index: test/MC/RISCV/rv32i-valid.s =================================================================== --- test/MC/RISCV/rv32i-valid.s +++ test/MC/RISCV/rv32i-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc %s -triple=riscv32 -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ -# RUN: | llvm-objdump -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \ -# RUN: | llvm-objdump -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: lui a0, 2 # CHECK: encoding: [0x37,0x25,0x00,0x00] Index: test/MC/RISCV/rv32m-valid.s =================================================================== --- test/MC/RISCV/rv32m-valid.s +++ test/MC/RISCV/rv32m-valid.s @@ -1,11 +1,13 @@ -# RUN: llvm-mc %s -triple=riscv32 -mattr=+m -show-encoding \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+m -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -mattr=+m -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+m -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+m < %s \ -# RUN: | llvm-objdump -mattr=+m -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+m -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+m < %s \ -# RUN: | llvm-objdump -mattr=+m -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+m -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: mul a4, 
ra, s0 # CHECK: encoding: [0x33,0x87,0x80,0x02] Index: test/MC/RISCV/rv64a-valid.s =================================================================== --- test/MC/RISCV/rv64a-valid.s +++ test/MC/RISCV/rv64a-valid.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc %s -triple=riscv64 -mattr=+a -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+a -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+a < %s \ -# RUN: | llvm-objdump -mattr=+a -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+a -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 \ # RUN: | FileCheck -check-prefix=CHECK-RV32 %s Index: test/MC/RISCV/rv64c-valid.s =================================================================== --- test/MC/RISCV/rv64c-valid.s +++ test/MC/RISCV/rv64c-valid.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc -triple=riscv64 -mattr=+c -show-encoding < %s \ +# RUN: llvm-mc -triple=riscv64 -mattr=+c -riscv-no-aliases -show-encoding < %s \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+c < %s \ -# RUN: | llvm-objdump -mattr=+c -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+c -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # TODO: more exhaustive testing of immediate encoding. Index: test/MC/RISCV/rv64d-aliases-valid.s =================================================================== --- /dev/null +++ test/MC/RISCV/rv64d-aliases-valid.s @@ -0,0 +1,27 @@ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+d -riscv-no-aliases \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s +# RUN: llvm-mc %s -triple=riscv64 -mattr=+d \ +# RUN: | FileCheck -check-prefix=CHECK-ALIAS %s +# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+d < %s \ +# RUN: | llvm-objdump -d -mattr=+d -riscv-no-aliases - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s +# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+d < %s \ +# RUN: | llvm-objdump -d -mattr=+d - \ +# RUN: | FileCheck -check-prefix=CHECK-ALIAS %s + +##===----------------------------------------------------------------------===## +## Aliases which omit the rounding mode. 
+##===----------------------------------------------------------------------===## + +# CHECK-INST: fcvt.l.d a0, ft0, dyn +# CHECK-ALIAS: fcvt.l.d a0, ft0{{[[:space:]]}} +fcvt.l.d a0, ft0 +# CHECK-INST: fcvt.lu.d a1, ft1, dyn +# CHECK-ALIAS: fcvt.lu.d a1, ft1{{[[:space:]]}} +fcvt.lu.d a1, ft1 +# CHECK-INST: fcvt.d.l ft3, a3, dyn +# CHECK-ALIAS: fcvt.d.l ft3, a3{{[[:space:]]}} +fcvt.d.l ft3, a3 +# CHECK-INST: fcvt.d.lu ft4, a4, dyn +# CHECK-ALIAS: fcvt.d.lu ft4, a4{{[[:space:]]}} +fcvt.d.lu ft4, a4 Index: test/MC/RISCV/rv64d-valid.s =================================================================== --- test/MC/RISCV/rv64d-valid.s +++ test/MC/RISCV/rv64d-valid.s @@ -1,30 +1,31 @@ -# RUN: llvm-mc %s -triple=riscv64 -mattr=+d -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+d -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+d < %s \ -# RUN: | llvm-objdump -mattr=+d -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+d -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: not llvm-mc -triple riscv32 -mattr=+d < %s 2>&1 \ # RUN: | FileCheck -check-prefix=CHECK-RV32 %s -# CHECK-INST: fcvt.l.d a0, ft0 +# CHECK-INST: fcvt.l.d a0, ft0, dyn # CHECK: encoding: [0x53,0x75,0x20,0xc2] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.l.d a0, ft0 -# CHECK-INST: fcvt.lu.d a1, ft1 +fcvt.l.d a0, ft0, dyn +# CHECK-INST: fcvt.lu.d a1, ft1, dyn # CHECK: encoding: [0xd3,0xf5,0x30,0xc2] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.lu.d a1, ft1 +fcvt.lu.d a1, ft1, dyn # CHECK-INST: fmv.x.d a2, ft2 # CHECK: encoding: [0x53,0x06,0x01,0xe2] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled fmv.x.d a2, ft2 -# CHECK-INST: fcvt.d.l ft3, a3 +# CHECK-INST: fcvt.d.l ft3, a3, dyn # CHECK: encoding: [0xd3,0xf1,0x26,0xd2] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.d.l ft3, a3 -# CHECK-INST: fcvt.d.lu ft4, a4 +fcvt.d.l ft3, a3, dyn +# CHECK-INST: fcvt.d.lu ft4, a4, dyn # CHECK: encoding: [0x53,0x72,0x37,0xd2] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.d.lu ft4, a4 +fcvt.d.lu ft4, a4, dyn # CHECK-INST: fmv.d.x ft5, a5 # CHECK: encoding: [0xd3,0x82,0x07,0xf2] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled Index: test/MC/RISCV/rv64f-aliases-valid.s =================================================================== --- /dev/null +++ test/MC/RISCV/rv64f-aliases-valid.s @@ -0,0 +1,27 @@ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+f -riscv-no-aliases \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s +# RUN: llvm-mc %s -triple=riscv64 -mattr=+f \ +# RUN: | FileCheck -check-prefix=CHECK-ALIAS %s +# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+f < %s \ +# RUN: | llvm-objdump -d -mattr=+f -riscv-no-aliases - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s +# RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+f < %s \ +# RUN: | llvm-objdump -d -mattr=+f - \ +# RUN: | FileCheck -check-prefix=CHECK-ALIAS %s + +##===----------------------------------------------------------------------===## +## Aliases which omit the rounding mode. 
+##===----------------------------------------------------------------------===## + +# CHECK-INST: fcvt.l.s a0, ft0, dyn +# CHECK-ALIAS: fcvt.l.s a0, ft0{{[[:space:]]}} +fcvt.l.s a0, ft0 +# CHECK-INST: fcvt.lu.s a1, ft1, dyn +# CHECK-ALIAS: fcvt.lu.s a1, ft1{{[[:space:]]}} +fcvt.lu.s a1, ft1 +# CHECK-INST: fcvt.s.l ft2, a2, dyn +# CHECK-ALIAS: fcvt.s.l ft2, a2{{[[:space:]]}} +fcvt.s.l ft2, a2 +# CHECK-INST: fcvt.s.lu ft3, a3, dyn +# CHECK-ALIAS: fcvt.s.lu ft3, a3{{[[:space:]]}} +fcvt.s.lu ft3, a3 Index: test/MC/RISCV/rv64f-valid.s =================================================================== --- test/MC/RISCV/rv64f-valid.s +++ test/MC/RISCV/rv64f-valid.s @@ -1,26 +1,27 @@ -# RUN: llvm-mc %s -triple=riscv64 -mattr=+f -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+f -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+f < %s \ -# RUN: | llvm-objdump -mattr=+f -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+f -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: not llvm-mc -triple riscv32 -mattr=+f < %s 2>&1 \ # RUN: | FileCheck -check-prefix=CHECK-RV32 %s -# CHECK-INST: fcvt.l.s a0, ft0 +# CHECK-INST: fcvt.l.s a0, ft0, dyn # CHECK: encoding: [0x53,0x75,0x20,0xc0] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.l.s a0, ft0 -# CHECK-INST: fcvt.lu.s a1, ft1 +fcvt.l.s a0, ft0, dyn +# CHECK-INST: fcvt.lu.s a1, ft1, dyn # CHECK: encoding: [0xd3,0xf5,0x30,0xc0] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.lu.s a1, ft1 -# CHECK-INST: fcvt.s.l ft2, a2 +fcvt.lu.s a1, ft1, dyn +# CHECK-INST: fcvt.s.l ft2, a2, dyn # CHECK: encoding: [0x53,0x71,0x26,0xd0] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.s.l ft2, a2 -# CHECK-INST: fcvt.s.lu ft3, a3 +fcvt.s.l ft2, a2, dyn +# CHECK-INST: fcvt.s.lu ft3, a3, dyn # CHECK: encoding: [0xd3,0xf1,0x36,0xd0] # CHECK-RV32: :[[@LINE+1]]:1: error: instruction use requires an option to be enabled -fcvt.s.lu ft3, a3 +fcvt.s.lu ft3, a3, dyn # Rounding modes # CHECK-INST: fcvt.l.s a4, ft4, rne Index: test/MC/RISCV/rv64i-aliases-invalid.s =================================================================== --- test/MC/RISCV/rv64i-aliases-invalid.s +++ test/MC/RISCV/rv64i-aliases-invalid.s @@ -1,5 +1,5 @@ # RUN: not llvm-mc %s -triple=riscv64 -riscv-no-aliases 2>&1 | FileCheck %s -# RUN: not llvm-mc %s -triple=riscv64 -riscv-no-aliases=false 2>&1 | FileCheck %s +# RUN: not llvm-mc %s -triple=riscv64 2>&1 | FileCheck %s rdinstreth x29 # CHECK: :[[@LINE]]:1: error: instruction use requires an option to be enabled rdcycleh x27 # CHECK: :[[@LINE]]:1: error: instruction use requires an option to be enabled Index: test/MC/RISCV/rv64i-aliases-valid.s =================================================================== --- test/MC/RISCV/rv64i-aliases-valid.s +++ test/MC/RISCV/rv64i-aliases-valid.s @@ -1,12 +1,12 @@ # RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases \ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv64 \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \ -# RUN: | llvm-objdump -d -riscv-no-aliases - \ +# RUN: | llvm-objdump -riscv-no-aliases -d - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple 
riscv64 < %s \ -# RUN: | llvm-objdump -d -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # TODO ld Index: test/MC/RISCV/rv64i-valid.s =================================================================== --- test/MC/RISCV/rv64i-valid.s +++ test/MC/RISCV/rv64i-valid.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc %s -triple=riscv64 -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \ -# RUN: | llvm-objdump -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: lwu zero, 4(ra) # CHECK: encoding: [0x03,0xe0,0x40,0x00] Index: test/MC/RISCV/rv64m-valid.s =================================================================== --- test/MC/RISCV/rv64m-valid.s +++ test/MC/RISCV/rv64m-valid.s @@ -1,7 +1,8 @@ -# RUN: llvm-mc %s -triple=riscv64 -mattr=+m -show-encoding \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+m -riscv-no-aliases -show-encoding \ # RUN: | FileCheck -check-prefixes=CHECK,CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+m < %s \ -# RUN: | llvm-objdump -mattr=+m -d - | FileCheck -check-prefix=CHECK-INST %s +# RUN: | llvm-objdump -mattr=+m -riscv-no-aliases -d - \ +# RUN: | FileCheck -check-prefix=CHECK-INST %s # CHECK-INST: mulw ra, sp, gp # CHECK: encoding: [0xbb,0x00,0x31,0x02] Index: test/MC/RISCV/rvd-aliases-valid.s =================================================================== --- test/MC/RISCV/rvd-aliases-valid.s +++ test/MC/RISCV/rvd-aliases-valid.s @@ -1,24 +1,28 @@ # RUN: llvm-mc %s -triple=riscv32 -mattr=+d -riscv-no-aliases \ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv32 -mattr=+d -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+d \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc %s -triple=riscv64 -mattr=+d -riscv-no-aliases \ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -mattr=+d -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+d \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+d < %s \ # RUN: | llvm-objdump -d -mattr=+d -riscv-no-aliases - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+d < %s \ -# RUN: | llvm-objdump -d -mattr=+d -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d -mattr=+d - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+d < %s \ # RUN: | llvm-objdump -d -mattr=+d -riscv-no-aliases - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+d < %s \ -# RUN: | llvm-objdump -d -mattr=+d -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d -mattr=+d - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s +##===----------------------------------------------------------------------===## +## Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20) +##===----------------------------------------------------------------------===## + # TODO fld # TODO fsd @@ -31,3 +35,44 @@ # CHECK-INST: fsgnjn.d ft2, ft3, ft3 # CHECK-ALIAS: fneg.d ft2, ft3 fneg.d f2, f3 + +##===----------------------------------------------------------------------===## +## Aliases which omit the rounding mode. 
+##===----------------------------------------------------------------------===## + +# CHECK-INST: fmadd.d fa0, fa1, fa2, fa3, dyn +# CHECK-ALIAS: fmadd.d fa0, fa1, fa2, fa3{{[[:space:]]}} +fmadd.d f10, f11, f12, f13 +# CHECK-INST: fmsub.d fa4, fa5, fa6, fa7, dyn +# CHECK-ALIAS: fmsub.d fa4, fa5, fa6, fa7{{[[:space:]]}} +fmsub.d f14, f15, f16, f17 +# CHECK-INST: fnmsub.d fs2, fs3, fs4, fs5, dyn +# CHECK-ALIAS: fnmsub.d fs2, fs3, fs4, fs5{{[[:space:]]}} +fnmsub.d f18, f19, f20, f21 +# CHECK-INST: fnmadd.d fs6, fs7, fs8, fs9, dyn +# CHECK-ALIAS: fnmadd.d fs6, fs7, fs8, fs9{{[[:space:]]}} +fnmadd.d f22, f23, f24, f25 +# CHECK-INST: fadd.d fs10, fs11, ft8, dyn +# CHECK-ALIAS: fadd.d fs10, fs11, ft8{{[[:space:]]}} +fadd.d f26, f27, f28 +# CHECK-INST: fsub.d ft9, ft10, ft11, dyn +# CHECK-ALIAS: fsub.d ft9, ft10, ft11{{[[:space:]]}} +fsub.d f29, f30, f31 +# CHECK-INST: fmul.d ft0, ft1, ft2, dyn +# CHECK-ALIAS: fmul.d ft0, ft1, ft2{{[[:space:]]}} +fmul.d ft0, ft1, ft2 +# CHECK-INST: fdiv.d ft3, ft4, ft5, dyn +# CHECK-ALIAS: fdiv.d ft3, ft4, ft5{{[[:space:]]}} +fdiv.d ft3, ft4, ft5 +# CHECK-INST: fsqrt.d ft6, ft7, dyn +# CHECK-ALIAS: fsqrt.d ft6, ft7{{[[:space:]]}} +fsqrt.d ft6, ft7 +# CHECK-INST: fcvt.s.d fs5, fs6, dyn +# CHECK-ALIAS: fcvt.s.d fs5, fs6{{[[:space:]]}} +fcvt.s.d fs5, fs6 +# CHECK-INST: fcvt.w.d a4, ft11, dyn +# CHECK-ALIAS: fcvt.w.d a4, ft11{{[[:space:]]}} +fcvt.w.d a4, ft11 +# CHECK-INST: fcvt.wu.d a5, ft10, dyn +# CHECK-ALIAS: fcvt.wu.d a5, ft10{{[[:space:]]}} +fcvt.wu.d a5, ft10 Index: test/MC/RISCV/rvf-aliases-valid.s =================================================================== --- test/MC/RISCV/rvf-aliases-valid.s +++ test/MC/RISCV/rvf-aliases-valid.s @@ -1,24 +1,28 @@ # RUN: llvm-mc %s -triple=riscv32 -mattr=+f -riscv-no-aliases \ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv32 -mattr=+f -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv32 -mattr=+f \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc %s -triple=riscv64 -mattr=+f -riscv-no-aliases \ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -mattr=+f -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv64 -mattr=+f \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+f < %s \ # RUN: | llvm-objdump -d -mattr=+f -riscv-no-aliases - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+f < %s \ -# RUN: | llvm-objdump -d -mattr=+f -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d -mattr=+f - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+f < %s \ # RUN: | llvm-objdump -d -mattr=+f -riscv-no-aliases - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 -mattr=+f < %s \ -# RUN: | llvm-objdump -d -mattr=+f -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d -mattr=+f - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s +##===----------------------------------------------------------------------===## +## Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20) +##===----------------------------------------------------------------------===## + # TODO flw # TODO fsw @@ -75,3 +79,47 @@ # CHECK-INST: csrrwi zero, 1, 28 # CHECK-ALIAS: fsflagsi 28 fsflagsi 0x1c + +##===----------------------------------------------------------------------===## +## Aliases which omit the rounding mode. 
+##===----------------------------------------------------------------------===## + +# CHECK-INST: fmadd.s fa0, fa1, fa2, fa3, dyn +# CHECK-ALIAS: fmadd.s fa0, fa1, fa2, fa3{{[[:space:]]}} +fmadd.s f10, f11, f12, f13 +# CHECK-INST: fmsub.s fa4, fa5, fa6, fa7, dyn +# CHECK-ALIAS: fmsub.s fa4, fa5, fa6, fa7{{[[:space:]]}} +fmsub.s f14, f15, f16, f17 +# CHECK-INST: fnmsub.s fs2, fs3, fs4, fs5, dyn +# CHECK-ALIAS: fnmsub.s fs2, fs3, fs4, fs5{{[[:space:]]}} +fnmsub.s f18, f19, f20, f21 +# CHECK-INST: fnmadd.s fs6, fs7, fs8, fs9, dyn +# CHECK-ALIAS: fnmadd.s fs6, fs7, fs8, fs9{{[[:space:]]}} +fnmadd.s f22, f23, f24, f25 +# CHECK-INST: fadd.s fs10, fs11, ft8, dyn +# CHECK-ALIAS: fadd.s fs10, fs11, ft8{{[[:space:]]}} +fadd.s f26, f27, f28 +# CHECK-INST: fsub.s ft9, ft10, ft11, dyn +# CHECK-ALIAS: fsub.s ft9, ft10, ft11{{[[:space:]]}} +fsub.s f29, f30, f31 +# CHECK-INST: fmul.s ft0, ft1, ft2, dyn +# CHECK-ALIAS: fmul.s ft0, ft1, ft2{{[[:space:]]}} +fmul.s ft0, ft1, ft2 +# CHECK-INST: fdiv.s ft3, ft4, ft5, dyn +# CHECK-ALIAS: fdiv.s ft3, ft4, ft5{{[[:space:]]}} +fdiv.s ft3, ft4, ft5 +# CHECK-INST: fsqrt.s ft6, ft7, dyn +# CHECK-ALIAS: fsqrt.s ft6, ft7{{[[:space:]]}} +fsqrt.s ft6, ft7 +# CHECK-INST: fcvt.w.s a0, fs5, dyn +# CHECK-ALIAS: fcvt.w.s a0, fs5{{[[:space:]]}} +fcvt.w.s a0, fs5 +# CHECK-INST: fcvt.wu.s a1, fs6, dyn +# CHECK-ALIAS: fcvt.wu.s a1, fs6{{[[:space:]]}} +fcvt.wu.s a1, fs6 +# CHECK-INST: fcvt.s.w ft11, a4, dyn +# CHECK-ALIAS: fcvt.s.w ft11, a4{{[[:space:]]}} +fcvt.s.w ft11, a4 +# CHECK-INST: fcvt.s.wu ft0, a5, dyn +# CHECK-ALIAS: fcvt.s.wu ft0, a5{{[[:space:]]}} +fcvt.s.wu ft0, a5 Index: test/MC/RISCV/rvi-aliases-valid.s =================================================================== --- test/MC/RISCV/rvi-aliases-valid.s +++ test/MC/RISCV/rvi-aliases-valid.s @@ -1,22 +1,22 @@ # RUN: llvm-mc %s -triple=riscv32 -riscv-no-aliases \ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv32 -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv32 \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases\ # RUN: | FileCheck -check-prefix=CHECK-INST %s -# RUN: llvm-mc %s -triple=riscv64 -riscv-no-aliases=false \ +# RUN: llvm-mc %s -triple=riscv64 \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ # RUN: | llvm-objdump -d -riscv-no-aliases - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv32 < %s \ -# RUN: | llvm-objdump -d -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \ # RUN: | llvm-objdump -d -riscv-no-aliases - \ # RUN: | FileCheck -check-prefix=CHECK-INST %s # RUN: llvm-mc -filetype=obj -triple riscv64 < %s \ -# RUN: | llvm-objdump -d -riscv-no-aliases=false - \ +# RUN: | llvm-objdump -d - \ # RUN: | FileCheck -check-prefix=CHECK-ALIAS %s # TODO la