diff --git a/llvm/test/CodeGen/RISCV/stack-store-check.ll b/llvm/test/CodeGen/RISCV/stack-store-check.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/stack-store-check.ll
@@ -0,0 +1,325 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple riscv32 -o - %s | FileCheck %s
+; This test has been minimized from GCC Torture Suite's regstack-1.c
+; and checks that RISCVInstrInfo::storeRegToStackSlot works at the basic
+; level.
+
+@U = external local_unnamed_addr global fp128, align 16
+@Y1 = external local_unnamed_addr global fp128, align 16
+@X = external local_unnamed_addr global fp128, align 16
+@Y = external local_unnamed_addr global fp128, align 16
+@T = external local_unnamed_addr global fp128, align 16
+@S = external local_unnamed_addr global fp128, align 16
+
+define void @main() local_unnamed_addr nounwind {
+; CHECK-LABEL: main:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -688
+; CHECK-NEXT:    sw ra, 684(sp)
+; CHECK-NEXT:    sw s0, 680(sp)
+; CHECK-NEXT:    sw s1, 676(sp)
+; CHECK-NEXT:    sw s2, 672(sp)
+; CHECK-NEXT:    sw s3, 668(sp)
+; CHECK-NEXT:    sw s4, 664(sp)
+; CHECK-NEXT:    sw s5, 660(sp)
+; CHECK-NEXT:    sw s6, 656(sp)
+; CHECK-NEXT:    sw s7, 652(sp)
+; CHECK-NEXT:    sw s8, 648(sp)
+; CHECK-NEXT:    sw s9, 644(sp)
+; CHECK-NEXT:    sw s10, 640(sp)
+; CHECK-NEXT:    sw s11, 636(sp)
+; CHECK-NEXT:    lui a0, %hi(U)
+; CHECK-NEXT:    lw s6, %lo(U)(a0)
+; CHECK-NEXT:    lw s7, %lo(U+4)(a0)
+; CHECK-NEXT:    lw s8, %lo(U+8)(a0)
+; CHECK-NEXT:    lw s0, %lo(U+12)(a0)
+; CHECK-NEXT:    sw zero, 612(sp)
+; CHECK-NEXT:    sw zero, 608(sp)
+; CHECK-NEXT:    sw zero, 604(sp)
+; CHECK-NEXT:    sw zero, 600(sp)
+; CHECK-NEXT:    sw s0, 596(sp)
+; CHECK-NEXT:    sw s8, 592(sp)
+; CHECK-NEXT:    sw s7, 588(sp)
+; CHECK-NEXT:    addi a0, sp, 616
+; CHECK-NEXT:    addi a1, sp, 600
+; CHECK-NEXT:    addi a2, sp, 584
+; CHECK-NEXT:    sw s6, 584(sp)
+; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    lw s3, 616(sp)
+; CHECK-NEXT:    lw s4, 620(sp)
+; CHECK-NEXT:    lw s9, 624(sp)
+; CHECK-NEXT:    lw s11, 628(sp)
+; CHECK-NEXT:    sw s0, 548(sp)
+; CHECK-NEXT:    sw s8, 544(sp)
+; CHECK-NEXT:    sw s7, 540(sp)
+; CHECK-NEXT:    sw s6, 536(sp)
+; CHECK-NEXT:    sw s11, 564(sp)
+; CHECK-NEXT:    sw s9, 560(sp)
+; CHECK-NEXT:    sw s4, 556(sp)
+; CHECK-NEXT:    addi a0, sp, 568
+; CHECK-NEXT:    addi a1, sp, 552
+; CHECK-NEXT:    addi a2, sp, 536
+; CHECK-NEXT:    sw s3, 552(sp)
+; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    lw a0, 568(sp)
+; CHECK-NEXT:    sw a0, 40(sp)
+; CHECK-NEXT:    lw a0, 572(sp)
+; CHECK-NEXT:    sw a0, 32(sp)
+; CHECK-NEXT:    lw a0, 576(sp)
+; CHECK-NEXT:    sw a0, 24(sp)
+; CHECK-NEXT:    lw a0, 580(sp)
+; CHECK-NEXT:    sw a0, 16(sp)
+; CHECK-NEXT:    sw zero, 500(sp)
+; CHECK-NEXT:    sw zero, 496(sp)
+; CHECK-NEXT:    sw zero, 492(sp)
+; CHECK-NEXT:    sw zero, 488(sp)
+; CHECK-NEXT:    sw s0, 516(sp)
+; CHECK-NEXT:    sw s8, 512(sp)
+; CHECK-NEXT:    sw s7, 508(sp)
+; CHECK-NEXT:    addi a0, sp, 520
+; CHECK-NEXT:    addi a1, sp, 504
+; CHECK-NEXT:    addi a2, sp, 488
+; CHECK-NEXT:    sw s6, 504(sp)
+; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    lw s2, 520(sp)
+; CHECK-NEXT:    lw s10, 524(sp)
+; CHECK-NEXT:    lw s5, 528(sp)
+; CHECK-NEXT:    lw s1, 532(sp)
+; CHECK-NEXT:    sw s1, 8(sp)
+; CHECK-NEXT:    lui a0, %hi(Y1)
+; CHECK-NEXT:    lw a1, %lo(Y1)(a0)
+; CHECK-NEXT:    sw a1, 48(sp)
+; CHECK-NEXT:    lw a2, %lo(Y1+4)(a0)
+; CHECK-NEXT:    sw a2, 52(sp)
+; CHECK-NEXT:    lw a3, %lo(Y1+8)(a0)
+; CHECK-NEXT:    sw a3, 4(sp)
+; CHECK-NEXT:    lw a0, %lo(Y1+12)(a0)
+; CHECK-NEXT:    sw a0, 0(sp)
+; CHECK-NEXT:    sw a0, 308(sp)
+; CHECK-NEXT:    sw a3, 304(sp)
+; CHECK-NEXT:    sw a2, 300(sp)
+; CHECK-NEXT:    lw a0, 52(sp)
+; CHECK-NEXT:    sw a1, 296(sp)
+; CHECK-NEXT:    sw s11, 324(sp)
+; CHECK-NEXT:    sw s9, 320(sp)
+; CHECK-NEXT:    sw s4, 316(sp)
+; CHECK-NEXT:    addi a0, sp, 328
+; CHECK-NEXT:    addi a1, sp, 312
+; CHECK-NEXT:    addi a2, sp, 296
+; CHECK-NEXT:    sw s3, 312(sp)
+; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    lw a0, 328(sp)
+; CHECK-NEXT:    sw a0, 44(sp)
+; CHECK-NEXT:    lw a0, 332(sp)
+; CHECK-NEXT:    sw a0, 36(sp)
+; CHECK-NEXT:    lw a0, 336(sp)
+; CHECK-NEXT:    sw a0, 28(sp)
+; CHECK-NEXT:    lw a0, 340(sp)
+; CHECK-NEXT:    sw a0, 20(sp)
+; CHECK-NEXT:    sw s0, 468(sp)
+; CHECK-NEXT:    sw s8, 464(sp)
+; CHECK-NEXT:    sw s7, 460(sp)
+; CHECK-NEXT:    sw s6, 456(sp)
+; CHECK-NEXT:    sw s1, 452(sp)
+; CHECK-NEXT:    sw s5, 448(sp)
+; CHECK-NEXT:    sw s10, 444(sp)
+; CHECK-NEXT:    addi a0, sp, 472
+; CHECK-NEXT:    addi a1, sp, 456
+; CHECK-NEXT:    addi a2, sp, 440
+; CHECK-NEXT:    sw s2, 440(sp)
+; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    lw a3, 472(sp)
+; CHECK-NEXT:    lw a0, 476(sp)
+; CHECK-NEXT:    lw a1, 480(sp)
+; CHECK-NEXT:    lw a2, 484(sp)
+; CHECK-NEXT:    sw zero, 420(sp)
+; CHECK-NEXT:    sw zero, 416(sp)
+; CHECK-NEXT:    sw zero, 412(sp)
+; CHECK-NEXT:    sw zero, 408(sp)
+; CHECK-NEXT:    sw a2, 404(sp)
+; CHECK-NEXT:    sw a1, 400(sp)
+; CHECK-NEXT:    sw a0, 396(sp)
+; CHECK-NEXT:    addi a0, sp, 424
+; CHECK-NEXT:    addi a1, sp, 408
+; CHECK-NEXT:    addi a2, sp, 392
+; CHECK-NEXT:    sw a3, 392(sp)
+; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    lw a0, 424(sp)
+; CHECK-NEXT:    lw a1, 436(sp)
+; CHECK-NEXT:    lw a2, 432(sp)
+; CHECK-NEXT:    lw a3, 428(sp)
+; CHECK-NEXT:    lui a4, %hi(X)
+; CHECK-NEXT:    sw a1, %lo(X+12)(a4)
+; CHECK-NEXT:    sw a2, %lo(X+8)(a4)
+; CHECK-NEXT:    sw a3, %lo(X+4)(a4)
+; CHECK-NEXT:    sw a0, %lo(X)(a4)
+; CHECK-NEXT:    lw s8, 0(sp)
+; CHECK-NEXT:    sw s8, 212(sp)
+; CHECK-NEXT:    lw s7, 4(sp)
+; CHECK-NEXT:    sw s7, 208(sp)
+; CHECK-NEXT:    lw a0, 52(sp)
+; CHECK-NEXT:    sw a0, 204(sp)
+; CHECK-NEXT:    lw a0, 48(sp)
+; CHECK-NEXT:    sw a0, 200(sp)
+; CHECK-NEXT:    lw s6, 16(sp)
+; CHECK-NEXT:    sw s6, 228(sp)
+; CHECK-NEXT:    lw s4, 24(sp)
+; CHECK-NEXT:    sw s4, 224(sp)
+; CHECK-NEXT:    lw s0, 32(sp)
+; CHECK-NEXT:    sw s0, 220(sp)
+; CHECK-NEXT:    addi a0, sp, 232
+; CHECK-NEXT:    addi a1, sp, 216
+; CHECK-NEXT:    addi a2, sp, 200
+; CHECK-NEXT:    lw s1, 40(sp)
+; CHECK-NEXT:    sw s1, 216(sp)
+; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    lw a0, 232(sp)
+; CHECK-NEXT:    sw a0, 12(sp)
+; CHECK-NEXT:    lw s3, 236(sp)
+; CHECK-NEXT:    lw s9, 240(sp)
+; CHECK-NEXT:    lw s11, 244(sp)
+; CHECK-NEXT:    sw zero, 356(sp)
+; CHECK-NEXT:    sw zero, 352(sp)
+; CHECK-NEXT:    sw zero, 348(sp)
+; CHECK-NEXT:    sw zero, 344(sp)
+; CHECK-NEXT:    lw a0, 8(sp)
+; CHECK-NEXT:    sw a0, 372(sp)
+; CHECK-NEXT:    sw s5, 368(sp)
+; CHECK-NEXT:    sw s10, 364(sp)
+; CHECK-NEXT:    addi a0, sp, 376
+; CHECK-NEXT:    addi a1, sp, 360
+; CHECK-NEXT:    addi a2, sp, 344
+; CHECK-NEXT:    sw s2, 360(sp)
+; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    lw a0, 376(sp)
+; CHECK-NEXT:    lw a1, 388(sp)
+; CHECK-NEXT:    lw a2, 384(sp)
+; CHECK-NEXT:    lw a3, 380(sp)
+; CHECK-NEXT:    lui a4, %hi(S)
+; CHECK-NEXT:    sw a1, %lo(S+12)(a4)
+; CHECK-NEXT:    sw a2, %lo(S+8)(a4)
+; CHECK-NEXT:    sw a3, %lo(S+4)(a4)
+; CHECK-NEXT:    sw a0, %lo(S)(a4)
+; CHECK-NEXT:    sw s6, 260(sp)
+; CHECK-NEXT:    sw s4, 256(sp)
+; CHECK-NEXT:    sw s0, 252(sp)
+; CHECK-NEXT:    sw s1, 248(sp)
+; CHECK-NEXT:    lw a0, 20(sp)
+; CHECK-NEXT:    sw a0, 276(sp)
+; CHECK-NEXT:    lw a0, 28(sp)
+; CHECK-NEXT:    sw a0, 272(sp)
+; CHECK-NEXT:    lw a0, 36(sp)
+; CHECK-NEXT:    sw a0, 268(sp)
+; CHECK-NEXT:    addi a0, sp, 280
+; CHECK-NEXT:    addi a1, sp, 264
+; CHECK-NEXT:    addi a2, sp, 248
+; CHECK-NEXT:    lw a3, 44(sp)
+; CHECK-NEXT:    sw a3, 264(sp)
+; CHECK-NEXT:    call __subtf3
+; CHECK-NEXT:    lw a0, 280(sp)
+; CHECK-NEXT:    lw a1, 292(sp)
+; CHECK-NEXT:    lw a2, 288(sp)
+; CHECK-NEXT:    lw a3, 284(sp)
+; CHECK-NEXT:    lui a4, %hi(T)
+; CHECK-NEXT:    sw a1, %lo(T+12)(a4)
+; CHECK-NEXT:    sw a2, %lo(T+8)(a4)
+; CHECK-NEXT:    sw a3, %lo(T+4)(a4)
+; CHECK-NEXT:    sw a0, %lo(T)(a4)
+; CHECK-NEXT:    sw zero, 164(sp)
+; CHECK-NEXT:    sw zero, 160(sp)
+; CHECK-NEXT:    sw zero, 156(sp)
+; CHECK-NEXT:    sw zero, 152(sp)
+; CHECK-NEXT:    sw s11, 180(sp)
+; CHECK-NEXT:    sw s9, 176(sp)
+; CHECK-NEXT:    sw s3, 172(sp)
+; CHECK-NEXT:    addi a0, sp, 184
+; CHECK-NEXT:    addi a1, sp, 168
+; CHECK-NEXT:    addi a2, sp, 152
+; CHECK-NEXT:    lw a3, 12(sp)
+; CHECK-NEXT:    sw a3, 168(sp)
+; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    lw a0, 184(sp)
+; CHECK-NEXT:    lw a1, 196(sp)
+; CHECK-NEXT:    lw a2, 192(sp)
+; CHECK-NEXT:    lw a3, 188(sp)
+; CHECK-NEXT:    lui a4, %hi(Y)
+; CHECK-NEXT:    sw a1, %lo(Y+12)(a4)
+; CHECK-NEXT:    sw a2, %lo(Y+8)(a4)
+; CHECK-NEXT:    sw a3, %lo(Y+4)(a4)
+; CHECK-NEXT:    sw a0, %lo(Y)(a4)
+; CHECK-NEXT:    sw zero, 116(sp)
+; CHECK-NEXT:    sw zero, 112(sp)
+; CHECK-NEXT:    sw zero, 108(sp)
+; CHECK-NEXT:    sw zero, 104(sp)
+; CHECK-NEXT:    sw s8, 132(sp)
+; CHECK-NEXT:    sw s7, 128(sp)
+; CHECK-NEXT:    lw a0, 52(sp)
+; CHECK-NEXT:    sw a0, 124(sp)
+; CHECK-NEXT:    addi a0, sp, 136
+; CHECK-NEXT:    addi a1, sp, 120
+; CHECK-NEXT:    addi a2, sp, 104
+; CHECK-NEXT:    lw a3, 48(sp)
+; CHECK-NEXT:    sw a3, 120(sp)
+; CHECK-NEXT:    call __multf3
+; CHECK-NEXT:    lw a3, 136(sp)
+; CHECK-NEXT:    lw a0, 140(sp)
+; CHECK-NEXT:    lw a1, 144(sp)
+; CHECK-NEXT:    lw a2, 148(sp)
+; CHECK-NEXT:    lui a4, 786400
+; CHECK-NEXT:    sw a4, 68(sp)
+; CHECK-NEXT:    sw zero, 64(sp)
+; CHECK-NEXT:    sw zero, 60(sp)
+; CHECK-NEXT:    sw zero, 56(sp)
+; CHECK-NEXT:    sw a2, 84(sp)
+; CHECK-NEXT:    sw a1, 80(sp)
+; CHECK-NEXT:    sw a0, 76(sp)
+; CHECK-NEXT:    addi a0, sp, 88
+; CHECK-NEXT:    addi a1, sp, 72
+; CHECK-NEXT:    addi a2, sp, 56
+; CHECK-NEXT:    sw a3, 72(sp)
+; CHECK-NEXT:    call __addtf3
+; CHECK-NEXT:    lw a0, 96(sp)
+; CHECK-NEXT:    lw a1, 100(sp)
+; CHECK-NEXT:    lw a2, 88(sp)
+; CHECK-NEXT:    lw a3, 92(sp)
+; CHECK-NEXT:    lui a4, %hi(Y1)
+; CHECK-NEXT:    sw a0, %lo(Y1+8)(a4)
+; CHECK-NEXT:    sw a1, %lo(Y1+12)(a4)
+; CHECK-NEXT:    sw a2, %lo(Y1)(a4)
+; CHECK-NEXT:    sw a3, %lo(Y1+4)(a4)
+; CHECK-NEXT:    lw s11, 636(sp)
+; CHECK-NEXT:    lw s10, 640(sp)
+; CHECK-NEXT:    lw s9, 644(sp)
+; CHECK-NEXT:    lw s8, 648(sp)
+; CHECK-NEXT:    lw s7, 652(sp)
+; CHECK-NEXT:    lw s6, 656(sp)
+; CHECK-NEXT:    lw s5, 660(sp)
+; CHECK-NEXT:    lw s4, 664(sp)
+; CHECK-NEXT:    lw s3, 668(sp)
+; CHECK-NEXT:    lw s2, 672(sp)
+; CHECK-NEXT:    lw s1, 676(sp)
+; CHECK-NEXT:    lw s0, 680(sp)
+; CHECK-NEXT:    lw ra, 684(sp)
+; CHECK-NEXT:    addi sp, sp, 688
+; CHECK-NEXT:    ret
+  %1 = load fp128, fp128* @U, align 16
+  %2 = fsub fp128 0xL00000000000000000000000000000000, %1
+  %3 = fsub fp128 %2, %1
+  %4 = fadd fp128 %1, 0xL00000000000000000000000000000000
+  %5 = load fp128, fp128* @Y1, align 16
+  %6 = fmul fp128 %2, %5
+  %7 = fadd fp128 %1, %4
+  %8 = fsub fp128 0xL00000000000000000000000000000000, %7
+  store fp128 %8, fp128* @X, align 16
+  %9 = fmul fp128 %3, %5
+  %10 = fmul fp128 0xL00000000000000000000000000000000, %4
+  store fp128 %10, fp128* @S, align 16
+  %11 = fsub fp128 %6, %3
+  store fp128 %11, fp128* @T, align 16
+  %12 = fadd fp128 0xL00000000000000000000000000000000, %9
+  store fp128 %12, fp128* @Y, align 16
+  %13 = fmul fp128 0xL00000000000000000000000000000000, %5
+  %14 = fadd fp128 %13, 0xL0000000000000000BFFE000000000000
+  store fp128 %14, fp128* @Y1, align 16
+  ret void
+}
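
Note on maintenance: the CHECK lines above are autogenerated, so if a later patch changes riscv32 codegen they should be regenerated with the script named in the test's NOTE line rather than edited by hand. A minimal sketch of the invocation, assuming llc has been built at build/bin/llc (the build directory path is an assumption; adjust it to your checkout):

  # Regenerate the autogenerated assertions for this test; --llc-binary
  # points at the freshly built llc (build path assumed above).
  llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
    llvm/test/CodeGen/RISCV/stack-store-check.ll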