diff --git a/libunwind/src/UnwindRegistersRestore.S b/libunwind/src/UnwindRegistersRestore.S
--- a/libunwind/src/UnwindRegistersRestore.S
+++ b/libunwind/src/UnwindRegistersRestore.S
@@ -8,6 +8,12 @@
 #include "assembly.h"
 
+#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+
+#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
+
 #if defined(_AIX)
   .toc
 #else
@@ -152,32 +158,9 @@
   // skip r3 for now
   // skip r4 for now
   // skip r5 for now
-  PPC64_LR(6)
-  PPC64_LR(7)
-  PPC64_LR(8)
-  PPC64_LR(9)
-  PPC64_LR(10)
-  PPC64_LR(11)
-  PPC64_LR(12)
-  PPC64_LR(13)
-  PPC64_LR(14)
-  PPC64_LR(15)
-  PPC64_LR(16)
-  PPC64_LR(17)
-  PPC64_LR(18)
-  PPC64_LR(19)
-  PPC64_LR(20)
-  PPC64_LR(21)
-  PPC64_LR(22)
-  PPC64_LR(23)
-  PPC64_LR(24)
-  PPC64_LR(25)
-  PPC64_LR(26)
-  PPC64_LR(27)
-  PPC64_LR(28)
-  PPC64_LR(29)
-  PPC64_LR(30)
-  PPC64_LR(31)
+  .irp i,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    PPC64_LR(\i)
+  .endr
 
 #if defined(__VSX__)
@@ -193,38 +176,9 @@
   addi  4, 4, 16
 
   // restore the first 32 VS regs (and also all floating point regs)
-  PPC64_LVS(0)
-  PPC64_LVS(1)
-  PPC64_LVS(2)
-  PPC64_LVS(3)
-  PPC64_LVS(4)
-  PPC64_LVS(5)
-  PPC64_LVS(6)
-  PPC64_LVS(7)
-  PPC64_LVS(8)
-  PPC64_LVS(9)
-  PPC64_LVS(10)
-  PPC64_LVS(11)
-  PPC64_LVS(12)
-  PPC64_LVS(13)
-  PPC64_LVS(14)
-  PPC64_LVS(15)
-  PPC64_LVS(16)
-  PPC64_LVS(17)
-  PPC64_LVS(18)
-  PPC64_LVS(19)
-  PPC64_LVS(20)
-  PPC64_LVS(21)
-  PPC64_LVS(22)
-  PPC64_LVS(23)
-  PPC64_LVS(24)
-  PPC64_LVS(25)
-  PPC64_LVS(26)
-  PPC64_LVS(27)
-  PPC64_LVS(28)
-  PPC64_LVS(29)
-  PPC64_LVS(30)
-  PPC64_LVS(31)
+  .irp i,FROM_0_TO_31
+    PPC64_LVS(\i)
+  .endr
 
 #define PPC64_CLVS_RESTORE(n)                 \
   addi   4, 3, PPC64_OFFS_FP + n * 16        ;\
@@ -257,38 +211,12 @@
 #endif // !defined(_AIX)
 
-  PPC64_CLVSl(32)
-  PPC64_CLVSl(33)
-  PPC64_CLVSl(34)
-  PPC64_CLVSl(35)
-  PPC64_CLVSl(36)
-  PPC64_CLVSl(37)
-  PPC64_CLVSl(38)
-  PPC64_CLVSl(39)
-  PPC64_CLVSl(40)
-  PPC64_CLVSl(41)
-  PPC64_CLVSl(42)
-  PPC64_CLVSl(43)
-  PPC64_CLVSl(44)
-  PPC64_CLVSl(45)
-  PPC64_CLVSl(46)
-  PPC64_CLVSl(47)
-  PPC64_CLVSh(48)
-  PPC64_CLVSh(49)
-  PPC64_CLVSh(50)
-  PPC64_CLVSh(51)
-  PPC64_CLVSh(52)
-  PPC64_CLVSh(53)
-  PPC64_CLVSh(54)
-  PPC64_CLVSh(55)
-  PPC64_CLVSh(56)
-  PPC64_CLVSh(57)
-  PPC64_CLVSh(58)
-  PPC64_CLVSh(59)
-  PPC64_CLVSh(60)
-  PPC64_CLVSh(61)
-  PPC64_CLVSh(62)
-  PPC64_CLVSh(63)
+  .irp i,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47
+    PPC64_CLVSl(\i)
+  .endr
+  .irp i,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
+    PPC64_CLVSh(\i)
+  .endr
 
 #else
@@ -297,38 +225,9 @@
   lfd   n, (PPC64_OFFS_FP + n * 16)(3)
 
   // restore float registers
-  PPC64_LF(0)
-  PPC64_LF(1)
-  PPC64_LF(2)
-  PPC64_LF(3)
-  PPC64_LF(4)
-  PPC64_LF(5)
-  PPC64_LF(6)
-  PPC64_LF(7)
-  PPC64_LF(8)
-  PPC64_LF(9)
-  PPC64_LF(10)
-  PPC64_LF(11)
-  PPC64_LF(12)
-  PPC64_LF(13)
-  PPC64_LF(14)
-  PPC64_LF(15)
-  PPC64_LF(16)
-  PPC64_LF(17)
-  PPC64_LF(18)
-  PPC64_LF(19)
-  PPC64_LF(20)
-  PPC64_LF(21)
-  PPC64_LF(22)
-  PPC64_LF(23)
-  PPC64_LF(24)
-  PPC64_LF(25)
-  PPC64_LF(26)
-  PPC64_LF(27)
-  PPC64_LF(28)
-  PPC64_LF(29)
-  PPC64_LF(30)
-  PPC64_LF(31)
+  .irp i,FROM_0_TO_31
+    PPC64_LF(\i)
+  .endr
 
 #if defined(__ALTIVEC__)
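Note on how the new loops expand (illustration only, not part of the patch): these .S files are run through the C preprocessor before they reach the assembler, so cpp first substitutes the FROM_* lists and the existing helper macros such as PPC64_LF(n) shown above, and gas then unrolls the .irp block, replacing \i with each list element and folding the constant expressions.

    // What gas sees for the float-register loop after cpp has run:
    .irp i,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
      lfd \i, (PPC64_OFFS_FP + \i * 16)(3)
    .endr
    // gas unrolls this into the same 32 loads as the old code, e.g. for \i = 31:
    //   lfd 31, (PPC64_OFFS_FP + 31 * 16)(3)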
@@ -370,38 +269,12 @@
   // the _vectorScalarRegisters may not be 16-byte aligned
   // so copy via red zone temp buffer
-  PPC64_CLV_UNALIGNEDl(0)
-  PPC64_CLV_UNALIGNEDl(1)
-  PPC64_CLV_UNALIGNEDl(2)
-  PPC64_CLV_UNALIGNEDl(3)
-  PPC64_CLV_UNALIGNEDl(4)
-  PPC64_CLV_UNALIGNEDl(5)
-  PPC64_CLV_UNALIGNEDl(6)
-  PPC64_CLV_UNALIGNEDl(7)
-  PPC64_CLV_UNALIGNEDl(8)
-  PPC64_CLV_UNALIGNEDl(9)
-  PPC64_CLV_UNALIGNEDl(10)
-  PPC64_CLV_UNALIGNEDl(11)
-  PPC64_CLV_UNALIGNEDl(12)
-  PPC64_CLV_UNALIGNEDl(13)
-  PPC64_CLV_UNALIGNEDl(14)
-  PPC64_CLV_UNALIGNEDl(15)
-  PPC64_CLV_UNALIGNEDh(16)
-  PPC64_CLV_UNALIGNEDh(17)
-  PPC64_CLV_UNALIGNEDh(18)
-  PPC64_CLV_UNALIGNEDh(19)
-  PPC64_CLV_UNALIGNEDh(20)
-  PPC64_CLV_UNALIGNEDh(21)
-  PPC64_CLV_UNALIGNEDh(22)
-  PPC64_CLV_UNALIGNEDh(23)
-  PPC64_CLV_UNALIGNEDh(24)
-  PPC64_CLV_UNALIGNEDh(25)
-  PPC64_CLV_UNALIGNEDh(26)
-  PPC64_CLV_UNALIGNEDh(27)
-  PPC64_CLV_UNALIGNEDh(28)
-  PPC64_CLV_UNALIGNEDh(29)
-  PPC64_CLV_UNALIGNEDh(30)
-  PPC64_CLV_UNALIGNEDh(31)
+  .irp i,FROM_0_TO_15
+    PPC64_CLV_UNALIGNEDl(\i)
+  .endr
+  .irp i,FROM_16_TO_31
+    PPC64_CLV_UNALIGNEDh(\i)
+  .endr
 
 #endif
 #endif
@@ -448,67 +321,15 @@
   // skip r3 for now
   // skip r4 for now
   // skip r5 for now
-  lwz 6, 32(3)
-  lwz 7, 36(3)
-  lwz 8, 40(3)
-  lwz 9, 44(3)
-  lwz 10, 48(3)
-  lwz 11, 52(3)
-  lwz 12, 56(3)
-  lwz 13, 60(3)
-  lwz 14, 64(3)
-  lwz 15, 68(3)
-  lwz 16, 72(3)
-  lwz 17, 76(3)
-  lwz 18, 80(3)
-  lwz 19, 84(3)
-  lwz 20, 88(3)
-  lwz 21, 92(3)
-  lwz 22, 96(3)
-  lwz 23,100(3)
-  lwz 24,104(3)
-  lwz 25,108(3)
-  lwz 26,112(3)
-  lwz 27,116(3)
-  lwz 28,120(3)
-  lwz 29,124(3)
-  lwz 30,128(3)
-  lwz 31,132(3)
+  .irp i,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    lwz \i, (8+4*\i)(3)
+  .endr
 
 #ifndef __NO_FPRS__
   // restore float registers
-  lfd 0, 160(3)
-  lfd 1, 168(3)
-  lfd 2, 176(3)
-  lfd 3, 184(3)
-  lfd 4, 192(3)
-  lfd 5, 200(3)
-  lfd 6, 208(3)
-  lfd 7, 216(3)
-  lfd 8, 224(3)
-  lfd 9, 232(3)
-  lfd 10,240(3)
-  lfd 11,248(3)
-  lfd 12,256(3)
-  lfd 13,264(3)
-  lfd 14,272(3)
-  lfd 15,280(3)
-  lfd 16,288(3)
-  lfd 17,296(3)
-  lfd 18,304(3)
-  lfd 19,312(3)
-  lfd 20,320(3)
-  lfd 21,328(3)
-  lfd 22,336(3)
-  lfd 23,344(3)
-  lfd 24,352(3)
-  lfd 25,360(3)
-  lfd 26,368(3)
-  lfd 27,376(3)
-  lfd 28,384(3)
-  lfd 29,392(3)
-  lfd 30,400(3)
-  lfd 31,408(3)
+  .irp i,FROM_0_TO_31
+    lfd \i, (160+8*\i)(3)
+  .endr
 #endif
 
 #if defined(__ALTIVEC__)
@@ -555,38 +376,12 @@
   // r4 is now a 16-byte aligned pointer into the red zone
   // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
-  LOAD_VECTOR_UNALIGNEDl(0)
-  LOAD_VECTOR_UNALIGNEDl(1)
-  LOAD_VECTOR_UNALIGNEDl(2)
-  LOAD_VECTOR_UNALIGNEDl(3)
-  LOAD_VECTOR_UNALIGNEDl(4)
-  LOAD_VECTOR_UNALIGNEDl(5)
-  LOAD_VECTOR_UNALIGNEDl(6)
-  LOAD_VECTOR_UNALIGNEDl(7)
-  LOAD_VECTOR_UNALIGNEDl(8)
-  LOAD_VECTOR_UNALIGNEDl(9)
-  LOAD_VECTOR_UNALIGNEDl(10)
-  LOAD_VECTOR_UNALIGNEDl(11)
-  LOAD_VECTOR_UNALIGNEDl(12)
-  LOAD_VECTOR_UNALIGNEDl(13)
-  LOAD_VECTOR_UNALIGNEDl(14)
-  LOAD_VECTOR_UNALIGNEDl(15)
-  LOAD_VECTOR_UNALIGNEDh(16)
-  LOAD_VECTOR_UNALIGNEDh(17)
-  LOAD_VECTOR_UNALIGNEDh(18)
-  LOAD_VECTOR_UNALIGNEDh(19)
-  LOAD_VECTOR_UNALIGNEDh(20)
-  LOAD_VECTOR_UNALIGNEDh(21)
-  LOAD_VECTOR_UNALIGNEDh(22)
-  LOAD_VECTOR_UNALIGNEDh(23)
-  LOAD_VECTOR_UNALIGNEDh(24)
-  LOAD_VECTOR_UNALIGNEDh(25)
-  LOAD_VECTOR_UNALIGNEDh(26)
-  LOAD_VECTOR_UNALIGNEDh(27)
-  LOAD_VECTOR_UNALIGNEDh(28)
-  LOAD_VECTOR_UNALIGNEDh(29)
-  LOAD_VECTOR_UNALIGNEDh(30)
-  LOAD_VECTOR_UNALIGNEDh(31)
+  .irp i,FROM_0_TO_15
+    LOAD_VECTOR_UNALIGNEDl(\i)
+  .endr
+  .irp i,FROM_16_TO_31
+    LOAD_VECTOR_UNALIGNEDh(\i)
+  .endr
 
 #endif
 Lnovec:
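A quick spot-check of the rewritten 32-bit PowerPC offsets (illustration only, not part of the patch): the expressions reproduce the literal displacements they replace, since gas folds them on each iteration; note the FPR slots are 8 bytes apart, so the stride is 8*\i.

    .irp i,6,31
      lwz \i, (8+4*\i)(3)      // i=6  -> lwz 6, 32(3)    i=31 -> lwz 31, 132(3)
    .endr
    .irp i,0,31
      lfd \i, (160+8*\i)(3)    // i=0  -> lfd 0, 160(3)   i=31 -> lfd 31, 408(3)
    .endr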
@@ -1182,72 +977,20 @@
   .p2align 2
 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
 # if defined(__riscv_flen)
-  FLOAD f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
-  FLOAD f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
-  FLOAD f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
-  FLOAD f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
-  FLOAD f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
-  FLOAD f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
-  FLOAD f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
-  FLOAD f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
-  FLOAD f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
-  FLOAD f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
-  FLOAD f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
-  FLOAD f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
-  FLOAD f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
-  FLOAD f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
-  FLOAD f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
-  FLOAD f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
-  FLOAD f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
-  FLOAD f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
-  FLOAD f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
-  FLOAD f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
-  FLOAD f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
-  FLOAD f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
-  FLOAD f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
-  FLOAD f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
-  FLOAD f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
-  FLOAD f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
-  FLOAD f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
-  FLOAD f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
-  FLOAD f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
-  FLOAD f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
-  FLOAD f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
-  FLOAD f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
+  .irp i,FROM_0_TO_31
+    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
+  .endr
 # endif
 
   // x0 is zero
   ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
-  ILOAD x2, (RISCV_ISIZE * 2)(a0)
-  ILOAD x3, (RISCV_ISIZE * 3)(a0)
-  ILOAD x4, (RISCV_ISIZE * 4)(a0)
-  ILOAD x5, (RISCV_ISIZE * 5)(a0)
-  ILOAD x6, (RISCV_ISIZE * 6)(a0)
-  ILOAD x7, (RISCV_ISIZE * 7)(a0)
-  ILOAD x8, (RISCV_ISIZE * 8)(a0)
-  ILOAD x9, (RISCV_ISIZE * 9)(a0)
+  .irp i,2,3,4,5,6,7,8,9
+    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
+  .endr
   // skip a0 for now
-  ILOAD x11, (RISCV_ISIZE * 11)(a0)
-  ILOAD x12, (RISCV_ISIZE * 12)(a0)
-  ILOAD x13, (RISCV_ISIZE * 13)(a0)
-  ILOAD x14, (RISCV_ISIZE * 14)(a0)
-  ILOAD x15, (RISCV_ISIZE * 15)(a0)
-  ILOAD x16, (RISCV_ISIZE * 16)(a0)
-  ILOAD x17, (RISCV_ISIZE * 17)(a0)
-  ILOAD x18, (RISCV_ISIZE * 18)(a0)
-  ILOAD x19, (RISCV_ISIZE * 19)(a0)
-  ILOAD x20, (RISCV_ISIZE * 20)(a0)
-  ILOAD x21, (RISCV_ISIZE * 21)(a0)
-  ILOAD x22, (RISCV_ISIZE * 22)(a0)
-  ILOAD x23, (RISCV_ISIZE * 23)(a0)
-  ILOAD x24, (RISCV_ISIZE * 24)(a0)
-  ILOAD x25, (RISCV_ISIZE * 25)(a0)
-  ILOAD x26, (RISCV_ISIZE * 26)(a0)
-  ILOAD x27, (RISCV_ISIZE * 27)(a0)
-  ILOAD x28, (RISCV_ISIZE * 28)(a0)
-  ILOAD x29, (RISCV_ISIZE * 29)(a0)
-  ILOAD x30, (RISCV_ISIZE * 30)(a0)
-  ILOAD x31, (RISCV_ISIZE * 31)(a0)
+  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
+  .endr
   ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0
 
   ret // jump to ra
@@ -1266,22 +1009,9 @@
   lg %r1, 8(%r2)
 
   // Restore FPRs
-  ld %f0, 144(%r2)
-  ld %f1, 152(%r2)
-  ld %f2, 160(%r2)
-  ld %f3, 168(%r2)
-  ld %f4, 176(%r2)
-  ld %f5, 184(%r2)
-  ld %f6, 192(%r2)
-  ld %f7, 200(%r2)
-  ld %f8, 208(%r2)
-  ld %f9, 216(%r2)
-  ld %f10, 224(%r2)
-  ld %f11, 232(%r2)
-  ld %f12, 240(%r2)
-  ld %f13, 248(%r2)
-  ld %f14, 256(%r2)
-  ld %f15, 264(%r2)
+  .irp i,FROM_0_TO_15
+    ld %f\i, (144+8*\i)(%r2)
+  .endr
 
   // Restore GPRs - skipping %r0 and %r1
   lmg %r2, %r15, 32(%r2)
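The ordering in the RISC-V hunk above is the subtle part (a sketch of the reasoning, not new code): a0/x10 holds the Registers_riscv pointer that every load indexes, so it stays out of the .irp list and is overwritten only after the last load that uses it.

    .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
      ILOAD x\i, (RISCV_ISIZE * \i)(a0)   // a0 still points at the saved state
    .endr
    ILOAD x10, (RISCV_ISIZE * 10)(a0)     // clobbers a0, so it must come last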
@@ -1300,72 +1030,20 @@
   .p2align 2
 DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
 # if __loongarch_frlen == 64
-  fld.d $f0, $a0, (8 * 33 + 8 * 0)
-  fld.d $f1, $a0, (8 * 33 + 8 * 1)
-  fld.d $f2, $a0, (8 * 33 + 8 * 2)
-  fld.d $f3, $a0, (8 * 33 + 8 * 3)
-  fld.d $f4, $a0, (8 * 33 + 8 * 4)
-  fld.d $f5, $a0, (8 * 33 + 8 * 5)
-  fld.d $f6, $a0, (8 * 33 + 8 * 6)
-  fld.d $f7, $a0, (8 * 33 + 8 * 7)
-  fld.d $f8, $a0, (8 * 33 + 8 * 8)
-  fld.d $f9, $a0, (8 * 33 + 8 * 9)
-  fld.d $f10, $a0, (8 * 33 + 8 * 10)
-  fld.d $f11, $a0, (8 * 33 + 8 * 11)
-  fld.d $f12, $a0, (8 * 33 + 8 * 12)
-  fld.d $f13, $a0, (8 * 33 + 8 * 13)
-  fld.d $f14, $a0, (8 * 33 + 8 * 14)
-  fld.d $f15, $a0, (8 * 33 + 8 * 15)
-  fld.d $f16, $a0, (8 * 33 + 8 * 16)
-  fld.d $f17, $a0, (8 * 33 + 8 * 17)
-  fld.d $f18, $a0, (8 * 33 + 8 * 18)
-  fld.d $f19, $a0, (8 * 33 + 8 * 19)
-  fld.d $f20, $a0, (8 * 33 + 8 * 20)
-  fld.d $f21, $a0, (8 * 33 + 8 * 21)
-  fld.d $f22, $a0, (8 * 33 + 8 * 22)
-  fld.d $f23, $a0, (8 * 33 + 8 * 23)
-  fld.d $f24, $a0, (8 * 33 + 8 * 24)
-  fld.d $f25, $a0, (8 * 33 + 8 * 25)
-  fld.d $f26, $a0, (8 * 33 + 8 * 26)
-  fld.d $f27, $a0, (8 * 33 + 8 * 27)
-  fld.d $f28, $a0, (8 * 33 + 8 * 28)
-  fld.d $f29, $a0, (8 * 33 + 8 * 29)
-  fld.d $f30, $a0, (8 * 33 + 8 * 30)
-  fld.d $f31, $a0, (8 * 33 + 8 * 31)
+  .irp i,FROM_0_TO_31
+    fld.d $f\i, $a0, (8 * 33 + 8 * \i)
+  .endr
 # endif
 
   // $r0 is zero
-  ld.d $r1, $a0, (8 * 1)
-  ld.d $r2, $a0, (8 * 2)
-  ld.d $r3, $a0, (8 * 3)
+  .irp i,1,2,3
+    ld.d $r\i, $a0, (8 * \i)
+  .endr
   // skip $a0 for now
-  ld.d $r5, $a0, (8 * 5)
-  ld.d $r6, $a0, (8 * 6)
-  ld.d $r7, $a0, (8 * 7)
-  ld.d $r8, $a0, (8 * 8)
-  ld.d $r9, $a0, (8 * 9)
-  ld.d $r10, $a0, (8 * 10)
-  ld.d $r11, $a0, (8 * 11)
-  ld.d $r12, $a0, (8 * 12)
-  ld.d $r13, $a0, (8 * 13)
-  ld.d $r14, $a0, (8 * 14)
-  ld.d $r15, $a0, (8 * 15)
-  ld.d $r16, $a0, (8 * 16)
-  ld.d $r17, $a0, (8 * 17)
-  ld.d $r18, $a0, (8 * 18)
-  ld.d $r19, $a0, (8 * 19)
-  ld.d $r20, $a0, (8 * 20)
-  ld.d $r21, $a0, (8 * 21)
-  ld.d $r22, $a0, (8 * 22)
-  ld.d $r23, $a0, (8 * 23)
-  ld.d $r24, $a0, (8 * 24)
-  ld.d $r25, $a0, (8 * 25)
-  ld.d $r26, $a0, (8 * 26)
-  ld.d $r27, $a0, (8 * 27)
-  ld.d $r28, $a0, (8 * 28)
-  ld.d $r29, $a0, (8 * 29)
-  ld.d $r30, $a0, (8 * 30)
-  ld.d $r31, $a0, (8 * 31)
+  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    ld.d $r\i, $a0, (8 * \i)
+  .endr
   ld.d $r1, $a0, (8 * 32) // load new pc into $ra
+  ld.d $r4, $a0, (8 * 4) // restore $a0 last
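The same constraint applies to the LoongArch hunk above, because $r4 is $a0 in the LoongArch register numbering: the context pointer must stay intact until the final load through $a0, so the saved $a0 is reloaded only after the new pc has been fetched. A sketch of the required tail order (not new code):

    ld.d $r1, $a0, (8 * 32)   // new pc -> $ra while $a0 still holds the context
    ld.d $r4, $a0, (8 * 4)    // only now overwrite $a0/$r4 with its saved value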
diff --git a/libunwind/src/UnwindRegistersSave.S b/libunwind/src/UnwindRegistersSave.S
--- a/libunwind/src/UnwindRegistersSave.S
+++ b/libunwind/src/UnwindRegistersSave.S
@@ -8,6 +8,12 @@
 #include "assembly.h"
 
+#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+
+#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
+
 #if defined(_AIX)
   .toc
 #else
@@ -351,37 +357,9 @@
   PPC64_STR(0)
   mflr 0
   std 0, PPC64_OFFS_SRR0(3) // store lr as ssr0
-  PPC64_STR(1)
-  PPC64_STR(2)
-  PPC64_STR(3)
-  PPC64_STR(4)
-  PPC64_STR(5)
-  PPC64_STR(6)
-  PPC64_STR(7)
-  PPC64_STR(8)
-  PPC64_STR(9)
-  PPC64_STR(10)
-  PPC64_STR(11)
-  PPC64_STR(12)
-  PPC64_STR(13)
-  PPC64_STR(14)
-  PPC64_STR(15)
-  PPC64_STR(16)
-  PPC64_STR(17)
-  PPC64_STR(18)
-  PPC64_STR(19)
-  PPC64_STR(20)
-  PPC64_STR(21)
-  PPC64_STR(22)
-  PPC64_STR(23)
-  PPC64_STR(24)
-  PPC64_STR(25)
-  PPC64_STR(26)
-  PPC64_STR(27)
-  PPC64_STR(28)
-  PPC64_STR(29)
-  PPC64_STR(30)
-  PPC64_STR(31)
+  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    PPC64_STR(\i)
+  .endr
 
   mfcr 0
   std 0, PPC64_OFFS_CR(3)
@@ -406,70 +384,12 @@
   stxvd2x n, 0, 4                        ;\
   addi 4, 4, 16
 
-  PPC64_STVS(0)
-  PPC64_STVS(1)
-  PPC64_STVS(2)
-  PPC64_STVS(3)
-  PPC64_STVS(4)
-  PPC64_STVS(5)
-  PPC64_STVS(6)
-  PPC64_STVS(7)
-  PPC64_STVS(8)
-  PPC64_STVS(9)
-  PPC64_STVS(10)
-  PPC64_STVS(11)
-  PPC64_STVS(12)
-  PPC64_STVS(13)
-  PPC64_STVS(14)
-  PPC64_STVS(15)
-  PPC64_STVS(16)
-  PPC64_STVS(17)
-  PPC64_STVS(18)
-  PPC64_STVS(19)
-  PPC64_STVS(20)
-  PPC64_STVS(21)
-  PPC64_STVS(22)
-  PPC64_STVS(23)
-  PPC64_STVS(24)
-  PPC64_STVS(25)
-  PPC64_STVS(26)
-  PPC64_STVS(27)
-  PPC64_STVS(28)
-  PPC64_STVS(29)
-  PPC64_STVS(30)
-  PPC64_STVS(31)
-  PPC64_STVS(32)
-  PPC64_STVS(33)
-  PPC64_STVS(34)
-  PPC64_STVS(35)
-  PPC64_STVS(36)
-  PPC64_STVS(37)
-  PPC64_STVS(38)
-  PPC64_STVS(39)
-  PPC64_STVS(40)
-  PPC64_STVS(41)
-  PPC64_STVS(42)
-  PPC64_STVS(43)
-  PPC64_STVS(44)
-  PPC64_STVS(45)
-  PPC64_STVS(46)
-  PPC64_STVS(47)
-  PPC64_STVS(48)
-  PPC64_STVS(49)
-  PPC64_STVS(50)
-  PPC64_STVS(51)
-  PPC64_STVS(52)
-  PPC64_STVS(53)
-  PPC64_STVS(54)
-  PPC64_STVS(55)
-  PPC64_STVS(56)
-  PPC64_STVS(57)
-  PPC64_STVS(58)
-  PPC64_STVS(59)
-  PPC64_STVS(60)
-  PPC64_STVS(61)
-  PPC64_STVS(62)
-  PPC64_STVS(63)
+  .irp i,FROM_0_TO_31
+    PPC64_STVS(\i)
+  .endr
+  .irp i,FROM_32_TO_63
+    PPC64_STVS(\i)
+  .endr
 
 #else
@@ -478,38 +398,9 @@
   stfd n, (PPC64_OFFS_FP + n * 16)(3)
 
   // save float registers
-  PPC64_STF(0)
-  PPC64_STF(1)
-  PPC64_STF(2)
-  PPC64_STF(3)
-  PPC64_STF(4)
-  PPC64_STF(5)
-  PPC64_STF(6)
-  PPC64_STF(7)
-  PPC64_STF(8)
-  PPC64_STF(9)
-  PPC64_STF(10)
-  PPC64_STF(11)
-  PPC64_STF(12)
-  PPC64_STF(13)
-  PPC64_STF(14)
-  PPC64_STF(15)
-  PPC64_STF(16)
-  PPC64_STF(17)
-  PPC64_STF(18)
-  PPC64_STF(19)
-  PPC64_STF(20)
-  PPC64_STF(21)
-  PPC64_STF(22)
-  PPC64_STF(23)
-  PPC64_STF(24)
-  PPC64_STF(25)
-  PPC64_STF(26)
-  PPC64_STF(27)
-  PPC64_STF(28)
-  PPC64_STF(29)
-  PPC64_STF(30)
-  PPC64_STF(31)
+  .irp i,FROM_0_TO_31
+    PPC64_STF(\i)
+  .endr
 
 #if defined(__ALTIVEC__)
   // save vector registers
@@ -526,38 +417,9 @@
   ld 5, 8(4)                             ;\
   std 5, (PPC64_OFFS_V + n * 16 + 8)(3)
 
-  PPC64_STV_UNALIGNED(0)
-  PPC64_STV_UNALIGNED(1)
-  PPC64_STV_UNALIGNED(2)
-  PPC64_STV_UNALIGNED(3)
-  PPC64_STV_UNALIGNED(4)
-  PPC64_STV_UNALIGNED(5)
-  PPC64_STV_UNALIGNED(6)
-  PPC64_STV_UNALIGNED(7)
-  PPC64_STV_UNALIGNED(8)
-  PPC64_STV_UNALIGNED(9)
-  PPC64_STV_UNALIGNED(10)
-  PPC64_STV_UNALIGNED(11)
-  PPC64_STV_UNALIGNED(12)
-  PPC64_STV_UNALIGNED(13)
-  PPC64_STV_UNALIGNED(14)
-  PPC64_STV_UNALIGNED(15)
-  PPC64_STV_UNALIGNED(16)
-  PPC64_STV_UNALIGNED(17)
-  PPC64_STV_UNALIGNED(18)
-  PPC64_STV_UNALIGNED(19)
-  PPC64_STV_UNALIGNED(20)
-  PPC64_STV_UNALIGNED(21)
-  PPC64_STV_UNALIGNED(22)
-  PPC64_STV_UNALIGNED(23)
-  PPC64_STV_UNALIGNED(24)
-  PPC64_STV_UNALIGNED(25)
-  PPC64_STV_UNALIGNED(26)
-  PPC64_STV_UNALIGNED(27)
-  PPC64_STV_UNALIGNED(28)
-  PPC64_STV_UNALIGNED(29)
-  PPC64_STV_UNALIGNED(30)
-  PPC64_STV_UNALIGNED(31)
+  .irp i,FROM_0_TO_31
+    PPC64_STV_UNALIGNED(\i)
+  .endr
 
 #endif
 #endif
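Note on the VS-register save loop above (illustration only, not part of the patch): PPC64_STVS(n) expands, via cpp, into two statements joined by ';', which gas accepts as a statement separator, so each .irp pass still emits the store-then-advance pair; the two loops could equally be written as a single one, since cpp simply pastes both lists:

    .irp i,FROM_0_TO_31,FROM_32_TO_63   // after cpp: .irp i,0,1,...,62,63
      PPC64_STVS(\i)                    // after cpp: stxvd2x \i, 0, 4 ; addi 4, 4, 16
    .endr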
@@ -582,37 +444,9 @@
   stw 0, 8(3)
   mflr 0
   stw 0, 0(3) // store lr as ssr0
-  stw 1, 12(3)
-  stw 2, 16(3)
-  stw 3, 20(3)
-  stw 4, 24(3)
-  stw 5, 28(3)
-  stw 6, 32(3)
-  stw 7, 36(3)
-  stw 8, 40(3)
-  stw 9, 44(3)
-  stw 10, 48(3)
-  stw 11, 52(3)
-  stw 12, 56(3)
-  stw 13, 60(3)
-  stw 14, 64(3)
-  stw 15, 68(3)
-  stw 16, 72(3)
-  stw 17, 76(3)
-  stw 18, 80(3)
-  stw 19, 84(3)
-  stw 20, 88(3)
-  stw 21, 92(3)
-  stw 22, 96(3)
-  stw 23,100(3)
-  stw 24,104(3)
-  stw 25,108(3)
-  stw 26,112(3)
-  stw 27,116(3)
-  stw 28,120(3)
-  stw 29,124(3)
-  stw 30,128(3)
-  stw 31,132(3)
+  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    stw \i, (8+4*\i)(3)
+  .endr
 
 #if defined(__ALTIVEC__)
   // save VRSave register
@@ -628,38 +462,9 @@
 #if !defined(__NO_FPRS__)
   // save float registers
-  stfd 0, 160(3)
-  stfd 1, 168(3)
-  stfd 2, 176(3)
-  stfd 3, 184(3)
-  stfd 4, 192(3)
-  stfd 5, 200(3)
-  stfd 6, 208(3)
-  stfd 7, 216(3)
-  stfd 8, 224(3)
-  stfd 9, 232(3)
-  stfd 10,240(3)
-  stfd 11,248(3)
-  stfd 12,256(3)
-  stfd 13,264(3)
-  stfd 14,272(3)
-  stfd 15,280(3)
-  stfd 16,288(3)
-  stfd 17,296(3)
-  stfd 18,304(3)
-  stfd 19,312(3)
-  stfd 20,320(3)
-  stfd 21,328(3)
-  stfd 22,336(3)
-  stfd 23,344(3)
-  stfd 24,352(3)
-  stfd 25,360(3)
-  stfd 26,368(3)
-  stfd 27,376(3)
-  stfd 28,384(3)
-  stfd 29,392(3)
-  stfd 30,400(3)
-  stfd 31,408(3)
+  .irp i,FROM_0_TO_31
+    stfd \i, (160+8*\i)(3)
+  .endr
 #endif
 
 #if defined(__ALTIVEC__)
@@ -680,38 +485,9 @@
   lwz 5, 12(4)       SEPARATOR \
   stw 5, _offset+12(3)
 
-  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
-  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
-  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
-  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
-  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
-  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
-  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
-  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
-  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
-  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
-  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
-  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
-  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
-  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
-  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
-  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
-  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
-  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
-  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
-  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
-  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
-  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
-  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
-  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
-  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
-  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
-  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
-  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
-  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
-  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
-  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
-  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
+  .irp i,FROM_0_TO_31
+    SAVE_VECTOR_UNALIGNED(\i, \i*16+424)
+  .endr
 
 #endif
   li 3, 0 // return UNW_ESUCCESS
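Note on the vector save offsets above (illustration only, not part of the patch): the second macro argument is now an expression instead of a hex literal; cpp pastes it into _offset unchanged and gas folds the constant, so the emitted displacements are identical. For the last vector register:

    SAVE_VECTOR_UNALIGNED(31, 31*16+424)   // 31*16+424 = 920 = 424+0x1F0
    // last store of that expansion: stw 5, 31*16+424+12(3), i.e. stw 5, 932(3)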
@@ -1110,71 +886,14 @@
 #
 DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
   ISTORE x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
-  ISTORE x1, (RISCV_ISIZE * 1)(a0)
-  ISTORE x2, (RISCV_ISIZE * 2)(a0)
-  ISTORE x3, (RISCV_ISIZE * 3)(a0)
-  ISTORE x4, (RISCV_ISIZE * 4)(a0)
-  ISTORE x5, (RISCV_ISIZE * 5)(a0)
-  ISTORE x6, (RISCV_ISIZE * 6)(a0)
-  ISTORE x7, (RISCV_ISIZE * 7)(a0)
-  ISTORE x8, (RISCV_ISIZE * 8)(a0)
-  ISTORE x9, (RISCV_ISIZE * 9)(a0)
-  ISTORE x10, (RISCV_ISIZE * 10)(a0)
-  ISTORE x11, (RISCV_ISIZE * 11)(a0)
-  ISTORE x12, (RISCV_ISIZE * 12)(a0)
-  ISTORE x13, (RISCV_ISIZE * 13)(a0)
-  ISTORE x14, (RISCV_ISIZE * 14)(a0)
-  ISTORE x15, (RISCV_ISIZE * 15)(a0)
-  ISTORE x16, (RISCV_ISIZE * 16)(a0)
-  ISTORE x17, (RISCV_ISIZE * 17)(a0)
-  ISTORE x18, (RISCV_ISIZE * 18)(a0)
-  ISTORE x19, (RISCV_ISIZE * 19)(a0)
-  ISTORE x20, (RISCV_ISIZE * 20)(a0)
-  ISTORE x21, (RISCV_ISIZE * 21)(a0)
-  ISTORE x22, (RISCV_ISIZE * 22)(a0)
-  ISTORE x23, (RISCV_ISIZE * 23)(a0)
-  ISTORE x24, (RISCV_ISIZE * 24)(a0)
-  ISTORE x25, (RISCV_ISIZE * 25)(a0)
-  ISTORE x26, (RISCV_ISIZE * 26)(a0)
-  ISTORE x27, (RISCV_ISIZE * 27)(a0)
-  ISTORE x28, (RISCV_ISIZE * 28)(a0)
-  ISTORE x29, (RISCV_ISIZE * 29)(a0)
-  ISTORE x30, (RISCV_ISIZE * 30)(a0)
-  ISTORE x31, (RISCV_ISIZE * 31)(a0)
+  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+    ISTORE x\i, (RISCV_ISIZE * \i)(a0)
+  .endr
 
 # if defined(__riscv_flen)
-  FSTORE f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
-  FSTORE f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
-  FSTORE f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
-  FSTORE f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
-  FSTORE f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
-  FSTORE f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
-  FSTORE f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
-  FSTORE f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
-  FSTORE f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
-  FSTORE f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
-  FSTORE f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
-  FSTORE f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
-  FSTORE f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
-  FSTORE f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
-  FSTORE f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
-  FSTORE f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
-  FSTORE f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
-  FSTORE f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
-  FSTORE f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
-  FSTORE f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
-  FSTORE f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
-  FSTORE f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
-  FSTORE f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
-  FSTORE f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
-  FSTORE f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
-  FSTORE f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
-  FSTORE f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
-  FSTORE f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
-  FSTORE f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
-  FSTORE f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
-  FSTORE f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
-  FSTORE f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
+  .irp i,FROM_0_TO_31
+    FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
+  .endr
 # endif
 
   li a0, 0 // return UNW_ESUCCESS
@@ -1201,22 +920,9 @@
   stg %r14, 8(%r2)
 
   // Save FPRs
-  std %f0, 144(%r2)
-  std %f1, 152(%r2)
-  std %f2, 160(%r2)
-  std %f3, 168(%r2)
-  std %f4, 176(%r2)
-  std %f5, 184(%r2)
-  std %f6, 192(%r2)
-  std %f7, 200(%r2)
-  std %f8, 208(%r2)
-  std %f9, 216(%r2)
-  std %f10, 224(%r2)
-  std %f11, 232(%r2)
-  std %f12, 240(%r2)
-  std %f13, 248(%r2)
-  std %f14, 256(%r2)
-  std %f15, 264(%r2)
+  .irp i,FROM_0_TO_15
+    std %f\i, (144+8*\i)(%r2)
+  .endr
 
   // Return UNW_ESUCCESS
   lghi %r2, 0
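Since every loop above is meant to assemble to exactly the instructions the old unrolled code produced, a compile-time guard is one cheap way to document the layout assumptions. A hypothetical example (not part of the patch) for the s390x FPR slots, using gas's .if/.error directives:

    .if (144+8*15) != 264          // %f15 must land in the old 264(%r2) slot
      .error "unexpected s390x FPR save offset"
    .endif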