[libunwind] Use .irp directives. NFC
The repeated instructions make the file long and difficult to read. Simplify them with .irp directives.

Skip PowerPC, since the AIX assembler doesn't support .irp.

Reviewed By: #libunwind, compnerd

Differential Revision: https://reviews.llvm.org/D139368
This commit is contained in:
parent da2f5d0a41
commit 356bbbbda9
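For context on the transformation (an illustrative sketch, not code taken from the patch): the GNU assembler's .irp directive repeats the statements between .irp and .endr once per value in its comma-separated list, substituting each value for \i, so a long run of near-identical loads or stores collapses into three lines. Because these .S files are run through the C preprocessor, a list macro such as FROM_0_TO_31 expands into the literal comma-separated values before the assembler evaluates the .irp.

// Hypothetical standalone example (MIPS-style operands assumed), not part of the patch:
.irp i,0,1,2,3
ld $\i, (8 * \i)($4)
.endr
// assembles exactly as if one had written:
//   ld $0, (8 * 0)($4)
//   ld $1, (8 * 1)($4)
//   ld $2, (8 * 2)($4)
//   ld $3, (8 * 3)($4)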
@@ -8,6 +8,12 @@

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63

#if defined(_AIX)
.toc
#else
@@ -1026,38 +1032,9 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
.set noreorder
.set nomacro
#ifdef __mips_hard_float
ldc1 $f0, (8 * 35)($4)
ldc1 $f1, (8 * 36)($4)
ldc1 $f2, (8 * 37)($4)
ldc1 $f3, (8 * 38)($4)
ldc1 $f4, (8 * 39)($4)
ldc1 $f5, (8 * 40)($4)
ldc1 $f6, (8 * 41)($4)
ldc1 $f7, (8 * 42)($4)
ldc1 $f8, (8 * 43)($4)
ldc1 $f9, (8 * 44)($4)
ldc1 $f10, (8 * 45)($4)
ldc1 $f11, (8 * 46)($4)
ldc1 $f12, (8 * 47)($4)
ldc1 $f13, (8 * 48)($4)
ldc1 $f14, (8 * 49)($4)
ldc1 $f15, (8 * 50)($4)
ldc1 $f16, (8 * 51)($4)
ldc1 $f17, (8 * 52)($4)
ldc1 $f18, (8 * 53)($4)
ldc1 $f19, (8 * 54)($4)
ldc1 $f20, (8 * 55)($4)
ldc1 $f21, (8 * 56)($4)
ldc1 $f22, (8 * 57)($4)
ldc1 $f23, (8 * 58)($4)
ldc1 $f24, (8 * 59)($4)
ldc1 $f25, (8 * 60)($4)
ldc1 $f26, (8 * 61)($4)
ldc1 $f27, (8 * 62)($4)
ldc1 $f28, (8 * 63)($4)
ldc1 $f29, (8 * 64)($4)
ldc1 $f30, (8 * 65)($4)
ldc1 $f31, (8 * 66)($4)
.irp i,FROM_0_TO_31
ldc1 $f\i, (280+8*\i)($4)
.endr
#endif
// restore hi and lo
ld $8, (8 * 33)($4)
@@ -1069,32 +1046,9 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
ld $2, (8 * 2)($4)
ld $3, (8 * 3)($4)
// skip a0 for now
ld $5, (8 * 5)($4)
ld $6, (8 * 6)($4)
ld $7, (8 * 7)($4)
ld $8, (8 * 8)($4)
ld $9, (8 * 9)($4)
ld $10, (8 * 10)($4)
ld $11, (8 * 11)($4)
ld $12, (8 * 12)($4)
ld $13, (8 * 13)($4)
ld $14, (8 * 14)($4)
ld $15, (8 * 15)($4)
ld $16, (8 * 16)($4)
ld $17, (8 * 17)($4)
ld $18, (8 * 18)($4)
ld $19, (8 * 19)($4)
ld $20, (8 * 20)($4)
ld $21, (8 * 21)($4)
ld $22, (8 * 22)($4)
ld $23, (8 * 23)($4)
ld $24, (8 * 24)($4)
ld $25, (8 * 25)($4)
ld $26, (8 * 26)($4)
ld $27, (8 * 27)($4)
ld $28, (8 * 28)($4)
ld $29, (8 * 29)($4)
ld $30, (8 * 30)($4)
.irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
ld $\i, (8 * \i)($4)
.endr
// load new pc into ra
ld $31, (8 * 32)($4)
// jump to ra, load a0 in the delay slot
@@ -1182,72 +1136,20 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
.p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
FLOAD f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
FLOAD f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
FLOAD f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
FLOAD f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
FLOAD f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
FLOAD f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
FLOAD f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
FLOAD f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
FLOAD f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
FLOAD f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
FLOAD f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
FLOAD f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
FLOAD f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
FLOAD f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
FLOAD f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
FLOAD f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
FLOAD f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
FLOAD f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
FLOAD f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
FLOAD f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
FLOAD f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
FLOAD f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
FLOAD f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
FLOAD f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
FLOAD f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
FLOAD f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
FLOAD f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
FLOAD f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
FLOAD f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
FLOAD f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
FLOAD f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
FLOAD f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
.irp i,FROM_0_TO_31
FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
.endr
# endif

// x0 is zero
ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
ILOAD x2, (RISCV_ISIZE * 2)(a0)
ILOAD x3, (RISCV_ISIZE * 3)(a0)
ILOAD x4, (RISCV_ISIZE * 4)(a0)
ILOAD x5, (RISCV_ISIZE * 5)(a0)
ILOAD x6, (RISCV_ISIZE * 6)(a0)
ILOAD x7, (RISCV_ISIZE * 7)(a0)
ILOAD x8, (RISCV_ISIZE * 8)(a0)
ILOAD x9, (RISCV_ISIZE * 9)(a0)
.irp i,2,3,4,5,6,7,8,9
ILOAD x\i, (RISCV_ISIZE * \i)(a0)
.endr
// skip a0 for now
ILOAD x11, (RISCV_ISIZE * 11)(a0)
ILOAD x12, (RISCV_ISIZE * 12)(a0)
ILOAD x13, (RISCV_ISIZE * 13)(a0)
ILOAD x14, (RISCV_ISIZE * 14)(a0)
ILOAD x15, (RISCV_ISIZE * 15)(a0)
ILOAD x16, (RISCV_ISIZE * 16)(a0)
ILOAD x17, (RISCV_ISIZE * 17)(a0)
ILOAD x18, (RISCV_ISIZE * 18)(a0)
ILOAD x19, (RISCV_ISIZE * 19)(a0)
ILOAD x20, (RISCV_ISIZE * 20)(a0)
ILOAD x21, (RISCV_ISIZE * 21)(a0)
ILOAD x22, (RISCV_ISIZE * 22)(a0)
ILOAD x23, (RISCV_ISIZE * 23)(a0)
ILOAD x24, (RISCV_ISIZE * 24)(a0)
ILOAD x25, (RISCV_ISIZE * 25)(a0)
ILOAD x26, (RISCV_ISIZE * 26)(a0)
ILOAD x27, (RISCV_ISIZE * 27)(a0)
ILOAD x28, (RISCV_ISIZE * 28)(a0)
ILOAD x29, (RISCV_ISIZE * 29)(a0)
ILOAD x30, (RISCV_ISIZE * 30)(a0)
ILOAD x31, (RISCV_ISIZE * 31)(a0)
.irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
ILOAD x\i, (RISCV_ISIZE * \i)(a0)
.endr
ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0

ret // jump to ra
@@ -1266,22 +1168,9 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
lg %r1, 8(%r2)

// Restore FPRs
ld %f0, 144(%r2)
ld %f1, 152(%r2)
ld %f2, 160(%r2)
ld %f3, 168(%r2)
ld %f4, 176(%r2)
ld %f5, 184(%r2)
ld %f6, 192(%r2)
ld %f7, 200(%r2)
ld %f8, 208(%r2)
ld %f9, 216(%r2)
ld %f10, 224(%r2)
ld %f11, 232(%r2)
ld %f12, 240(%r2)
ld %f13, 248(%r2)
ld %f14, 256(%r2)
ld %f15, 264(%r2)
.irp i,FROM_0_TO_15
ld %f\i, (144+8*\i)(%r2)
.endr

// Restore GPRs - skipping %r0 and %r1
lmg %r2, %r15, 32(%r2)
@@ -1300,72 +1189,20 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
.p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
# if __loongarch_frlen == 64
fld.d $f0, $a0, (8 * 33 + 8 * 0)
fld.d $f1, $a0, (8 * 33 + 8 * 1)
fld.d $f2, $a0, (8 * 33 + 8 * 2)
fld.d $f3, $a0, (8 * 33 + 8 * 3)
fld.d $f4, $a0, (8 * 33 + 8 * 4)
fld.d $f5, $a0, (8 * 33 + 8 * 5)
fld.d $f6, $a0, (8 * 33 + 8 * 6)
fld.d $f7, $a0, (8 * 33 + 8 * 7)
fld.d $f8, $a0, (8 * 33 + 8 * 8)
fld.d $f9, $a0, (8 * 33 + 8 * 9)
fld.d $f10, $a0, (8 * 33 + 8 * 10)
fld.d $f11, $a0, (8 * 33 + 8 * 11)
fld.d $f12, $a0, (8 * 33 + 8 * 12)
fld.d $f13, $a0, (8 * 33 + 8 * 13)
fld.d $f14, $a0, (8 * 33 + 8 * 14)
fld.d $f15, $a0, (8 * 33 + 8 * 15)
fld.d $f16, $a0, (8 * 33 + 8 * 16)
fld.d $f17, $a0, (8 * 33 + 8 * 17)
fld.d $f18, $a0, (8 * 33 + 8 * 18)
fld.d $f19, $a0, (8 * 33 + 8 * 19)
fld.d $f20, $a0, (8 * 33 + 8 * 20)
fld.d $f21, $a0, (8 * 33 + 8 * 21)
fld.d $f22, $a0, (8 * 33 + 8 * 22)
fld.d $f23, $a0, (8 * 33 + 8 * 23)
fld.d $f24, $a0, (8 * 33 + 8 * 24)
fld.d $f25, $a0, (8 * 33 + 8 * 25)
fld.d $f26, $a0, (8 * 33 + 8 * 26)
fld.d $f27, $a0, (8 * 33 + 8 * 27)
fld.d $f28, $a0, (8 * 33 + 8 * 28)
fld.d $f29, $a0, (8 * 33 + 8 * 29)
fld.d $f30, $a0, (8 * 33 + 8 * 30)
fld.d $f31, $a0, (8 * 33 + 8 * 31)
.irp i,FROM_0_TO_31
fld.d $f\i, $a0, (8 * 33 + 8 * \i)
.endr
# endif

// $r0 is zero
ld.d $r1, $a0, (8 * 1)
ld.d $r2, $a0, (8 * 2)
ld.d $r3, $a0, (8 * 3)
.irp i,1,2,3
ld.d $r\i, $a0, (8 * \i)
.endr
// skip $a0 for now
ld.d $r5, $a0, (8 * 5)
ld.d $r6, $a0, (8 * 6)
ld.d $r7, $a0, (8 * 7)
ld.d $r8, $a0, (8 * 8)
ld.d $r9, $a0, (8 * 9)
ld.d $r10, $a0, (8 * 10)
ld.d $r11, $a0, (8 * 11)
ld.d $r12, $a0, (8 * 12)
ld.d $r13, $a0, (8 * 13)
ld.d $r14, $a0, (8 * 14)
ld.d $r15, $a0, (8 * 15)
ld.d $r16, $a0, (8 * 16)
ld.d $r17, $a0, (8 * 17)
ld.d $r18, $a0, (8 * 18)
ld.d $r19, $a0, (8 * 19)
ld.d $r20, $a0, (8 * 20)
ld.d $r21, $a0, (8 * 21)
ld.d $r22, $a0, (8 * 22)
ld.d $r23, $a0, (8 * 23)
ld.d $r24, $a0, (8 * 24)
ld.d $r25, $a0, (8 * 25)
ld.d $r26, $a0, (8 * 26)
ld.d $r27, $a0, (8 * 27)
ld.d $r28, $a0, (8 * 28)
ld.d $r29, $a0, (8 * 29)
ld.d $r30, $a0, (8 * 30)
ld.d $r31, $a0, (8 * 31)
.irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
ld.d $r\i, $a0, (8 * \i)
.endr

ld.d $r4, $a0, (8 * 4) // restore $a0 last
ld.d $r1, $a0, (8 * 32) // load new pc into $ra

@@ -8,6 +8,12 @@

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63

#if defined(_AIX)
.toc
#else
@@ -244,37 +250,9 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
.set noat
.set noreorder
.set nomacro
sd $1, (8 * 1)($4)
sd $2, (8 * 2)($4)
sd $3, (8 * 3)($4)
sd $4, (8 * 4)($4)
sd $5, (8 * 5)($4)
sd $6, (8 * 6)($4)
sd $7, (8 * 7)($4)
sd $8, (8 * 8)($4)
sd $9, (8 * 9)($4)
sd $10, (8 * 10)($4)
sd $11, (8 * 11)($4)
sd $12, (8 * 12)($4)
sd $13, (8 * 13)($4)
sd $14, (8 * 14)($4)
sd $15, (8 * 15)($4)
sd $16, (8 * 16)($4)
sd $17, (8 * 17)($4)
sd $18, (8 * 18)($4)
sd $19, (8 * 19)($4)
sd $20, (8 * 20)($4)
sd $21, (8 * 21)($4)
sd $22, (8 * 22)($4)
sd $23, (8 * 23)($4)
sd $24, (8 * 24)($4)
sd $25, (8 * 25)($4)
sd $26, (8 * 26)($4)
sd $27, (8 * 27)($4)
sd $28, (8 * 28)($4)
sd $29, (8 * 29)($4)
sd $30, (8 * 30)($4)
sd $31, (8 * 31)($4)
.irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
sd $\i, (8 * \i)($4)
.endr
# Store return address to pc
sd $31, (8 * 32)($4)
# hi and lo
@@ -283,38 +261,9 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
mflo $8
sd $8, (8 * 34)($4)
#ifdef __mips_hard_float
sdc1 $f0, (8 * 35)($4)
sdc1 $f1, (8 * 36)($4)
sdc1 $f2, (8 * 37)($4)
sdc1 $f3, (8 * 38)($4)
sdc1 $f4, (8 * 39)($4)
sdc1 $f5, (8 * 40)($4)
sdc1 $f6, (8 * 41)($4)
sdc1 $f7, (8 * 42)($4)
sdc1 $f8, (8 * 43)($4)
sdc1 $f9, (8 * 44)($4)
sdc1 $f10, (8 * 45)($4)
sdc1 $f11, (8 * 46)($4)
sdc1 $f12, (8 * 47)($4)
sdc1 $f13, (8 * 48)($4)
sdc1 $f14, (8 * 49)($4)
sdc1 $f15, (8 * 50)($4)
sdc1 $f16, (8 * 51)($4)
sdc1 $f17, (8 * 52)($4)
sdc1 $f18, (8 * 53)($4)
sdc1 $f19, (8 * 54)($4)
sdc1 $f20, (8 * 55)($4)
sdc1 $f21, (8 * 56)($4)
sdc1 $f22, (8 * 57)($4)
sdc1 $f23, (8 * 58)($4)
sdc1 $f24, (8 * 59)($4)
sdc1 $f25, (8 * 60)($4)
sdc1 $f26, (8 * 61)($4)
sdc1 $f27, (8 * 62)($4)
sdc1 $f28, (8 * 63)($4)
sdc1 $f29, (8 * 64)($4)
sdc1 $f30, (8 * 65)($4)
sdc1 $f31, (8 * 66)($4)
.irp i,FROM_0_TO_31
sdc1 $f\i, (280+8*\i)($4)
.endr
#endif
jr $31
# return UNW_ESUCCESS
@@ -1110,71 +1059,14 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
ISTORE x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
ISTORE x1, (RISCV_ISIZE * 1)(a0)
ISTORE x2, (RISCV_ISIZE * 2)(a0)
ISTORE x3, (RISCV_ISIZE * 3)(a0)
ISTORE x4, (RISCV_ISIZE * 4)(a0)
ISTORE x5, (RISCV_ISIZE * 5)(a0)
ISTORE x6, (RISCV_ISIZE * 6)(a0)
ISTORE x7, (RISCV_ISIZE * 7)(a0)
ISTORE x8, (RISCV_ISIZE * 8)(a0)
ISTORE x9, (RISCV_ISIZE * 9)(a0)
ISTORE x10, (RISCV_ISIZE * 10)(a0)
ISTORE x11, (RISCV_ISIZE * 11)(a0)
ISTORE x12, (RISCV_ISIZE * 12)(a0)
ISTORE x13, (RISCV_ISIZE * 13)(a0)
ISTORE x14, (RISCV_ISIZE * 14)(a0)
ISTORE x15, (RISCV_ISIZE * 15)(a0)
ISTORE x16, (RISCV_ISIZE * 16)(a0)
ISTORE x17, (RISCV_ISIZE * 17)(a0)
ISTORE x18, (RISCV_ISIZE * 18)(a0)
ISTORE x19, (RISCV_ISIZE * 19)(a0)
ISTORE x20, (RISCV_ISIZE * 20)(a0)
ISTORE x21, (RISCV_ISIZE * 21)(a0)
ISTORE x22, (RISCV_ISIZE * 22)(a0)
ISTORE x23, (RISCV_ISIZE * 23)(a0)
ISTORE x24, (RISCV_ISIZE * 24)(a0)
ISTORE x25, (RISCV_ISIZE * 25)(a0)
ISTORE x26, (RISCV_ISIZE * 26)(a0)
ISTORE x27, (RISCV_ISIZE * 27)(a0)
ISTORE x28, (RISCV_ISIZE * 28)(a0)
ISTORE x29, (RISCV_ISIZE * 29)(a0)
ISTORE x30, (RISCV_ISIZE * 30)(a0)
ISTORE x31, (RISCV_ISIZE * 31)(a0)
.irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
ISTORE x\i, (RISCV_ISIZE * \i)(a0)
.endr

# if defined(__riscv_flen)
FSTORE f0, (RISCV_FOFFSET + RISCV_FSIZE * 0)(a0)
FSTORE f1, (RISCV_FOFFSET + RISCV_FSIZE * 1)(a0)
FSTORE f2, (RISCV_FOFFSET + RISCV_FSIZE * 2)(a0)
FSTORE f3, (RISCV_FOFFSET + RISCV_FSIZE * 3)(a0)
FSTORE f4, (RISCV_FOFFSET + RISCV_FSIZE * 4)(a0)
FSTORE f5, (RISCV_FOFFSET + RISCV_FSIZE * 5)(a0)
FSTORE f6, (RISCV_FOFFSET + RISCV_FSIZE * 6)(a0)
FSTORE f7, (RISCV_FOFFSET + RISCV_FSIZE * 7)(a0)
FSTORE f8, (RISCV_FOFFSET + RISCV_FSIZE * 8)(a0)
FSTORE f9, (RISCV_FOFFSET + RISCV_FSIZE * 9)(a0)
FSTORE f10, (RISCV_FOFFSET + RISCV_FSIZE * 10)(a0)
FSTORE f11, (RISCV_FOFFSET + RISCV_FSIZE * 11)(a0)
FSTORE f12, (RISCV_FOFFSET + RISCV_FSIZE * 12)(a0)
FSTORE f13, (RISCV_FOFFSET + RISCV_FSIZE * 13)(a0)
FSTORE f14, (RISCV_FOFFSET + RISCV_FSIZE * 14)(a0)
FSTORE f15, (RISCV_FOFFSET + RISCV_FSIZE * 15)(a0)
FSTORE f16, (RISCV_FOFFSET + RISCV_FSIZE * 16)(a0)
FSTORE f17, (RISCV_FOFFSET + RISCV_FSIZE * 17)(a0)
FSTORE f18, (RISCV_FOFFSET + RISCV_FSIZE * 18)(a0)
FSTORE f19, (RISCV_FOFFSET + RISCV_FSIZE * 19)(a0)
FSTORE f20, (RISCV_FOFFSET + RISCV_FSIZE * 20)(a0)
FSTORE f21, (RISCV_FOFFSET + RISCV_FSIZE * 21)(a0)
FSTORE f22, (RISCV_FOFFSET + RISCV_FSIZE * 22)(a0)
FSTORE f23, (RISCV_FOFFSET + RISCV_FSIZE * 23)(a0)
FSTORE f24, (RISCV_FOFFSET + RISCV_FSIZE * 24)(a0)
FSTORE f25, (RISCV_FOFFSET + RISCV_FSIZE * 25)(a0)
FSTORE f26, (RISCV_FOFFSET + RISCV_FSIZE * 26)(a0)
FSTORE f27, (RISCV_FOFFSET + RISCV_FSIZE * 27)(a0)
FSTORE f28, (RISCV_FOFFSET + RISCV_FSIZE * 28)(a0)
FSTORE f29, (RISCV_FOFFSET + RISCV_FSIZE * 29)(a0)
FSTORE f30, (RISCV_FOFFSET + RISCV_FSIZE * 30)(a0)
FSTORE f31, (RISCV_FOFFSET + RISCV_FSIZE * 31)(a0)
.irp i,FROM_0_TO_31
FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
.endr
# endif

li a0, 0 // return UNW_ESUCCESS
@@ -1201,22 +1093,9 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
stg %r14, 8(%r2)

// Save FPRs
std %f0, 144(%r2)
std %f1, 152(%r2)
std %f2, 160(%r2)
std %f3, 168(%r2)
std %f4, 176(%r2)
std %f5, 184(%r2)
std %f6, 192(%r2)
std %f7, 200(%r2)
std %f8, 208(%r2)
std %f9, 216(%r2)
std %f10, 224(%r2)
std %f11, 232(%r2)
std %f12, 240(%r2)
std %f13, 248(%r2)
std %f14, 256(%r2)
std %f15, 264(%r2)
.irp i,FROM_0_TO_15
std %f\i, (144+8*\i)(%r2)
.endr

// Return UNW_ESUCCESS
lghi %r2, 0
@@ -1231,72 +1110,15 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# thread_state pointer is in $a0($r4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
st.d $r1, $a0, (8 * 1)
st.d $r2, $a0, (8 * 2)
st.d $r3, $a0, (8 * 3)
st.d $r4, $a0, (8 * 4)
st.d $r5, $a0, (8 * 5)
st.d $r6, $a0, (8 * 6)
st.d $r7, $a0, (8 * 7)
st.d $r8, $a0, (8 * 8)
st.d $r9, $a0, (8 * 9)
st.d $r10, $a0, (8 * 10)
st.d $r11, $a0, (8 * 11)
st.d $r12, $a0, (8 * 12)
st.d $r13, $a0, (8 * 13)
st.d $r14, $a0, (8 * 14)
st.d $r15, $a0, (8 * 15)
st.d $r16, $a0, (8 * 16)
st.d $r17, $a0, (8 * 17)
st.d $r18, $a0, (8 * 18)
st.d $r19, $a0, (8 * 19)
st.d $r20, $a0, (8 * 20)
st.d $r21, $a0, (8 * 21)
st.d $r22, $a0, (8 * 22)
st.d $r23, $a0, (8 * 23)
st.d $r24, $a0, (8 * 24)
st.d $r25, $a0, (8 * 25)
st.d $r26, $a0, (8 * 26)
st.d $r27, $a0, (8 * 27)
st.d $r28, $a0, (8 * 28)
st.d $r29, $a0, (8 * 29)
st.d $r30, $a0, (8 * 30)
st.d $r31, $a0, (8 * 31)
.irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
st.d $r\i, $a0, (8*\i)
.endr
st.d $r1, $a0, (8 * 32) // store $ra to pc

# if __loongarch_frlen == 64
fst.d $f0, $a0, (8 * 33 + 8 * 0)
fst.d $f1, $a0, (8 * 33 + 8 * 1)
fst.d $f2, $a0, (8 * 33 + 8 * 2)
fst.d $f3, $a0, (8 * 33 + 8 * 3)
fst.d $f4, $a0, (8 * 33 + 8 * 4)
fst.d $f5, $a0, (8 * 33 + 8 * 5)
fst.d $f6, $a0, (8 * 33 + 8 * 6)
fst.d $f7, $a0, (8 * 33 + 8 * 7)
fst.d $f8, $a0, (8 * 33 + 8 * 8)
fst.d $f9, $a0, (8 * 33 + 8 * 9)
fst.d $f10, $a0, (8 * 33 + 8 * 10)
fst.d $f11, $a0, (8 * 33 + 8 * 11)
fst.d $f12, $a0, (8 * 33 + 8 * 12)
fst.d $f13, $a0, (8 * 33 + 8 * 13)
fst.d $f14, $a0, (8 * 33 + 8 * 14)
fst.d $f15, $a0, (8 * 33 + 8 * 15)
fst.d $f16, $a0, (8 * 33 + 8 * 16)
fst.d $f17, $a0, (8 * 33 + 8 * 17)
fst.d $f18, $a0, (8 * 33 + 8 * 18)
fst.d $f19, $a0, (8 * 33 + 8 * 19)
fst.d $f20, $a0, (8 * 33 + 8 * 20)
fst.d $f21, $a0, (8 * 33 + 8 * 21)
fst.d $f22, $a0, (8 * 33 + 8 * 22)
fst.d $f23, $a0, (8 * 33 + 8 * 23)
fst.d $f24, $a0, (8 * 33 + 8 * 24)
fst.d $f25, $a0, (8 * 33 + 8 * 25)
fst.d $f26, $a0, (8 * 33 + 8 * 26)
fst.d $f27, $a0, (8 * 33 + 8 * 27)
fst.d $f28, $a0, (8 * 33 + 8 * 28)
fst.d $f29, $a0, (8 * 33 + 8 * 29)
fst.d $f30, $a0, (8 * 33 + 8 * 30)
fst.d $f31, $a0, (8 * 33 + 8 * 31)
.irp i,FROM_0_TO_31
fst.d $f\i, $a0, (8 * 33 + 8 * \i)
.endr
# endif

move $a0, $zero // UNW_ESUCCESS