mirror of
https://github.com/RPCSX/llvm.git
synced 2025-02-13 17:20:28 +00:00
![Rafael Espindola](/assets/img/avatar_default.png)
The way prelink used to work was * The compiler decides if a given section only has relocations that are known to point to the same DSO. If so, it names it .data.rel.ro.local<something>. * The static linker puts all of these together. * The prelinker program assigns addresses to each library and resolves the local relocations. There are many problems with this: * It is incompatible with address space randomization. * The information passed by the compiler is redundant. The linker knows if a given relocation is in the same DSO or not. It could sort by that if so desired. * There are newer ways of speeding up DSO loading (gnu hash for example). * Even if we want to implement this again in the compiler, the previous implementation is pretty broken. It talks about relocations that are "resolved by the static linker". If they are resolved, there are none left for the prelinker. What one needs to track is whether an expression will require only dynamic relocations that point to the same DSO. At this point it looks like the prelinker is a historical curiosity. For example, Fedora has retired it because it failed to build for two releases (http://pkgs.fedoraproject.org/cgit/prelink.git/commit/?id=eb43100a8331d91c801ee3dcdb0a0bb9babfdc1f) This patch removes support for it. That is, it stops printing the ".local" sections. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@253280 91177308-0d34-0410-b5e6-96231b3b80d8
132 lines
3.4 KiB
LLVM
132 lines
3.4 KiB
LLVM
; RUN: llc < %s -emulated-tls -march=x86 -mcpu=generic -mtriple=i386-linux-gnu -relocation-model=pic -enable-pie \
; RUN: | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -emulated-tls -march=x86-64 -mcpu=generic -mtriple=x86_64-linux-gnu -relocation-model=pic -enable-pie \
; RUN: | FileCheck -check-prefix=X64 %s
; RUN: llc < %s -emulated-tls -march=x86 -mcpu=generic -mtriple=i386-linux-android -relocation-model=pic -enable-pie \
; RUN: | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -emulated-tls -march=x86-64 -mcpu=generic -mtriple=x86_64-linux-android -relocation-model=pic -enable-pie \
; RUN: | FileCheck -check-prefix=X64 %s
; Use my_emutls_get_address like __emutls_get_address.

; External control variable for a user-provided emulated-TLS scheme;
; its address (not its value) is handed to the runtime helper below.
@my_emutls_v_xyz = external global i8*, align 4

; Runtime helper that maps a control-variable address to the current
; thread's storage, mirroring __emutls_get_address.
declare i8* @my_emutls_get_address(i8*)
|
|
|
|
; A call to my_emutls_get_address must be lowered like a call to
; __emutls_get_address under PIE: load the control variable's address
; through the GOT, then call the helper through the PLT.
define i32 @my_get_xyz() {
; X32-LABEL: my_get_xyz:
; X32: movl my_emutls_v_xyz@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll my_emutls_get_address@PLT
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $8, %esp
; X32-NEXT: popl %ebx
; X32-NEXT: retl
; X64-LABEL: my_get_xyz:
; X64: movq my_emutls_v_xyz@GOTPCREL(%rip), %rdi
; X64-NEXT: callq my_emutls_get_address@PLT
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: popq %rdx
; X64-NEXT: retq

entry:
  %addr = call i8* @my_emutls_get_address(i8* bitcast (i8** @my_emutls_v_xyz to i8*))
  %slot = bitcast i8* %addr to i32*
  %value = load i32, i32* %slot, align 4
  ret i32 %value
}
|
|
|
|
; Thread-local globals: under -emulated-tls these are lowered to
; __emutls_v.* control records (plus __emutls_t.* initializers for
; defined variables) instead of native TLS relocations.
@i = thread_local global i32 15
@i2 = external thread_local global i32
|
|
|
|
; Reading a locally-defined emulated-TLS variable: the __emutls_v.i
; control record is fetched via the GOT and resolved through
; __emutls_get_address@PLT.
define i32 @f1() {
; X32-LABEL: f1:
; X32: movl __emutls_v.i@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: addl $8, %esp
; X32-NEXT: popl %ebx
; X32-NEXT: retl
; X64-LABEL: f1:
; X64: movq __emutls_v.i@GOTPCREL(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: popq %rdx
; X64-NEXT: retq

entry:
  %value = load i32, i32* @i
  ret i32 %value
}
|
|
|
|
; Taking the address of an emulated-TLS variable goes through the
; same GOT + __emutls_get_address@PLT sequence; the helper's result
; is the per-thread address itself.
define i32* @f2() {
; X32-LABEL: f2:
; X32: movl __emutls_v.i@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X64-LABEL: f2:
; X64: movq __emutls_v.i@GOTPCREL(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT

entry:
  ret i32* @i
}
|
|
|
|
; Same as f1 but for an external emulated-TLS variable: only the
; __emutls_v.i2 control record is referenced (no __emutls_t.i2
; initializer is emitted; see the -NOT checks at the bottom).
define i32 @f3() {
; X32-LABEL: f3:
; X32: movl __emutls_v.i2@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X64-LABEL: f3:
; X64: movq __emutls_v.i2@GOTPCREL(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT

entry:
  %value = load i32, i32* @i2
  ret i32 %value
}
|
|
|
|
; Address-of an external emulated-TLS variable: identical lowering to
; f2, but against the externally-defined __emutls_v.i2 record.
define i32* @f4() {
; X32-LABEL: f4:
; X32: movl __emutls_v.i2@GOT(%ebx), %eax
; X32-NEXT: movl %eax, (%esp)
; X32-NEXT: calll __emutls_get_address@PLT
; X64-LABEL: f4:
; X64: movq __emutls_v.i2@GOTPCREL(%rip), %rdi
; X64-NEXT: callq __emutls_get_address@PLT

entry:
  ret i32* @i2
}
|
|
|
|
;;;;; 32-bit targets

; X32: .section .data.rel,
; X32-LABEL: __emutls_v.i:
; X32-NEXT: .long 4
; X32-NEXT: .long 4
; X32-NEXT: .long 0
; X32-NEXT: .long __emutls_t.i

; X32: .section .rodata,
; X32-LABEL: __emutls_t.i:
; X32-NEXT: .long 15

; X32-NOT: __emutls_v.i2
; X32-NOT: __emutls_t.i2
|
;;;;; 64-bit targets

; X64: .section .data.rel,
; X64-LABEL: __emutls_v.i:
; X64-NEXT: .quad 4
; X64-NEXT: .quad 4
; X64-NEXT: .quad 0
; X64-NEXT: .quad __emutls_t.i

; X64: .section .rodata,
; X64-LABEL: __emutls_t.i:
; X64-NEXT: .long 15

; X64-NOT: __emutls_v.i2
; X64-NOT: __emutls_t.i2