/* ld script to make x86-64 Linux kernel
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
 */

#define LOAD_OFFSET __START_KERNEL_map

#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>

#undef i386	/* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
_proxy_pda = 1;
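/*
 * Program headers for the loadable segments.  FLAGS() takes the ELF
 * p_flags bits (PF_X = 1, PF_W = 2, PF_R = 4), so 5 = R+X, 7 = RWX
 * and 4 = read-only.
 */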
PHDRS {
        text PT_LOAD FLAGS(5);  /* R_E */
        data PT_LOAD FLAGS(7);  /* RWE */
        user PT_LOAD FLAGS(7);  /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
        note PT_NOTE FLAGS(4);  /* R__ */
}
SECTIONS
{
  . = __START_KERNEL;
  phys_startup_64 = startup_64 - LOAD_OFFSET;
  _text = .;                    /* Text and read-only data */
  .text : AT(ADDR(.text) - LOAD_OFFSET) {
        /* First the code that has to be first for bootstrapping */
        *(.bootstrap.text)
        _stext = .;
        /* Then the rest */
        TEXT_TEXT
        SCHED_TEXT
        LOCK_TEXT
        KPROBES_TEXT
        *(.fixup)
        *(.gnu.warning)
  } :text = 0x9090
  /* out-of-line lock text */
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }

  _etext = .;                   /* End of text section */

  . = ALIGN(16);                /* Exception table */
  __start___ex_table = .;
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
  __stop___ex_table = .;

  BUG_TABLE
  RODATA

  . = ALIGN(PAGE_SIZE);         /* Align data segment to page size boundary */
  /* Data */
  .data : AT(ADDR(.data) - LOAD_OFFSET) {
        DATA_DATA
        CONSTRUCTORS
        } :data
  _edata = .;                   /* End of data section */

  . = ALIGN(PAGE_SIZE);
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
        *(.data.cacheline_aligned)
  }
  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
        *(.data.read_mostly)
  }
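/*
 * vsyscall page layout: the sections below are linked at the fixed
 * user-visible address VSYSCALL_ADDR (10 MB below the top of the address
 * space) but loaded immediately after .data.read_mostly.  VLOAD() gives a
 * vsyscall section's load address in that layout, VVIRT() its address in
 * the regular kernel mapping.
 */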
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))

#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)

#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
  . = VSYSCALL_ADDR;
  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
  __vsyscall_0 = VSYSCALL_VIRT_ADDR;

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }
  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
                { *(.vsyscall_gtod_data) }
  vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
                { *(.vsyscall_1) }
  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
                { *(.vsyscall_2) }

  .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
  vgetcpu_mode = VVIRT(.vgetcpu_mode);

  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
  jiffies = VVIRT(.jiffies);

  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
                { *(.vsyscall_3) }
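  /* Move the location counter back into the regular kernel mapping,
     just past the 4k vsyscall page. */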
  . = VSYSCALL_VIRT_ADDR + 4096;

#undef VSYSCALL_ADDR
#undef VSYSCALL_PHYS_ADDR
#undef VSYSCALL_VIRT_ADDR
#undef VLOAD_OFFSET
#undef VLOAD
#undef VVIRT_OFFSET
#undef VVIRT
  . = ALIGN(8192);              /* init_task */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
        *(.data.init_task)
  }:data.init

  . = ALIGN(4096);
  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
        *(.data.page_aligned)
  }
  /* might get freed after init */
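  /* SMP alternatives: recorded lock prefixes and SMP-only replacement
     code, patched in or out at runtime by the alternatives code. */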
  . = ALIGN(4096);
  __smp_alt_begin = .;
  __smp_alt_instructions = .;
  .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
        *(.smp_altinstructions)
  }
  __smp_alt_instructions_end = .;
  . = ALIGN(8);
  __smp_locks = .;
  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
        *(.smp_locks)
  }
  __smp_locks_end = .;
  .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
        *(.smp_altinstr_replacement)
  }
  . = ALIGN(4096);
  __smp_alt_end = .;

  . = ALIGN(4096);              /* Init code and data */
  __init_begin = .;
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
        _sinittext = .;
        *(.init.text)
        _einittext = .;
  }
  __initdata_begin = .;
  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
  __initdata_end = .;
  . = ALIGN(16);
  __setup_start = .;
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
        INITCALLS
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
        *(.con_initcall.init)
  }
  __con_initcall_end = .;
  SECURITY_INIT
  . = ALIGN(8);
  __alt_instructions = .;
  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
        *(.altinstructions)
  }
  __alt_instructions_end = .;
  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
        *(.altinstr_replacement)
  }
  /* .exit.text is discarded at runtime, not link time, to deal with
     references from .altinstructions and .eh_frame */
  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(4096);
  __initramfs_start = .;
  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
  __initramfs_end = .;
#endif
  . = ALIGN(4096);
  __per_cpu_start = .;
  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
  __per_cpu_end = .;
  . = ALIGN(4096);
  __init_end = .;
  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;
  __bss_start = .;              /* BSS */
  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
        *(.bss.page_aligned)
        *(.bss)
        }
  __bss_stop = .;

  _end = . ;
  /* Sections to be discarded */
  /DISCARD/ : {
        *(.exitcall.exit)
        *(.eh_frame)
        }

  STABS_DEBUG

  DWARF_DEBUG
}