commit 6a7bbd57ed
Booting current 64-bit x86 kernels on the latest Apple MacBook
(MacBook5,2) via EFI gives the following warning:

[ 0.182209] ------------[ cut here ]------------
[ 0.182222] WARNING: at arch/x86/mm/pageattr.c:581 __cpa_process_fault+0x44/0xa0()
[ 0.182227] Hardware name: MacBook5,2
[ 0.182231] CPA: called for zero pte. vaddr = ffff8800ffe00000 cpa->vaddr = ffff8800ffe00000
[ 0.182236] Modules linked in:
[ 0.182242] Pid: 0, comm: swapper Not tainted 2.6.31-rc4 #6
[ 0.182246] Call Trace:
[ 0.182254]  [<ffffffff8102c754>] ? __cpa_process_fault+0x44/0xa0
[ 0.182261]  [<ffffffff81048668>] warn_slowpath_common+0x78/0xd0
[ 0.182266]  [<ffffffff81048744>] warn_slowpath_fmt+0x64/0x70
[ 0.182272]  [<ffffffff8102c7ec>] ? update_page_count+0x3c/0x50
[ 0.182280]  [<ffffffff818d25c5>] ? phys_pmd_init+0x140/0x22e
[ 0.182286]  [<ffffffff8102c754>] __cpa_process_fault+0x44/0xa0
[ 0.182292]  [<ffffffff8102ce60>] __change_page_attr_set_clr+0x5f0/0xb40
[ 0.182301]  [<ffffffff810d1035>] ? vm_unmap_aliases+0x175/0x190
[ 0.182307]  [<ffffffff8102d4ae>] change_page_attr_set_clr+0xfe/0x3d0
[ 0.182314]  [<ffffffff8102dcca>] _set_memory_uc+0x2a/0x30
[ 0.182319]  [<ffffffff8102dd4b>] set_memory_uc+0x7b/0xb0
[ 0.182327]  [<ffffffff818afe31>] efi_enter_virtual_mode+0x2ad/0x2c9
[ 0.182334]  [<ffffffff818a1c66>] start_kernel+0x2db/0x3f4
[ 0.182340]  [<ffffffff818a1289>] x86_64_start_reservations+0x99/0xb9
[ 0.182345]  [<ffffffff818a1389>] x86_64_start_kernel+0xe0/0xf2
[ 0.182357] ---[ end trace 4eaa2a86a8e2da22 ]---
[ 0.182982] init_memory_mapping: 00000000ffffc000-0000000100000000
[ 0.182993]  00ffffc000 - 0100000000 page 4k

This happens because the 64-bit version of efi_ioremap calls
init_memory_mapping for all addresses, regardless of whether they are
RAM or MMIO. The EFI tables on this machine ask for runtime access to
some MMIO regions:

[ 0.000000] EFI: mem195: type=11, attr=0x8000000000000000, range=[0x0000000093400000-0x0000000093401000) (0MB)
[ 0.000000] EFI: mem196: type=11, attr=0x8000000000000000, range=[0x00000000ffc00000-0x00000000ffc40000) (0MB)
[ 0.000000] EFI: mem197: type=11, attr=0x8000000000000000, range=[0x00000000ffc40000-0x00000000ffc80000) (0MB)
[ 0.000000] EFI: mem198: type=11, attr=0x8000000000000000, range=[0x00000000ffc80000-0x00000000ffca4000) (0MB)
[ 0.000000] EFI: mem199: type=11, attr=0x8000000000000000, range=[0x00000000ffca4000-0x00000000ffcb4000) (0MB)
[ 0.000000] EFI: mem200: type=11, attr=0x8000000000000000, range=[0x00000000ffcb4000-0x00000000ffffc000) (3MB)
[ 0.000000] EFI: mem201: type=11, attr=0x8000000000000000, range=[0x00000000ffffc000-0x0000000100000000) (0MB)

This arranges to pass the EFI memory type through to efi_ioremap, and
makes efi_ioremap use ioremap rather than init_memory_mapping if the
type is EFI_MEMORY_MAPPED_IO. With this, the above warning goes away.

Signed-off-by: Paul Mackerras <paulus@samba.org>
LKML-Reference: <19062.55858.533494.471153@cargo.ozlabs.ibm.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
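(The type=11 entries in the EFI memory map above are EFI_MEMORY_MAPPED_IO.)
A rough sketch of the 64-bit change described here, illustrative only rather
than the exact hunk applied, with the pre-existing RAM path elided:

    void __iomem * __init efi_ioremap(unsigned long phys_addr,
                                      unsigned long size, u32 type)
    {
            /* Runtime MMIO regions get an ioremap() mapping ... */
            if (type == EFI_MEMORY_MAPPED_IO)
                    return ioremap(phys_addr, size);

            /* ... while RAM keeps the existing init_memory_mapping() path. */
    }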
111 lines · 4.1 KiB · C
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#ifdef CONFIG_X86_32

extern unsigned long asmlinkage efi_call_phys(void *, ...);

#define efi_call_phys0(f) efi_call_phys(f)
#define efi_call_phys1(f, a1) efi_call_phys(f, a1)
#define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2)
#define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3)
#define efi_call_phys4(f, a1, a2, a3, a4) \
        efi_call_phys(f, a1, a2, a3, a4)
#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
        efi_call_phys(f, a1, a2, a3, a4, a5)
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
        efi_call_phys(f, a1, a2, a3, a4, a5, a6)
/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

#define efi_call_virt(f, args...) \
        ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
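/*
 * Illustrative expansion (not in the original header): a two-argument
 * runtime service call such as
 *
 *      efi_call_virt2(get_time, tm, tc)
 *
 * becomes an indirect call through efi.systab->runtime->get_time cast to
 * efi_get_time_t with regparm(0), so both arguments go on the stack as
 * the 32-bit EFI calling convention expects.
 */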
#define efi_call_virt0(f) efi_call_virt(f)
#define efi_call_virt1(f, a1) efi_call_virt(f, a1)
#define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2)
#define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3)
#define efi_call_virt4(f, a1, a2, a3, a4) \
        efi_call_virt(f, a1, a2, a3, a4)
#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
        efi_call_virt(f, a1, a2, a3, a4, a5)
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
        efi_call_virt(f, a1, a2, a3, a4, a5, a6)
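/*
 * On 32-bit the memory type is ignored; every EFI runtime region simply
 * gets a cached mapping via ioremap_cache().
 */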
#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

extern u64 efi_call0(void *fp);
extern u64 efi_call1(void *fp, u64 arg1);
extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
                     u64 arg4, u64 arg5);
extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
                     u64 arg4, u64 arg5, u64 arg6);

#define efi_call_phys0(f) \
        efi_call0((void *)(f))
#define efi_call_phys1(f, a1) \
        efi_call1((void *)(f), (u64)(a1))
#define efi_call_phys2(f, a1, a2) \
        efi_call2((void *)(f), (u64)(a1), (u64)(a2))
#define efi_call_phys3(f, a1, a2, a3) \
        efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3))
#define efi_call_phys4(f, a1, a2, a3, a4) \
        efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
                  (u64)(a4))
#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
        efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
                  (u64)(a4), (u64)(a5))
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
        efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
                  (u64)(a4), (u64)(a5), (u64)(a6))

#define efi_call_virt0(f) \
        efi_call0((void *)(efi.systab->runtime->f))
#define efi_call_virt1(f, a1) \
        efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
#define efi_call_virt2(f, a1, a2) \
        efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
#define efi_call_virt3(f, a1, a2, a3) \
        efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3))
#define efi_call_virt4(f, a1, a2, a3, a4) \
        efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4))
#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
        efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5))
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
        efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))

extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
                                 u32 type);
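/*
 * Illustrative use (per the commit message above, not part of the original
 * header): efi_enter_virtual_mode() walks the EFI memory map and hands each
 * descriptor's type down, roughly
 *
 *      va = efi_ioremap(md->phys_addr, md->num_pages << EFI_PAGE_SHIFT,
 *                       md->type);
 *
 * so EFI_MEMORY_MAPPED_IO ranges are mapped with ioremap() instead of being
 * pushed through init_memory_mapping().
 */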

#endif /* CONFIG_X86_32 */

extern int add_efi_memmap;
extern void efi_reserve_early(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);

#ifndef CONFIG_EFI
/*
 * If EFI is not configured, have the EFI calls return -ENOSYS.
 */
#define efi_call0(_f) (-ENOSYS)
#define efi_call1(_f, _a1) (-ENOSYS)
#define efi_call2(_f, _a1, _a2) (-ENOSYS)
#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */