commit 6672f76a5a
Currently the size of the per-cpu region reserved to save crash notes is set by the per-architecture value MAX_NOTE_BYTES, which in turn is set to 1024 on all supported architectures.

While testing ia64 I recently discovered that this value is in fact too small. The particular setup I was using actually needs 1172 bytes. This led to a very tedious failure mode where the tail of one ELF note would overwrite the head of another if they ended up being allocated sequentially by kmalloc, which was often the case.

It seems to me that a far better approach is to calculate the size that the area needs to be. This patch does just that.

If a simpler stop-gap patch for ia64 is needed for 2.6.21(.X), it should be as easy as making MAX_NOTE_BYTES larger in arch/asm-ia64/kexec.h; perhaps 2048 would be a good choice. However, I think that the approach in this patch is a much more robust idea.

Acked-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
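For context, the approach the commit message describes amounts to deriving the per-cpu crash-note size from the ELF structures themselves rather than hard-coding it. The sketch below shows one way such a compile-time calculation can look; the macro names and includes are modelled on the kexec/ELF headers and are illustrative, not a quote of the patch.

/*
 * Illustrative sketch (assumed names, not the exact patch): one ELF note
 * is a header, a name, and a descriptor, each padded to 4-byte alignment.
 */
#include <linux/elf.h>		/* struct elf_note */
#include <linux/elfcore.h>	/* struct elf_prstatus */
#include <linux/kernel.h>	/* ALIGN() */

#define KEXEC_NOTE_HEAD_BYTES		ALIGN(sizeof(struct elf_note), 4)
#define KEXEC_CORE_NOTE_NAME		"CORE"
#define KEXEC_CORE_NOTE_NAME_BYTES	ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
#define KEXEC_CORE_NOTE_DESC_BYTES	ALIGN(sizeof(struct elf_prstatus), 4)

/* One prstatus note plus the empty note that terminates the note list */
#define KEXEC_NOTE_BYTES	((KEXEC_NOTE_HEAD_BYTES * 2) +		\
				 KEXEC_CORE_NOTE_NAME_BYTES +		\
				 KEXEC_CORE_NOTE_DESC_BYTES)

Sizing the region from sizeof(struct elf_prstatus) keeps it correct on every architecture, instead of relying on a fixed 1024-byte guess.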
#ifndef __ASM_SH_KEXEC_H
#define __ASM_SH_KEXEC_H

#include <asm/ptrace.h>
#include <asm/string.h>

/*
 * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
 * I.e. Maximum page that is mapped directly into kernel memory,
 * and kmap is not required.
 *
 * Someone correct me if FIXADDR_START - PAGEOFFSET is not the correct
 * calculation for the amount of memory directly mappable into the
 * kernel memory space.
 */

/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

#define KEXEC_CONTROL_CODE_SIZE 4096

/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH

static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs)
		memcpy(newregs, oldregs, sizeof(*newregs));
	else {
		__asm__ __volatile__ ("mov r0, %0" : "=r" (newregs->regs[0]));
		__asm__ __volatile__ ("mov r1, %0" : "=r" (newregs->regs[1]));
		__asm__ __volatile__ ("mov r2, %0" : "=r" (newregs->regs[2]));
		__asm__ __volatile__ ("mov r3, %0" : "=r" (newregs->regs[3]));
		__asm__ __volatile__ ("mov r4, %0" : "=r" (newregs->regs[4]));
		__asm__ __volatile__ ("mov r5, %0" : "=r" (newregs->regs[5]));
		__asm__ __volatile__ ("mov r6, %0" : "=r" (newregs->regs[6]));
		__asm__ __volatile__ ("mov r7, %0" : "=r" (newregs->regs[7]));
		__asm__ __volatile__ ("mov r8, %0" : "=r" (newregs->regs[8]));
		__asm__ __volatile__ ("mov r9, %0" : "=r" (newregs->regs[9]));
		__asm__ __volatile__ ("mov r10, %0" : "=r" (newregs->regs[10]));
		__asm__ __volatile__ ("mov r11, %0" : "=r" (newregs->regs[11]));
		__asm__ __volatile__ ("mov r12, %0" : "=r" (newregs->regs[12]));
		__asm__ __volatile__ ("mov r13, %0" : "=r" (newregs->regs[13]));
		__asm__ __volatile__ ("mov r14, %0" : "=r" (newregs->regs[14]));
		__asm__ __volatile__ ("mov r15, %0" : "=r" (newregs->regs[15]));

		__asm__ __volatile__ ("sts pr, %0" : "=r" (newregs->pr));
		__asm__ __volatile__ ("sts macl, %0" : "=r" (newregs->macl));
		__asm__ __volatile__ ("sts mach, %0" : "=r" (newregs->mach));

		__asm__ __volatile__ ("stc gbr, %0" : "=r" (newregs->gbr));
		__asm__ __volatile__ ("stc sr, %0" : "=r" (newregs->sr));

		newregs->pc = (unsigned long)current_text_addr();
	}
}
#endif /* __ASM_SH_KEXEC_H */