Mirror of https://github.com/joel16/android_kernel_sony_msm8994.git (synced 2024-11-27 06:01:12 +00:00)
[PATCH] pause_on_oops command line option
Attempt to fix the problem wherein people's oops reports scroll off the screen
due to repeated oopsing or to oopses on other CPUs.

If this happens, the user can reboot with the `pause_on_oops=<seconds>' option.
It will allow the first oopsing CPU to print an oops record just a single time.
Second oopsing attempts, or oopses on other CPUs, will cause those CPUs to
enter a tight loop until the specified number of seconds have elapsed.

The patch implements the infrastructure generically, in the expectation that
architectures other than x86 will find it useful.

Cc: Dave Jones <davej@codemonkey.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 41c28ff163
commit dd287796d6
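For example (the value 30 is illustrative; the option takes any number of
seconds, per the documentation hunk below), a user whose machine keeps
oopsing could reboot with:

    pause_on_oops=30

The first CPU to oops then prints its report once; any other CPU that oopses
inside the 30-second window spins quietly instead of overwriting the display.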
Documentation/kernel-parameters.txt:

@@ -1126,6 +1126,11 @@ running once the system is up.
 	pas16=		[HW,SCSI]
 			See header of drivers/scsi/pas16.c.
 
+	pause_on_oops=
+			Halt all CPUs after the first oops has been printed for
+			the specified number of seconds.  This is to be used if
+			your oopses keep scrolling off the screen.
+
 	pcbit=		[HW,ISDN]
 
 	pcd.		[PARIDE]
arch/i386/kernel/traps.c:

@@ -352,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err)
 	static int die_counter;
 	unsigned long flags;
 
+	oops_enter();
+
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
 		spin_lock_irqsave(&die.lock, flags);
@@ -404,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 		ssleep(5);
 		panic("Fatal exception");
 	}
+	oops_exit();
 	do_exit(SIGSEGV);
 }
 
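Since the commit message anticipates other architectures adopting these hooks,
here is a minimal sketch of the same bracketing in another architecture's
die() path. This is illustrative only, not part of the patch;
example_arch_die() and the register dump are hypothetical placeholders:

	/* Illustrative sketch, not from the patch. */
	void example_arch_die(const char *str, struct pt_regs *regs, long err)
	{
		oops_enter();		/* first oops proceeds; repeats stall here */

		console_verbose();
		printk(KERN_EMERG "%s: %04lx\n", str, err & 0xffff);
		show_registers(regs);	/* arch-specific state dump */

		oops_exit();		/* latecomers pause again on the way out */
		do_exit(SIGSEGV);
	}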
arch/i386/mm/fault.c:

@@ -509,24 +509,31 @@ no_context:
 
 	bust_spinlocks(1);
 
-#ifdef CONFIG_X86_PAE
-	if (error_code & 16) {
-		pte_t *pte = lookup_address(address);
+	if (oops_may_print()) {
+#ifdef CONFIG_X86_PAE
+		if (error_code & 16) {
+			pte_t *pte = lookup_address(address);
 
-		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-			printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
-	}
+			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+				printk(KERN_CRIT "kernel tried to execute "
+					"NX-protected page - exploit attempt? "
+					"(uid: %d)\n", current->uid);
+		}
 #endif
-	if (address < PAGE_SIZE)
-		printk(KERN_ALERT "BUG: unable to handle kernel NULL pointer dereference");
-	else
-		printk(KERN_ALERT "BUG: unable to handle kernel paging request");
-	printk(" at virtual address %08lx\n",address);
-	printk(KERN_ALERT " printing eip:\n");
-	printk("%08lx\n", regs->eip);
+		if (address < PAGE_SIZE)
+			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
+					"pointer dereference");
+		else
+			printk(KERN_ALERT "BUG: unable to handle kernel paging"
+					" request");
+		printk(" at virtual address %08lx\n",address);
+		printk(KERN_ALERT " printing eip:\n");
+		printk("%08lx\n", regs->eip);
+	}
 	page = read_cr3();
 	page = ((unsigned long *) __va(page))[address >> 22];
-	printk(KERN_ALERT "*pde = %08lx\n", page);
+	if (oops_may_print())
+		printk(KERN_ALERT "*pde = %08lx\n", page);
 	/*
 	 * We must not directly access the pte in the highpte
 	 * case, the page table might be allocated in highmem.
@@ -534,7 +541,7 @@ no_context:
 	 * it's allocated already.
 	 */
 #ifndef CONFIG_HIGHPTE
-	if (page & 1) {
+	if ((page & 1) && oops_may_print()) {
 		page &= PAGE_MASK;
 		address &= 0x003ff000;
 		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
 
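With this gating in place, only a CPU for which oops_may_print() is true emits
the fault banner. Pieced together from the printk format strings above (the
eip and address values here are hypothetical), the output that later-oopsing
CPUs now suppress looks like:

    BUG: unable to handle kernel NULL pointer dereference at virtual address 00000000
     printing eip:
    c01234ab
    *pde = 00000000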
include/linux/kernel.h:

@@ -91,6 +91,9 @@ extern struct notifier_block *panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
 	__attribute__ ((NORET_AND format (printf, 1, 2)));
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern int oops_may_print(void);
 fastcall NORET_TYPE void do_exit(long error_code)
 	ATTRIB_NORET;
 NORET_TYPE void complete_and_exit(struct completion *, long)
kernel/panic.c:

@@ -20,10 +20,13 @@
 #include <linux/nmi.h>
 #include <linux/kexec.h>
 
-int panic_timeout;
 int panic_on_oops;
 int tainted;
+static int pause_on_oops;
+static int pause_on_oops_flag;
+static DEFINE_SPINLOCK(pause_on_oops_lock);
 
+int panic_timeout;
 EXPORT_SYMBOL(panic_timeout);
 
 struct notifier_block *panic_notifier_list;
@@ -174,3 +177,95 @@ void add_taint(unsigned flag)
 	tainted |= flag;
 }
 EXPORT_SYMBOL(add_taint);
+
+static int __init pause_on_oops_setup(char *str)
+{
+	pause_on_oops = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+__setup("pause_on_oops=", pause_on_oops_setup);
+
+static void spin_msec(int msecs)
+{
+	int i;
+
+	for (i = 0; i < msecs; i++) {
+		touch_nmi_watchdog();
+		mdelay(1);
+	}
+}
+
+/*
+ * It just happens that oops_enter() and oops_exit() are identically
+ * implemented...
+ */
+static void do_oops_enter_exit(void)
+{
+	unsigned long flags;
+	static int spin_counter;
+
+	if (!pause_on_oops)
+		return;
+
+	spin_lock_irqsave(&pause_on_oops_lock, flags);
+	if (pause_on_oops_flag == 0) {
+		/* This CPU may now print the oops message */
+		pause_on_oops_flag = 1;
+	} else {
+		/* We need to stall this CPU */
+		if (!spin_counter) {
+			/* This CPU gets to do the counting */
+			spin_counter = pause_on_oops;
+			do {
+				spin_unlock(&pause_on_oops_lock);
+				spin_msec(MSEC_PER_SEC);
+				spin_lock(&pause_on_oops_lock);
+			} while (--spin_counter);
+			pause_on_oops_flag = 0;
+		} else {
+			/* This CPU waits for a different one */
+			while (spin_counter) {
+				spin_unlock(&pause_on_oops_lock);
+				spin_msec(1);
+				spin_lock(&pause_on_oops_lock);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
+}
+
+/*
+ * Return true if the calling CPU is allowed to print oops-related info.  This
+ * is a bit racy..
+ */
+int oops_may_print(void)
+{
+	return pause_on_oops_flag == 0;
+}
+
+/*
+ * Called when the architecture enters its oops handler, before it prints
+ * anything.  If this is the first CPU to oops, and it's oopsing the first time
+ * then let it proceed.
+ *
+ * This is all enabled by the pause_on_oops kernel boot option.  We do all this
+ * to ensure that oopses don't scroll off the screen.  It has the side-effect
+ * of preventing later-oopsing CPUs from mucking up the display, too.
+ *
+ * It turns out that the CPU which is allowed to print ends up pausing for the
+ * right duration, whereas all the other CPUs pause for twice as long: once in
+ * oops_enter(), once in oops_exit().
+ */
+void oops_enter(void)
+{
+	do_oops_enter_exit();
+}
+
+/*
+ * Called when the architecture exits its oops handler, after printing
+ * everything.
+ */
+void oops_exit(void)
+{
+	do_oops_enter_exit();
+}
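The serialization above is self-contained enough to model outside the kernel.
The following userspace sketch is an illustration under stated assumptions,
not kernel code: pthreads stand in for CPUs, a mutex for pause_on_oops_lock,
sleep()/usleep() for spin_msec(), and the pause is shortened to two seconds.
It reproduces the protocol: the first "CPU" to oops may print, the next one
counts the pause down, and later arrivals wait for the counter to drain.

    /* Userspace model of the pause_on_oops protocol -- illustrative only.
     * Build with: cc -pthread model.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int pause_on_oops = 2;   /* seconds, as if from the boot option */
    static int pause_on_oops_flag;
    static int spin_counter;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirrors oops_may_print(); read under the lock here, whereas the
     * kernel reads the flag unlocked ("a bit racy", as its comment says). */
    static int model_oops_may_print(void)
    {
        int ret;

        pthread_mutex_lock(&lock);
        ret = (pause_on_oops_flag == 0);
        pthread_mutex_unlock(&lock);
        return ret;
    }

    /* Mirrors do_oops_enter_exit() with the mutex standing in for the
     * spinlock and sleeping standing in for spin_msec(). */
    static void model_oops_enter_exit(void)
    {
        if (!pause_on_oops)
            return;

        pthread_mutex_lock(&lock);
        if (pause_on_oops_flag == 0) {
            pause_on_oops_flag = 1;         /* first oops: may print */
        } else if (!spin_counter) {
            spin_counter = pause_on_oops;   /* this "CPU" counts */
            do {
                pthread_mutex_unlock(&lock);
                sleep(1);                   /* one second per tick */
                pthread_mutex_lock(&lock);
            } while (--spin_counter);
            pause_on_oops_flag = 0;
        } else {
            while (spin_counter) {          /* wait for the counter */
                pthread_mutex_unlock(&lock);
                usleep(1000);
                pthread_mutex_lock(&lock);
            }
        }
        pthread_mutex_unlock(&lock);
    }

    static void *cpu(void *arg)
    {
        long id = (long)arg;

        /* As in fault.c above, the report is printed before oops_enter()
         * and gated on oops_may_print(). */
        if (model_oops_may_print())
            printf("cpu %ld: printing oops report\n", id);
        model_oops_enter_exit();            /* oops_enter() */
        model_oops_enter_exit();            /* oops_exit() */
        printf("cpu %ld: released\n", id);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[3];
        long i;

        for (i = 0; i < 3; i++)
            pthread_create(&t[i], NULL, cpu, (void *)i);
        for (i = 0; i < 3; i++)
            pthread_join(t[i], NULL);
        return 0;
    }

Running it shows one thread printing its report while the others are released
only after the pause has elapsed, matching the comment block above: the
printing "CPU" pauses once (in oops_exit()), the rest pause twice.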