mirror of https://github.com/FEX-Emu/linux.git
commit 3c9aea4742
The clock events merge introduced a change to the NMI watchdog code to handle the local APIC timer count no longer increasing in broadcast mode. This is fine for UP, but on SMP it papers over a stuck CPU which is not handling the broadcast interrupt, because the local APIC timer count and the irq0 count are summed up unconditionally. To cover all cases we need to keep track of which CPU handles irq0. In theory this is CPU#0, due to the explicit disabling of irq balancing for irq0, but there are systems which ignore this at the hardware level. The per-CPU irq0 accounting also allows us to remove the irq0-to-CPU0 binding.

Add a per-CPU counter for irq0 and evaluate it instead of the global irq0 count in the NMI watchdog code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
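To make the idea concrete, here is a minimal standalone C sketch (not kernel code; all names below are invented for illustration) of the per-CPU progress check the commit message describes: a CPU only counts as alive if its own local APIC timer count plus its own irq0 count has advanced since the last watchdog check, so interrupts handled on one CPU can no longer hide another CPU being stuck.

/*
 * Userspace illustration of per-CPU interrupt progress checking.
 * The structure and names are made up for this example.
 */
#include <stdio.h>

#define NR_CPUS 2

struct cpu_irq_stat {
	unsigned int apic_timer_irqs;	/* local APIC timer interrupts */
	unsigned int irq0_irqs;		/* timer interrupts (irq0) handled here */
};

static struct cpu_irq_stat irq_stat[NR_CPUS];
static unsigned int last_sum[NR_CPUS];

/* Per-CPU check: has this CPU handled any timer interrupt since last time? */
static int cpu_is_stuck(int cpu)
{
	unsigned int sum = irq_stat[cpu].apic_timer_irqs +
			   irq_stat[cpu].irq0_irqs;

	if (sum == last_sum[cpu])
		return 1;	/* no progress since the previous check */

	last_sum[cpu] = sum;
	return 0;
}

int main(void)
{
	/* CPU 0 handles irq0, CPU 1 only its local APIC timer. */
	irq_stat[0].irq0_irqs = 100;
	irq_stat[1].apic_timer_irqs = 100;

	cpu_is_stuck(0);	/* prime the per-CPU snapshots */
	cpu_is_stuck(1);

	/* Simulate further ticks: CPU 0 keeps going, CPU 1 is stuck. */
	irq_stat[0].irq0_irqs += 10;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("CPU %d: %s\n", cpu,
		       cpu_is_stuck(cpu) ? "stuck" : "alive");
	return 0;
}

Built with a plain C compiler, this reports CPU 0 as alive and CPU 1 as stuck, which is exactly the case an unconditional global sum would hide.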
25 lines · 608 B · C
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
	unsigned int __softirq_pending;
	unsigned long idle_timestamp;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq0_irqs;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
extern irq_cpustat_t irq_stat[];

#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)

void ack_bad_irq(unsigned int irq);

#include <linux/irq_cpustat.h>

#endif /* __ASM_HARDIRQ_H */
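For completeness, a small userspace mock (an assumption for illustration, not part of the patch) of how the per_cpu()/__IRQ_STAT() accessors declared above would be used to bump and read the new irq0_irqs field. Here per_cpu() is modelled as plain array indexing so the snippet compiles outside the kernel; in the real kernel these come from the per-CPU infrastructure.

/* Mock of the accessor pattern from the header; the mock itself is ours. */
#include <stdio.h>

#define NR_CPUS 2

typedef struct {
	unsigned int __softirq_pending;
	unsigned long idle_timestamp;
	unsigned int __nmi_count;
	unsigned int apic_timer_irqs;
	unsigned int irq0_irqs;
} irq_cpustat_t;

static irq_cpustat_t irq_stat[NR_CPUS];

#define per_cpu(var, cpu)	((var)[(cpu)])
#define __IRQ_STAT(cpu, member)	(per_cpu(irq_stat, cpu).member)

int main(void)
{
	__IRQ_STAT(0, irq0_irqs)++;		/* timer tick handled on CPU 0 */
	__IRQ_STAT(1, apic_timer_irqs)++;	/* local APIC tick on CPU 1 */

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("CPU %d: irq0=%u apic_timer=%u\n", cpu,
		       __IRQ_STAT(cpu, irq0_irqs),
		       __IRQ_STAT(cpu, apic_timer_irqs));
	return 0;
}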