Mirror of https://github.com/FEX-Emu/linux.git, synced 2024-12-24 02:18:54 +00:00
2a786b452e
move_native_irq tries to do the right thing when migrating irqs by disabling them. However, disabling them is a software-logical operation, not a hardware one. This has always been a little flaky, and after Ingo's latest round of changes it is guaranteed not to mask the apic.

So this patch fixes move_native_irq to call the mask and unmask chip methods directly, guaranteeing that we mask the irq while we are migrating it. We must do this, as it is required by all code that calls into this path. Since we don't know the masked status when IRQ_DISABLED is set, we would not be able to restore it; in that case the code just gives up and tries again the next time this routine is called.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
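The core of the change is easiest to see against the old code. Below is a rough sketch of the pre-patch move_native_irq(), reconstructed from the commit message above rather than quoted from the old tree, so treat its exact shape as an assumption: the old path toggled the software "disabled" state via the chip's disable()/enable() hooks, which no longer guarantees the line is masked at the hardware (apic) level.

/*
 * Sketch of the pre-patch behaviour this commit replaces (reconstructed,
 * not verbatim).  chip->disable()/chip->enable() only flip software
 * state, so the rte could still be reprogrammed while unmasked.
 */
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	if (likely(!(desc->status & IRQ_DISABLED)))
		desc->chip->disable(irq);	/* software state only */

	move_masked_irq(irq);

	if (likely(!(desc->status & IRQ_DISABLED)))
		desc->chip->enable(irq);
}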
76 lines
1.6 KiB
C
#include <linux/irq.h>
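/*
 * set_pending_irq() records a requested affinity change for @irq and
 * marks it IRQ_MOVE_PENDING; the actual migration is deferred until
 * move_native_irq()/move_masked_irq() run from a safe (masked) context.
 */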
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_MOVE_PENDING;
	irq_desc[irq].pending_mask = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}
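/*
 * Hypothetical caller sketch (illustrative only; the real callers of
 * set_pending_irq() live elsewhere in the tree): an affinity-setting
 * path that cannot safely reprogram the irq immediately simply records
 * the request and lets the interrupt path apply it later.
 */
static void example_request_affinity(unsigned int irq, cpumask_t mask)
{
	set_pending_irq(irq, mask);	/* applied later by move_native_irq() */
}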
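/*
 * move_masked_irq() performs the deferred affinity change.  The caller
 * must hold desc->lock and must have masked the irq at the chip level;
 * reprogramming an unmasked edge-triggered rte can misfire (see the
 * comment inside the function).
 */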
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);

	/*
	 * If there was a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * irqs, but in an edge-trigger case we might be setting the rte
	 * while an active trigger is coming in.  This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(!cpus_empty(tmp))) {
		desc->chip->set_affinity(irq, tmp);
	}
	cpus_clear(irq_desc[irq].pending_mask);
}
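/*
 * move_native_irq() is the entry point called from the interrupt
 * handling path.  It masks the irq at the chip level around the actual
 * migration, so the rte is never reprogrammed while unmasked.  When the
 * irq is IRQ_DISABLED we cannot tell whether it is already masked and
 * so could not restore that state afterwards; in that case we give up
 * and retry on the next invocation.
 */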
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	if (unlikely(desc->status & IRQ_DISABLED))
		return;

	desc->chip->mask(irq);
	move_masked_irq(irq);
	desc->chip->unmask(irq);
}
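For a sense of where this runs: callers in the interrupt delivery path invoke move_native_irq() while the irq is being serviced. A minimal sketch of such a call site follows; the function name and its surroundings are hypothetical, not taken from this file or from the real chip code.

/*
 * Illustrative only: a hypothetical edge-ack path.  move_native_irq()
 * masks, reprograms and unmasks the line itself, so the caller just
 * invokes it at a point where masking the irq is safe.
 */
static void example_ack_edge(unsigned int irq)
{
	move_native_irq(irq);	/* migrate now, while masking is safe */
	/* chip-specific ack of the edge would follow here */
}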