commit 61165d7a03
After resume on a 2cpu laptop, kernel builds collapse with a sed hang,
sh or make segfault (often on 20295564), real-time signal to cc1 etc.
Several hurdles to jump, but a manually-assisted bisect led to -rc1's
d2bcbad5f3 ("x86: do not zap_low_mappings in __smp_prepare_cpus").
Though the low mappings were removed at bootup,
they were left behind (with Global flags helping to keep them in TLB)
after resume or cpu online, causing the crashes seen.
Reinstate zap_low_mappings (with local __flush_tlb_all) for each cpu_up
on x86_32. This used to be serialized by smp_commenced_mask: that's now
gone, but a low_mappings flag will do. No need for native_smp_cpus_done
to repeat the zap: let mem_init zap BSP's low mappings just like on UP.
(In passing, fix error code from native_cpu_up: do_boot_cpu returns a
variety of diagnostic values, Dprintk what it says but convert to -EIO.
And save_pg_dir separately before zap_low_mappings: doesn't matter now,
but zapping twice in succession wiped out resume's swsusp_pg_dir.)
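On x86_32 the cpu_up path then takes roughly this shape (a sketch
reconstructed from the description above, not the verbatim patch; the
clone_pgd_range() details in particular are assumptions):

	#ifdef CONFIG_X86_32
		/* re-create the low identity mappings for the boot trampoline */
		clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
		flush_tlb_all();
		low_mappings = 1;

		err = do_boot_cpu(apicid, cpu);

		/* zap_low_mappings() does a local __flush_tlb_all(), so the
		 * Global low-mapping TLB entries cannot survive the bringup */
		zap_low_mappings();
		low_mappings = 0;
	#else
		err = do_boot_cpu(apicid, cpu);
	#endif
		if (err) {
			Dprintk("do_boot_cpu failed %d\n", err);
			return -EIO;	/* convert diagnostic values to one error */
		}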
That worked well on the duo and one quad, but wouldn't boot 3rd or 4th
cpu on P4 Xeon, oopsing just after unlock_ipi_call_lock. The TLB flush
IPI now being sent reveals a long-standing bug: the booting cpu has its
APIC readied in smp_callin at the top of start_secondary, but isn't put
into the cpu_online_map until just before that unlock_ipi_call_lock.
So native_smp_call_function_mask to online cpus would send_IPI_allbutself,
including the cpu just coming up, though it has been excluded from the
count to wait for: by the time it handles the IPI, the call data on
native_smp_call_function_mask's stack may well have been overwritten.
So fall back to send_IPI_mask while cpu_online_map does not match
cpu_callout_map: perhaps there's a better APICological fix to be
made at the start_secondary end, but I wouldn't know that.
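The window comes from the bringup ordering in start_secondary(), roughly
as follows (again a sketch; the exact code lives in smpboot.c):

	static void __cpuinit start_secondary(void *unused)
	{
		smp_callin();	/* APIC readied: IPIs can reach this cpu now */
		...
		lock_ipi_call_lock();
		cpu_set(smp_processor_id(), cpu_online_map);	/* only now online */
		unlock_ipi_call_lock();
		...
	}

Between smp_callin() and that cpu_set(), the cpu sits in cpu_callout_map
but not cpu_online_map, which is exactly the condition that
native_smp_call_function_mask() below tests before risking
send_IPI_allbutself.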
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <mach_ipi.h>
#include <mach_apic.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware work arounds
 *	for this. We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___,
 *	or are signal timing bugs worked around in hardware and there's
 *	nothing of note with C stepping upwards.
 */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

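/*
 * start_secondary() holds call_lock while putting its cpu into
 * cpu_online_map, so an onlining cpu cannot pop into the map midway
 * through an smp_call_function broadcast (see the changelog above).
 */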
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;

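/*
 * Minimal call-function helper used by native_smp_send_stop() below:
 * IPIs all other online cpus.  The caller is expected to hold call_lock;
 * the stop path only trylocks it, so as not to deadlock in panic.
 */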
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
			      void (*func)(void *), void *info,
			      int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/*
	 * Send a message to other CPUs.  Only take the allbutself
	 * broadcast shortcut when every called-out cpu is already online:
	 * a booting cpu readies its APIC (and so can take this IPI) well
	 * before it enters cpu_online_map, and by the time it handled the
	 * IPI, the call data on this stack could have been overwritten.
	 */
	if (cpus_equal(mask, allbutself) &&
	    cpus_equal(cpu_online_map, cpu_callout_map))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (hlt_works(smp_processor_id()))
		for (;;) halt();
	for (;;);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */

static void native_smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif
}

void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.cpu_up = native_cpu_up,
	.smp_cpus_done = native_smp_cpus_done,

	.smp_send_stop = native_smp_send_stop,
	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
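smp_ops exists so that paravirtualized guests can substitute their own
implementations; ordinary callers never touch the struct directly but go
through thin inline wrappers, roughly of this shape (the real declarations
live in include/asm-x86/smp.h in this era, so treat the exact signatures
as assumptions):

	static inline int __cpu_up(unsigned int cpu)
	{
		return smp_ops.cpu_up(cpu);
	}

	static inline void smp_send_reschedule(int cpu)
	{
		smp_ops.smp_send_reschedule(cpu);
	}

	static inline int smp_call_function_mask(cpumask_t mask,
						 void (*func)(void *info),
						 void *info, int wait)
	{
		return smp_ops.smp_call_function_mask(mask, func, info, wait);
	}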