Mirror of https://github.com/FEX-Emu/linux.git, synced 2025-01-24 19:44:55 +00:00
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "A bit on the largish side due to a series of fixes for a regression
  in the x86 vector management which was introduced in 4.3. This work
  was started in December already, but it took some time to fix all
  corner cases and a couple of older bugs in that area which were
  detected while at it.

  Aside of that, a few platform updates for intel-mid, quark and UV,
  and two fixes in the mm code:

   - Use proper types for pgprot values to avoid truncation

   - Prevent a size truncation in the pageattr code when setting page
     attributes for large mappings"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  x86/mm/pat: Avoid truncation when converting cpa->numpages to address
  x86/mm: Fix types used in pgprot cacheability flags translations
  x86/platform/quark: Print boundaries correctly
  x86/platform/UV: Remove EFI memmap quirk for UV2+
  x86/platform/intel-mid: Join string and fix SoC name
  x86/platform/intel-mid: Enable 64-bit build
  x86/irq: Plug vector cleanup race
  x86/irq: Call irq_force_move_complete with irq descriptor
  x86/irq: Remove outgoing CPU from vector cleanup mask
  x86/irq: Remove the cpumask allocation from send_cleanup_vector()
  x86/irq: Clear move_in_progress before sending cleanup IPI
  x86/irq: Remove offline cpus from vector cleanup
  x86/irq: Get rid of code duplication
  x86/irq: Copy vectormask instead of an AND operation
  x86/irq: Check vector allocation early
  x86/irq: Reorganize the search in assign_irq_vector
  x86/irq: Reorganize the return path in assign_irq_vector
  x86/irq: Do not use apic_chip_data.old_domain as temporary buffer
  x86/irq: Validate that irq descriptor is still active
  x86/irq: Fix a race in x86_vector_free_irqs()
  ...
commit d517be5fcf
@@ -509,11 +509,10 @@ config X86_INTEL_CE
 
 config X86_INTEL_MID
         bool "Intel MID platform support"
-        depends on X86_32
         depends on X86_EXTENDED_PLATFORM
         depends on X86_PLATFORM_DEVICES
         depends on PCI
-        depends on PCI_GOANY
+        depends on X86_64 || (PCI_GOANY && X86_32)
         depends on X86_IO_APIC
         select SFI
         select I2C

@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);

@@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
 }
 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
+        pgprotval_t val = pgprot_val(pgprot);
         pgprot_t new;
-        unsigned long val;
 
-        val = pgprot_val(pgprot);
         pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                 ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
         return new;
 }
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
+        pgprotval_t val = pgprot_val(pgprot);
         pgprot_t new;
-        unsigned long val;
 
-        val = pgprot_val(pgprot);
         pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                 ((val & _PAGE_PAT_LARGE) >>
                  (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));

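
The hunk above exists because pgprot_val() yields a pgprotval_t, which is 64 bits wide on 32-bit kernels with PAE, while unsigned long is only 32 bits there; storing the value in an unsigned long silently drops the high flag bits. A standalone user-space sketch of that truncation (the typedef and the _PAGE_NX placement are assumptions for illustration, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pgprotval_t;          /* 64-bit PTE flags, as with PAE */

#define _PAGE_NX (1ULL << 63)          /* example high flag that would be lost */

int main(void)
{
        pgprotval_t prot = _PAGE_NX | 0x9d;     /* high and low flag bits set */

        /* uint32_t plays the role of a 32-bit unsigned long here:
         * the assignment silently discards the upper 32 bits. */
        uint32_t truncated = (uint32_t)prot;

        printf("full=%#llx truncated=%#x\n",
               (unsigned long long)prot, truncated);
        return 0;
}
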
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
         int pin, ioapic, irq, irq_entry;
         const struct cpumask *mask;
+        struct irq_desc *desc;
         struct irq_data *idata;
         struct irq_chip *chip;
 
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
                 if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
                         continue;
 
-                idata = irq_get_irq_data(irq);
+                desc = irq_to_desc(irq);
+                raw_spin_lock_irq(&desc->lock);
+                idata = irq_desc_get_irq_data(desc);
 
                 /*
                  * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
                 /* Might be lapic_chip for irq 0 */
                 if (chip->irq_set_affinity)
                         chip->irq_set_affinity(idata, mask, false);
+                raw_spin_unlock_irq(&desc->lock);
         }
 }
 #endif

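
The setup_ioapic_dest() hunk replaces a lockless irq_get_irq_data() lookup with irq_desc_get_irq_data() under desc->lock, so reading the irq data and applying the affinity happen atomically with respect to concurrent updates. A minimal sketch of that pattern in portable C, with a pthread mutex standing in for raw_spin_lock_irq() and illustrative names throughout:

#include <pthread.h>

struct desc {
        pthread_mutex_t lock;   /* stands in for raw_spin_lock_irq(&desc->lock) */
        void *irq_data;         /* chip/vector state read below */
};

/* Snapshot and update happen in one critical section, so the data
 * cannot change between the lookup and the affinity call. */
static void set_affinity_locked(struct desc *d, void (*apply)(void *))
{
        pthread_mutex_lock(&d->lock);
        apply(d->irq_data);     /* e.g. chip->irq_set_affinity(...) */
        pthread_mutex_unlock(&d->lock);
}
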
@@ -31,7 +31,7 @@ struct apic_chip_data
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
 static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];

@@ -118,35 +118,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
          */
         static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
         static int current_offset = VECTOR_OFFSET_START % 16;
-        int cpu, err;
+        int cpu, vector;
 
-        if (d->move_in_progress)
+        /*
+         * If there is still a move in progress or the previous move has not
+         * been cleaned up completely, tell the caller to come back later.
+         */
+        if (d->move_in_progress ||
+            cpumask_intersects(d->old_domain, cpu_online_mask))
                 return -EBUSY;
 
         /* Only try and allocate irqs on cpus that are present */
-        err = -ENOSPC;
         cpumask_clear(d->old_domain);
+        cpumask_clear(searched_cpumask);
         cpu = cpumask_first_and(mask, cpu_online_mask);
         while (cpu < nr_cpu_ids) {
-                int new_cpu, vector, offset;
+                int new_cpu, offset;
 
+                /* Get the possible target cpus for @mask/@cpu from the apic */
                 apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
+                /*
+                 * Clear the offline cpus from @vector_cpumask for searching
+                 * and verify whether the result overlaps with @mask. If true,
+                 * then the call to apic->cpu_mask_to_apicid_and() will
+                 * succeed as well. If not, no point in trying to find a
+                 * vector in this mask.
+                 */
+                cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+                if (!cpumask_intersects(vector_searchmask, mask))
+                        goto next_cpu;
+
                 if (cpumask_subset(vector_cpumask, d->domain)) {
-                        err = 0;
                         if (cpumask_equal(vector_cpumask, d->domain))
-                                break;
+                                goto success;
                         /*
-                         * New cpumask using the vector is a proper subset of
-                         * the current in use mask. So cleanup the vector
-                         * allocation for the members that are not used anymore.
+                         * Mark the cpus which are not longer in the mask for
+                         * cleanup.
                          */
-                        cpumask_andnot(d->old_domain, d->domain,
-                                       vector_cpumask);
-                        d->move_in_progress =
-                           cpumask_intersects(d->old_domain, cpu_online_mask);
-                        cpumask_and(d->domain, d->domain, vector_cpumask);
-                        break;
+                        cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+                        vector = d->cfg.vector;
+                        goto update;
                 }
 
                 vector = current_vector;

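
The vector_searchmask test added above is a set-algebra early exit: when the apic-supplied domain, restricted to online CPUs, has no overlap with the requested affinity mask, no vector scan on this CPU can succeed, and conversely the later cpu_mask_to_apicid_and() call is guaranteed to work whenever the scan does. A toy model with 64-bit words in place of the kernel cpumask API (all names here are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* One bit per cpu, mirroring a cpumask. */
static bool search_can_succeed(uint64_t vector_domain, uint64_t online,
                               uint64_t requested)
{
        uint64_t searchable = vector_domain & online;   /* vector_searchmask */

        /* Empty intersection: skip this cpu up front instead of
         * scanning every vector and failing afterwards. */
        return (searchable & requested) != 0;
}
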
@@ -158,45 +170,60 @@ next:
                         vector = FIRST_EXTERNAL_VECTOR + offset;
                 }
 
-                if (unlikely(current_vector == vector)) {
-                        cpumask_or(d->old_domain, d->old_domain,
-                                   vector_cpumask);
-                        cpumask_andnot(vector_cpumask, mask, d->old_domain);
-                        cpu = cpumask_first_and(vector_cpumask,
-                                                cpu_online_mask);
-                        continue;
-                }
+                /* If the search wrapped around, try the next cpu */
+                if (unlikely(current_vector == vector))
+                        goto next_cpu;
 
                 if (test_bit(vector, used_vectors))
                         goto next;
 
-                for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+                for_each_cpu(new_cpu, vector_searchmask) {
                         if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                 goto next;
                 }
                 /* Found one! */
                 current_vector = vector;
                 current_offset = offset;
-                if (d->cfg.vector) {
+                /* Schedule the old vector for cleanup on all cpus */
+                if (d->cfg.vector)
                         cpumask_copy(d->old_domain, d->domain);
-                        d->move_in_progress =
-                           cpumask_intersects(d->old_domain, cpu_online_mask);
-                }
-                for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+                for_each_cpu(new_cpu, vector_searchmask)
                         per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
-                d->cfg.vector = vector;
-                cpumask_copy(d->domain, vector_cpumask);
-                err = 0;
-                break;
-        }
+                goto update;
 
-        if (!err) {
-                /* cache destination APIC IDs into cfg->dest_apicid */
-                err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-                                                   &d->cfg.dest_apicid);
+next_cpu:
+                /*
+                 * We exclude the current @vector_cpumask from the requested
+                 * @mask and try again with the next online cpu in the
+                 * result. We cannot modify @mask, so we use @vector_cpumask
+                 * as a temporary buffer here as it will be reassigned when
+                 * calling apic->vector_allocation_domain() above.
+                 */
+                cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+                cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+                cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+                continue;
         }
+        return -ENOSPC;
 
-        return err;
+update:
+        /*
+         * Exclude offline cpus from the cleanup mask and set the
+         * move_in_progress flag when the result is not empty.
+         */
+        cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+        d->move_in_progress = !cpumask_empty(d->old_domain);
+        d->cfg.vector = vector;
+        cpumask_copy(d->domain, vector_cpumask);
+success:
+        /*
+         * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+         * as we already established, that mask & d->domain & cpu_online_mask
+         * is not empty.
+         */
+        BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
                                            &d->cfg.dest_apicid));
+        return 0;
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,

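
After this rework, __assign_irq_vector() has three well-defined exits: next_cpu restarts the scan on the next untried CPU, update commits a freshly found vector and derives the cleanup mask, and success only caches the destination APIC IDs. A compact self-contained sketch of that control flow, using toy 64-bit masks and a trivial stand-in allocator rather than the real per-cpu vector tables (every name here is illustrative):

#include <stdint.h>

struct chip_data {
        uint64_t domain, old_domain;    /* cpumasks as single words */
        int vector;                     /* 0 means none assigned yet */
};

static int find_free_vector(const uint8_t *used)
{
        for (int v = 1; v < 64; v++)    /* toy vector space */
                if (!used[v])
                        return v;
        return -1;
}

static int assign(struct chip_data *d, uint64_t mask, uint64_t online,
                  const uint8_t *used_vectors)
{
        uint64_t searched = 0, cpus = mask & online, domain = 0;
        int vector = -1;

        while (cpus) {
                domain = cpus & (~cpus + 1);    /* this cpu's toy domain */
                if ((domain & online & mask) == 0)
                        goto next_cpu;          /* no search can succeed */
                vector = find_free_vector(used_vectors);
                if (vector > 0) {
                        if (d->vector)          /* schedule old vector cleanup */
                                d->old_domain = d->domain;
                        goto update;
                }
next_cpu:
                searched |= domain;             /* never rescan these cpus */
                cpus = mask & ~searched & online;
        }
        return -1;                              /* -ENOSPC */

update:
        d->old_domain &= online;        /* offline cpus need no cleanup IPI */
        d->vector = vector;
        d->domain = domain;
        /* success: would cache mask & domain & online, known non-empty */
        return 0;
}
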
@@ -226,10 +253,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
         struct irq_desc *desc;
-        unsigned long flags;
         int cpu, vector;
 
-        raw_spin_lock_irqsave(&vector_lock, flags);
         BUG_ON(!data->cfg.vector);
 
         vector = data->cfg.vector;

@@ -239,10 +264,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
         data->cfg.vector = 0;
         cpumask_clear(data->domain);
 
-        if (likely(!data->move_in_progress)) {
-                raw_spin_unlock_irqrestore(&vector_lock, flags);
+        /*
+         * If move is in progress or the old_domain mask is not empty,
+         * i.e. the cleanup IPI has not been processed yet, we need to remove
+         * the old references to desc from all cpus vector tables.
+         */
+        if (!data->move_in_progress && cpumask_empty(data->old_domain))
                 return;
-        }
 
         desc = irq_to_desc(irq);
         for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {

@@ -255,7 +283,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
                 }
         }
         data->move_in_progress = 0;
-        raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,

@@ -276,19 +303,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
                                  unsigned int virq, unsigned int nr_irqs)
 {
+        struct apic_chip_data *apic_data;
         struct irq_data *irq_data;
+        unsigned long flags;
         int i;
 
         for (i = 0; i < nr_irqs; i++) {
                 irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                 if (irq_data && irq_data->chip_data) {
+                        raw_spin_lock_irqsave(&vector_lock, flags);
                         clear_irq_vector(virq + i, irq_data->chip_data);
-                        free_apic_chip_data(irq_data->chip_data);
+                        apic_data = irq_data->chip_data;
+                        irq_domain_reset_irq_data(irq_data);
+                        raw_spin_unlock_irqrestore(&vector_lock, flags);
+                        free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
                         if (virq + i < nr_legacy_irqs())
                                 legacy_irq_data[virq + i] = NULL;
 #endif
-                        irq_domain_reset_irq_data(irq_data);
                 }
         }
 }

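
The reordered x86_vector_free_irqs() follows the standard detach-then-free discipline: unhook the chip data and reset the irq data while vector_lock is held, then release the lock before freeing, so the free never runs under the lock and no concurrent path can see a dangling pointer. A generic sketch of the pattern (pthread mutex in place of the raw spinlock; the structure is illustrative, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct slot {
        pthread_mutex_t lock;
        void *chip_data;
};

static void destroy_slot(struct slot *s)
{
        void *data;

        pthread_mutex_lock(&s->lock);
        data = s->chip_data;            /* detach under the lock... */
        s->chip_data = NULL;            /* ...so other paths see NULL */
        pthread_mutex_unlock(&s->lock);

        free(data);                     /* release outside the lock */
}
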
@@ -406,6 +438,8 @@ int __init arch_early_irq_init(void)
         arch_init_htirq_domain(x86_vector_domain);
 
         BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+        BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+        BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
 
         return arch_early_ioapic_init();
 }

@@ -494,14 +528,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
                 return -EINVAL;
 
         err = assign_irq_vector(irq, data, dest);
-        if (err) {
-                if (assign_irq_vector(irq, data,
-                                      irq_data_get_affinity_mask(irq_data)))
-                        pr_err("Failed to recover vector for irq %d\n", irq);
-                return err;
-        }
-
-        return IRQ_SET_MASK_OK;
+        return err ? err : IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip lapic_controller = {

@@ -513,20 +540,12 @@ static struct irq_chip lapic_controller = {
 #ifdef CONFIG_SMP
 static void __send_cleanup_vector(struct apic_chip_data *data)
 {
-        cpumask_var_t cleanup_mask;
-
-        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-                unsigned int i;
-
-                for_each_cpu_and(i, data->old_domain, cpu_online_mask)
-                        apic->send_IPI_mask(cpumask_of(i),
-                                            IRQ_MOVE_CLEANUP_VECTOR);
-        } else {
-                cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
-                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                free_cpumask_var(cleanup_mask);
-        }
+        raw_spin_lock(&vector_lock);
+        cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
         data->move_in_progress = 0;
+        if (!cpumask_empty(data->old_domain))
+                apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+        raw_spin_unlock(&vector_lock);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)

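
The old __send_cleanup_vector() allocated a temporary cpumask with GFP_ATOMIC and needed a slow per-CPU IPI fallback for the allocation-failure case; the new version narrows data->old_domain in place under vector_lock and sends a single IPI. The shape of that simplification, sketched with plain bitmask words instead of the kernel cpumask and IPI APIs (names are illustrative):

#include <stdint.h>

static unsigned int ipis_sent;          /* toy stand-in for send_IPI_mask() */

static void send_ipi_mask(uint64_t mask)
{
        if (mask)
                ipis_sent++;
}

/* No temporary allocation: old_domain is masked in place and reused
 * as the IPI destination, as in the reworked __send_cleanup_vector(). */
static void send_cleanup(uint64_t *old_domain, uint64_t online)
{
        *old_domain &= online;          /* offline cpus cannot ack the IPI */
        send_ipi_mask(*old_domain);     /* one IPI covers every target cpu */
}
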
@@ -570,12 +589,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                         goto unlock;
 
                 /*
-                 * Check if the irq migration is in progress. If so, we
-                 * haven't received the cleanup request yet for this irq.
+                 * Nothing to cleanup if irq migration is in progress
+                 * or this cpu is not set in the cleanup mask.
                  */
-                if (data->move_in_progress)
+                if (data->move_in_progress ||
+                    !cpumask_test_cpu(me, data->old_domain))
                         goto unlock;
 
+                /*
+                 * We have two cases to handle here:
+                 * 1) vector is unchanged but the target mask got reduced
+                 * 2) vector and the target mask has changed
+                 *
+                 * #1 is obvious, but in #2 we have two vectors with the same
+                 * irq descriptor: the old and the new vector. So we need to
+                 * make sure that we only cleanup the old vector. The new
+                 * vector has the current @vector number in the config and
+                 * this cpu is part of the target mask. We better leave that
+                 * one alone.
+                 */
                 if (vector == data->cfg.vector &&
                     cpumask_test_cpu(me, data->domain))
                         goto unlock;

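
Case #2 in the comment above is the subtle one: while a move is in flight, two per-cpu vector table entries reference the same irq descriptor, and only the old one may be reclaimed. A toy illustration of the guard (the vector numbers are made up):

#include <assert.h>
#include <stdbool.h>

/* An entry may be cleaned up unless it is the new one: the current
 * vector number on a cpu inside the new target domain. */
static bool may_cleanup(int table_vector, int current_vector,
                        bool cpu_in_new_domain)
{
        return !(table_vector == current_vector && cpu_in_new_domain);
}

int main(void)
{
        /* say the irq moved from vector 34 on cpu 0 to vector 42 on cpu 1 */
        assert(may_cleanup(34, 42, false));     /* old entry: reclaim */
        assert(!may_cleanup(42, 42, true));     /* new entry: keep */
        return 0;
}
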
@@ -593,6 +625,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                         goto unlock;
                 }
                 __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+                cpumask_clear_cpu(me, data->old_domain);
 unlock:
                 raw_spin_unlock(&desc->lock);
         }

@@ -621,12 +654,48 @@ void irq_complete_move(struct irq_cfg *cfg)
         __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
-        struct irq_cfg *cfg = irq_cfg(irq);
+        struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+        struct apic_chip_data *data = apic_chip_data(irqdata);
+        struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
-        if (cfg)
-                __irq_complete_move(cfg, cfg->vector);
+        if (!cfg)
+                return;
+
+        __irq_complete_move(cfg, cfg->vector);
+
+        /*
+         * This is tricky. If the cleanup of @data->old_domain has not been
+         * done yet, then the following setaffinity call will fail with
+         * -EBUSY. This can leave the interrupt in a stale state.
+         *
+         * The cleanup cannot make progress because we hold @desc->lock. So in
+         * case @data->old_domain is not yet cleaned up, we need to drop the
+         * lock and acquire it again. @desc cannot go away, because the
+         * hotplug code holds the sparse irq lock.
+         */
+        raw_spin_lock(&vector_lock);
+        /* Clean out all offline cpus (including ourself) first. */
+        cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+        while (!cpumask_empty(data->old_domain)) {
+                raw_spin_unlock(&vector_lock);
+                raw_spin_unlock(&desc->lock);
+                cpu_relax();
+                raw_spin_lock(&desc->lock);
+                /*
+                 * Reevaluate apic_chip_data. It might have been cleared after
+                 * we dropped @desc->lock.
+                 */
+                data = apic_chip_data(irqdata);
+                if (!data)
+                        return;
+                raw_spin_lock(&vector_lock);
+        }
+        raw_spin_unlock(&vector_lock);
 }
 #endif

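
The loop added to irq_force_complete_move() breaks a circular wait: the cleanup IPI handler takes desc->lock, which the caller of this function already holds, so waiting for old_domain to drain without dropping that lock would spin forever. Both locks are released, the CPU yields, and the state is revalidated after reacquisition. A condensed sketch of the drop-and-reacquire pattern (pthread mutexes standing in for the spinlocks; illustrative only):

#include <pthread.h>
#include <sched.h>
#include <stdint.h>

struct state {
        pthread_mutex_t desc_lock;      /* held on entry, like desc->lock */
        pthread_mutex_t vector_lock;
        uint64_t old_domain;            /* drained by the "cleanup" side */
};

/* Called with desc_lock held. The cleanup side needs desc_lock, so we
 * must let go of it on every iteration or neither side progresses. */
static void wait_for_cleanup(struct state *s)
{
        pthread_mutex_lock(&s->vector_lock);
        while (s->old_domain != 0) {
                pthread_mutex_unlock(&s->vector_lock);
                pthread_mutex_unlock(&s->desc_lock);    /* let cleanup run */
                sched_yield();                          /* like cpu_relax() */
                pthread_mutex_lock(&s->desc_lock);      /* then revalidate */
                pthread_mutex_lock(&s->vector_lock);
        }
        pthread_mutex_unlock(&s->vector_lock);
}
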
@@ -889,7 +889,10 @@ void __init uv_system_init(void)
                 return;
         }
         pr_info("UV: Found %s hub\n", hub);
-        map_low_mmrs();
 
+        /* We now only need to map the MMRs on UV1 */
+        if (is_uv1_hub())
+                map_low_mmrs();
+
         m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
         m_val = m_n_config.s.m_skt;

@@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data)
 
         reserve_ebda_region();
 
+        switch (boot_params.hdr.hardware_subarch) {
+        case X86_SUBARCH_INTEL_MID:
+                x86_intel_mid_early_setup();
+                break;
+        default:
+                break;
+        }
+
         start_kernel();
 }

@@ -462,7 +462,7 @@ void fixup_irqs(void)
                  * non intr-remapping case, we can't wait till this interrupt
                  * arrives at this cpu before completing the irq move.
                  */
-                irq_force_complete_move(irq);
+                irq_force_complete_move(desc);
 
                 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                         break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
                 }
 
                 chip = irq_data_get_irq_chip(data);
+                /*
+                 * The interrupt descriptor might have been cleaned up
+                 * already, but it is not yet removed from the radix tree
+                 */
+                if (!chip) {
+                        raw_spin_unlock(&desc->lock);
+                        continue;
+                }
+
                 if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                         chip->irq_mask(data);

@@ -33,7 +33,7 @@ struct cpa_data {
         pgd_t           *pgd;
         pgprot_t        mask_set;
         pgprot_t        mask_clr;
-        int             numpages;
+        unsigned long   numpages;
         int             flags;
         unsigned long   pfn;
         unsigned        force_split : 1;
@@ -1350,7 +1350,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                  * CPA operation. Either a large page has been
                  * preserved or a single page update happened.
                  */
-                BUG_ON(cpa->numpages > numpages);
+                BUG_ON(cpa->numpages > numpages || !cpa->numpages);
                 numpages -= cpa->numpages;
                 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                         cpa->curpage++;

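
The numpages type change matters because the value is later shifted into an address: with a 32-bit int, a large enough mapping pushes every set bit past bit 31 before any widening happens, and the computed range collapses. A small self-contained demonstration, assuming 4 KiB pages; the unsigned 32-bit arithmetic stands in for the old int field:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

int main(void)
{
        uint32_t numpages = 1 << 20;    /* 1M pages = a 4 GiB range */

        /* 32-bit arithmetic, as with the old int field: the only set
         * bit is shifted past bit 31 and the result wraps to zero. */
        uint32_t wrapped = numpages << PAGE_SHIFT;

        /* Widening before the shift preserves the full range. */
        uint64_t correct = (uint64_t)numpages << PAGE_SHIFT;

        printf("wrapped=%#x correct=%#llx\n",
               wrapped, (unsigned long long)correct);
        return 0;
}
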
@@ -8,6 +8,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
 
@@ -248,6 +249,16 @@ out:
         return ret;
 }
 
+static const struct dmi_system_id sgi_uv1_dmi[] = {
+        { NULL, "SGI UV1",
+                {       DMI_MATCH(DMI_PRODUCT_NAME,     "Stoutland Platform"),
+                        DMI_MATCH(DMI_PRODUCT_VERSION,  "1.0"),
+                        DMI_MATCH(DMI_BIOS_VENDOR,      "SGI.COM"),
+                }
+        },
+        { } /* NULL entry stops DMI scanning */
+};
+
 void __init efi_apply_memmap_quirks(void)
 {
         /*
@@ -260,10 +271,8 @@ void __init efi_apply_memmap_quirks(void)
                 efi_unmap_memmap();
         }
 
-        /*
-         * UV doesn't support the new EFI pagetable mapping yet.
-         */
-        if (is_uv_system())
+        /* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */
+        if (dmi_check_system(sgi_uv1_dmi))
                 set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }

@@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void)
                 intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
         else {
                 intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-                pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+                pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
         }
 
 out:
@@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg)
         else if (strcmp("lapic_and_apbt", arg) == 0)
                 intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
         else {
-                pr_warn("X86 INTEL_MID timer option %s not recognised"
-                           " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
-                           arg);
+                pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+                        arg);
                 return -EINVAL;
         }
         return 0;
 }
 __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);

@@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
                 if (imr_is_enabled(&imr)) {
                         base = imr_to_phys(imr.addr_lo);
                         end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+                        size = end - base + 1;
                 } else {
                         base = 0;
                         end = 0;
+                        size = 0;
                 }
-                size = end - base;
                 seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
                            "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
                            &base, &end, size, imr.rmask, imr.wmask,
@@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 {
         phys_addr_t base = virt_to_phys(&_text);
         size_t size = virt_to_phys(&__end_rodata) - base;
+        unsigned long start, end;
         int i;
         int ret;
 
@@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
         for (i = 0; i < idev->max_imr; i++)
                 imr_clear(i);
 
+        start = (unsigned long)_text;
+        end = (unsigned long)__end_rodata - 1;
+
         /*
          * Setup a locked IMR around the physical extent of the kernel
          * from the beginning of the .text secton to the end of the
          * .rodata section as one physically contiguous block.
+         *
+         * We don't round up @size since it is already PAGE_SIZE aligned.
+         * See vmlinux.lds.S for details.
          */
         ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
         if (ret < 0) {
-                pr_err("unable to setup IMR for kernel: (%p - %p)\n",
-                        &_text, &__end_rodata);
+                pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+                        size / 1024, start, end);
         } else {
-                pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
-                        size / 1024, &_text, &__end_rodata);
+                pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+                        size / 1024, start, end);
         }
 }

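
Both quark changes are inclusive-bounds arithmetic: imr_to_phys(imr.addr_hi) + IMR_MASK is the address of the last byte covered, so the size of [base, end] is end - base + 1, and the printed upper boundary is likewise __end_rodata - 1 rather than one past it. A worked example (the addresses and the 1 KiB granularity are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define IMR_ALIGN 0x400UL               /* assumed 1 KiB IMR granularity */
#define IMR_MASK  (IMR_ALIGN - 1)       /* low bits of the last covered byte */

int main(void)
{
        uint32_t base = 0x100000;               /* first byte covered */
        uint32_t end  = 0x1ffc00 + IMR_MASK;    /* last byte covered: 0x1fffff */

        size_t off_by_one = end - base;         /* misses the final byte */
        size_t correct    = end - base + 1;     /* counts both endpoints */

        printf("off_by_one=%#zx correct=%#zx\n", off_by_one, correct);
        return 0;
}
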