/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000  Richard Gooch
   Copyright (c) 2002       Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
   The postal address is:
     Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692),
   section 11.11.7

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
   on 6-7 March 2002.
   Source: Intel Architecture Software Developers Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/sort.h>

#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/kvm_para.h>
#include "mtrr.h"

u32 num_var_ranges = 0;

unsigned int mtrr_usage_table[MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops * mtrr_if = NULL;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

void set_mtrr_ops(struct mtrr_ops * ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}

/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets < rev 6 have problems with write-combining
		   Don't allow it and leave room for other chipsets to be tagged */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/* Intel 450NX errata #23: non-ascending cacheline evictions to
		   write-combining memory may result in data corruption */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}

/*  This function sets num_var_ranges to the number of variable MTRRs  */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}
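
/*
 * For reference, a minimal sketch (illustration only, not used by the
 * driver) of how the MTRRcap MSR read above decodes on Intel CPUs, per
 * the Intel SDM: bits 7:0 are VCNT (what num_var_ranges keeps), bit 8
 * reports fixed-range MTRR support and bit 10 write-combining support.
 */
#if 0
static void mtrrcap_decode(unsigned long cap)
{
	unsigned int vcnt = cap & 0xff;		/* variable range count */
	unsigned int has_fix = (cap >> 8) & 1;	/* fixed-range MTRRs */
	unsigned int has_wc = (cap >> 10) & 1;	/* write-combining type */

	printk(KERN_DEBUG "MTRRcap: vcnt=%u fix=%u wc=%u\n",
	       vcnt, has_fix, has_wc);
}
#endif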

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}

struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	else
		mtrr_if->set_all();

	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
}

static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
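
/*
 * Illustration of the rule above (not driver code): an overlap is tolerated
 * only when the combined effective type is well defined.  UC wins over any
 * other type, and WT over WB, so for example:
 *
 *	types_compatible(MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRBACK)  -> 1
 *	types_compatible(MTRR_TYPE_WRTHROUGH, MTRR_TYPE_WRBACK)   -> 1
 *	types_compatible(MTRR_TYPE_WRCOMB, MTRR_TYPE_WRBACK)      -> 0
 */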

/**
 * set_mtrr - update mtrrs on all processors
 * @reg: mtrr in question
 * @base: mtrr base
 * @size: mtrr size
 * @type: mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let
 * them take care of it. When they're done, they again decrement data->count
 * and wait for data.gate to be reset.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	/* make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/* do our MTRR business */

	/* HACK!
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);

	/* wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change,
	 * so we're the last ones to touch 'data'.
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}

/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	int i, replace, error;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;

	if ((error = mtrr_if->validate_add_page(base, size, type)))
		return error;

	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		printk(KERN_WARNING "mtrr: zero sized request\n");
		return -EINVAL;
	}

	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	/* Search for existing MTRR */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
			continue;
		/* At this point we know there is some kind of overlap/enclosure */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
				/* New region encloses an existing region */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				}
				else if (types_compatible(type, ltype))
					continue;
			}
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%lx000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			       base, size, mtrr_attrib_to_str(ltype),
			       mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
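
/*
 * Usage sketch for mtrr_add_page() (illustration only; the address and size
 * below are hypothetical): both arguments are in 4 kB pages, so a 4 MB
 * write-combining aperture at physical 0xd0000000 would be requested as:
 *
 *	reg = mtrr_add_page(0xd0000000UL >> PAGE_SHIFT,
 *			    0x400000UL >> PAGE_SHIFT,
 *			    MTRR_TYPE_WRCOMB, true);
 *
 * A non-negative return is the register cookie; a negative one is an error.
 */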

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING
			"mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG
			"mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 bool increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
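
/*
 * Typical driver usage sketch (illustration only, in the spirit of
 * Documentation/mtrr.txt; the framebuffer address and size are made up).
 * mtrr_add() takes byte units, and base must be aligned to the power-of-two
 * size; the returned cookie is what mtrr_del() expects back.
 */
#if 0
	int reg = mtrr_add(0xf8000000UL, 0x800000UL, MTRR_TYPE_WRCOMB, true);

	if (reg < 0)
		return reg;	/* all registers in use, or no WC support */
#endif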

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */

int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);
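/*
 * Illustrative sketch, not part of this driver: how a caller might pair
 * mtrr_add()/mtrr_del() per the kerneldoc above. The frame-buffer
 * base/size values and helper names are made-up examples, and the
 * mtrr_add() call assumes the usual (base, size, type, increment)
 * signature.
 */
#if 0	/* example only */
static int example_fb_mtrr = -1;

static void example_fb_setup(unsigned long fb_base, unsigned long fb_size)
{
	/* Request write-combining for the aperture; negative means failure. */
	example_fb_mtrr = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
}

static void example_fb_teardown(unsigned long fb_base, unsigned long fb_size)
{
	/* With a valid reg, base and size are ignored by mtrr_del(). */
	if (example_fb_mtrr >= 0)
		mtrr_del(example_fb_mtrr, fb_base, fb_size);
}
#endif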
/* HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}
/* The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};
static struct mtrr_value mtrr_state[MAX_VAR_RANGES];

static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i,
			     &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize,
			     &mtrr_state[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device * sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i,
				 mtrr_state[i].lbase,
				 mtrr_state[i].lsize,
				 mtrr_state[i].ltype);
	}
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};
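/*
 * Note: these hooks are presumably hung off the CPU sysdev class elsewhere
 * in this driver (via sysdev_driver_register()) once the MTRR interface has
 * been selected; per the comment above, only the non-generic MTRR drivers
 * need them.
 */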
/* should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM 256

struct res_range {
	unsigned long start;
	unsigned long end;
};

static int __init
add_range(struct res_range *range, int nr_range, unsigned long start,
	  unsigned long end)
{
	/* out of slots */
	if (nr_range >= RANGE_NUM)
		return nr_range;

	range[nr_range].start = start;
	range[nr_range].end = end;

	nr_range++;

	return nr_range;
}
static int __init
add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
		     unsigned long end)
{
	int i;

	/* try to merge it with old one */
	for (i = 0; i < nr_range; i++) {
		unsigned long final_start, final_end;
		unsigned long common_start, common_end;

		if (!range[i].end)
			continue;

		common_start = max(range[i].start, start);
		common_end = min(range[i].end, end);
		if (common_start > common_end + 1)
			continue;

		final_start = min(range[i].start, start);
		final_end = max(range[i].end, end);

		range[i].start = final_start;
		range[i].end = final_end;
		return nr_range;
	}

	/* need to add that */
	return add_range(range, nr_range, start, end);
}
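/*
 * Worked example (assumed values): merging [0x00, 0x7f] into a list that
 * already holds [0x40, 0xff] overlaps (common part [0x40, 0x7f]), so the
 * existing slot is widened in place to [0x00, 0xff]. Merely adjacent
 * ranges such as [0x00, 0x3f] and [0x40, 0xff] merge too, since
 * common_start == common_end + 1 passes the check above.
 */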
static void __init
subtract_range(struct res_range *range, unsigned long start, unsigned long end)
{
	int i, j;

	for (j = 0; j < RANGE_NUM; j++) {
		if (!range[j].end)
			continue;

		if (start <= range[j].start && end >= range[j].end) {
			range[j].start = 0;
			range[j].end = 0;
			continue;
		}

		if (start <= range[j].start && end < range[j].end &&
		    range[j].start < end + 1) {
			range[j].start = end + 1;
			continue;
		}

		if (start > range[j].start && end >= range[j].end &&
		    range[j].end > start - 1) {
			range[j].end = start - 1;
			continue;
		}

		if (start > range[j].start && end < range[j].end) {
			/* find a new spare slot */
			for (i = 0; i < RANGE_NUM; i++) {
				if (range[i].end == 0)
					break;
			}
			if (i < RANGE_NUM) {
				range[i].end = range[j].end;
				range[i].start = end + 1;
			} else {
				printk(KERN_ERR "ran out of slots in ranges\n");
			}
			range[j].end = start - 1;
			continue;
		}
	}
}
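/*
 * Worked example (assumed values): subtracting [0x80, 0xbf] from a list
 * holding [0x00, 0xff] hits the last case above: the slot is cut back to
 * [0x00, 0x7f] and the remainder [0xc0, 0xff] is parked in a spare slot.
 */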
static int __init cmp_range(const void *x1, const void *x2)
{
	const struct res_range *r1 = x1;
	const struct res_range *r2 = x2;
	long start1, start2;

	start1 = r1->start;
	start2 = r2->start;

	return start1 - start2;
}
struct var_mtrr_range_state {
	unsigned long base_pfn;
	unsigned long size_pfn;
	mtrr_type type;
};

static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
static int __initdata debug_print;

static int __init
x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
		       unsigned long extra_remove_base,
		       unsigned long extra_remove_size)
{
	unsigned long i, base, size;
	mtrr_type type;

	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		nr_range = add_range_with_merge(range, nr_range, base,
						base + size - 1);
	}
	if (debug_print) {
		printk(KERN_DEBUG "After WB checking\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
			       range[i].start, range[i].end + 1);
	}

	/* take out UC ranges */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_UNCACHABLE)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			continue;
		base = range_state[i].base_pfn;
		subtract_range(range, base, base + size - 1);
	}
	if (extra_remove_size)
		subtract_range(range, extra_remove_base,
			       extra_remove_base + extra_remove_size - 1);

	/* get new range num */
	nr_range = 0;
	for (i = 0; i < RANGE_NUM; i++) {
		if (!range[i].end)
			continue;
		nr_range++;
	}
	if (debug_print) {
		printk(KERN_DEBUG "After UC checking\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
			       range[i].start, range[i].end + 1);
	}

	/* sort the ranges */
	sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
	if (debug_print) {
		printk(KERN_DEBUG "After sorting\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016lx - %016lx\n",
			       range[i].start, range[i].end + 1);
	}

	/* clear those that are not used */
	for (i = nr_range; i < RANGE_NUM; i++)
		memset(&range[i], 0, sizeof(range[i]));

	return nr_range;
}
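/*
 * Worked example (assumed setup): with WB entries covering pfns for
 * [0, 2G) and [2G, 4G) and one UC entry over the last 512MB below 4GB,
 * the WB pass merges the two entries into one range and the UC pass then
 * subtracts the tail, leaving a single sorted range covering [0, 3.5G).
 */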
static struct res_range __initdata range[RANGE_NUM];

#ifdef CONFIG_MTRR_SANITIZER

static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
{
	unsigned long sum;
	int i;

	sum = 0;
	for (i = 0; i < nr_range; i++)
		sum += range[i].end + 1 - range[i].start;

	return sum;
}

static int enable_mtrr_cleanup __initdata =
	CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;

static int __init disable_mtrr_cleanup_setup(char *str)
{
	if (enable_mtrr_cleanup != -1)
		enable_mtrr_cleanup = 0;
	return 0;
}
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);

static int __init enable_mtrr_cleanup_setup(char *str)
{
	if (enable_mtrr_cleanup != -1)
		enable_mtrr_cleanup = 1;
	return 0;
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);

static int __init mtrr_cleanup_debug_setup(char *str)
{
	debug_print = 1;
	return 0;
}
early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
struct var_mtrr_state {
	unsigned long	range_startk;
	unsigned long	range_sizek;
	unsigned long	chunk_sizek;
	unsigned long	gran_sizek;
	unsigned int	reg;
};

static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	     unsigned char type, unsigned int address_bits)
{
	u32 base_lo, base_hi, mask_lo, mask_hi;
	u64 base, mask;

	if (!sizek) {
		fill_mtrr_var_range(reg, 0, 0, 0, 0);
		return;
	}

	mask = (1ULL << address_bits) - 1;
	mask &= ~((((u64)sizek) << 10) - 1);

	base = ((u64)basek) << 10;

	base |= type;
	mask |= 0x800;

	base_lo = base & ((1ULL<<32) - 1);
	base_hi = base >> 32;

	mask_lo = mask & ((1ULL<<32) - 1);
	mask_hi = mask >> 32;

	fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
}
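/*
 * Worked example (assumed 36 address bits): basek = 0x100000 (a 1GB base),
 * sizek = 0x80000 (512MB), type = MTRR_TYPE_WRBACK (6):
 *   base = 0x40000000 | 6                        -> base_lo = 0x40000006
 *   mask = 0xfffffffff & ~0x1fffffff = 0xfe0000000, then | 0x800 (valid)
 *                                                 -> mask_lo = 0xe0000800,
 *                                                    mask_hi = 0xf
 */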
static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	      unsigned char type)
{
	range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
	range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
	range_state[reg].type = type;
}

static void __init
set_var_mtrr_all(unsigned int address_bits)
{
	unsigned long basek, sizek;
	unsigned char type;
	unsigned int reg;

	for (reg = 0; reg < num_var_ranges; reg++) {
		basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
		sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
		type = range_state[reg].type;

		set_var_mtrr(reg, basek, sizek, type, address_bits);
	}
}

static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
	      unsigned long range_sizek, unsigned char type)
{
	if (!range_sizek || (reg >= num_var_ranges))
		return reg;

	while (range_sizek) {
		unsigned long max_align, align;
		unsigned long sizek;

		/* Compute the maximum size I can make a range */
		if (range_startk)
			max_align = ffs(range_startk) - 1;
		else
			max_align = 32;
		align = fls(range_sizek) - 1;
		if (align > max_align)
			align = max_align;

		sizek = 1 << align;
		if (debug_print)
			printk(KERN_DEBUG "Setting variable MTRR %d, "
			       "base: %ldMB, range: %ldMB, type %s\n",
			       reg, range_startk >> 10, sizek >> 10,
			       (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
			       ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"));
		save_var_mtrr(reg++, range_startk, sizek, type);
		range_startk += sizek;
		range_sizek -= sizek;
		if (reg >= num_var_ranges)
			break;
	}
	return reg;
}
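/*
 * Worked example (assumed values): a WB range starting at 0 with
 * range_sizek = 0x380000 (3.5GB) is emitted as three power-of-2 aligned
 * MTRRs: 2GB at 0, 1GB at 2GB, then 512MB at 3GB, consuming three
 * registers.
 */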
static unsigned __init
range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
			unsigned long sizek)
{
	unsigned long hole_basek, hole_sizek;
	unsigned long second_basek, second_sizek;
	unsigned long range0_basek, range0_sizek;
	unsigned long range_basek, range_sizek;
	unsigned long chunk_sizek;
	unsigned long gran_sizek;

	hole_basek = 0;
	hole_sizek = 0;
	second_basek = 0;
	second_sizek = 0;
	chunk_sizek = state->chunk_sizek;
	gran_sizek = state->gran_sizek;

	/* align with gran size, to prevent small blocks using up MTRRs */
	range_basek = ALIGN(state->range_startk, gran_sizek);
	if ((range_basek > basek) && basek)
		return second_sizek;
	state->range_sizek -= (range_basek - state->range_startk);
	range_sizek = ALIGN(state->range_sizek, gran_sizek);

	while (range_sizek > state->range_sizek) {
		range_sizek -= gran_sizek;
		if (!range_sizek)
			return 0;
	}
	state->range_sizek = range_sizek;

	/* try to append some small hole */
	range0_basek = state->range_startk;
	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);

	/* no increase */
	if (range0_sizek == state->range_sizek) {
		if (debug_print)
			printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
			       range0_basek<<10,
			       (range0_basek + state->range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				state->range_sizek, MTRR_TYPE_WRBACK);
		return 0;
	}

	/* only cut back, when it is not the last */
	if (sizek) {
		while (range0_basek + range0_sizek > (basek + sizek)) {
			if (range0_sizek >= chunk_sizek)
				range0_sizek -= chunk_sizek;
			else
				range0_sizek = 0;

			if (!range0_sizek)
				break;
		}
	}

second_try:
	range_basek = range0_basek + range0_sizek;

	/* one hole in the middle */
	if (range_basek > basek && range_basek <= (basek + sizek))
		second_sizek = range_basek - basek;

	if (range0_sizek > state->range_sizek) {

		/* one hole in middle or at end */
		hole_sizek = range0_sizek - state->range_sizek - second_sizek;

		/* hole size should be less than half of range0 size */
		if (hole_sizek > (range0_sizek >> 1) &&
		    range0_sizek >= chunk_sizek) {
			range0_sizek -= chunk_sizek;
			second_sizek = 0;
			hole_sizek = 0;

			goto second_try;
		}
	}

	if (range0_sizek) {
		if (debug_print)
			printk(KERN_DEBUG "range0: %016lx - %016lx\n",
			       range0_basek<<10,
			       (range0_basek + range0_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				range0_sizek, MTRR_TYPE_WRBACK);
	}

	if (range0_sizek < state->range_sizek) {
		/* need to handle left over */
		range_sizek = state->range_sizek - range0_sizek;

		if (debug_print)
			printk(KERN_DEBUG "range: %016lx - %016lx\n",
			       range_basek<<10,
			       (range_basek + range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range_basek,
				range_sizek, MTRR_TYPE_WRBACK);
	}

	if (hole_sizek) {
		hole_basek = range_basek - hole_sizek - second_sizek;
		if (debug_print)
			printk(KERN_DEBUG "hole: %016lx - %016lx\n",
			       hole_basek<<10,
			       (hole_basek + hole_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, hole_basek,
				hole_sizek, MTRR_TYPE_UNCACHABLE);
	}

	return second_sizek;
}
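/*
 * Worked example (assumed state, last range so basek = sizek = 0):
 * covering 1984MB (range_sizek = 0x1f0000) from 0 with chunk_sizek = 1GB
 * and gran_sizek = 64MB rounds range0 up to one 2GB WB mapping and
 * punches a 64MB UC hole over [1984MB, 2048MB): two registers instead of
 * the five (1G+512M+256M+128M+64M) a plain power-of-2 split would need.
 */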
static void __init
set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
		   unsigned long size_pfn)
{
	unsigned long basek, sizek;
	unsigned long second_sizek = 0;

	if (state->reg >= num_var_ranges)
		return;

	basek = base_pfn << (PAGE_SHIFT - 10);
	sizek = size_pfn << (PAGE_SHIFT - 10);

	/* See if I can merge with the last range */
	if ((basek <= 1024) ||
	    (state->range_startk + state->range_sizek == basek)) {
		unsigned long endk = basek + sizek;
		state->range_sizek = endk - state->range_startk;
		return;
	}
	/* Write the range mtrrs */
	if (state->range_sizek != 0)
		second_sizek = range_to_mtrr_with_hole(state, basek, sizek);

	/* Allocate an msr */
	state->range_startk = basek + second_sizek;
	state->range_sizek = sizek - second_sizek;
}
/* minimum size of mtrr block that can take hole */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);

static int __init parse_mtrr_chunk_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	mtrr_chunk_size = memparse(p, &p);
	return 0;
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);

/* granularity of mtrr block */
static u64 mtrr_gran_size __initdata;

static int __init parse_mtrr_gran_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	mtrr_gran_size = memparse(p, &p);
	return 0;
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);

static int nr_mtrr_spare_reg __initdata =
				 CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;

static int __init parse_mtrr_spare_reg(char *arg)
{
	if (arg)
		nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
	return 0;
}

early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
static int __init
x86_setup_var_mtrrs(struct res_range *range, int nr_range,
		    u64 chunk_size, u64 gran_size)
{
	struct var_mtrr_state var_state;
	int i;
	int num_reg;

	var_state.range_startk	= 0;
	var_state.range_sizek	= 0;
	var_state.reg		= 0;
	var_state.chunk_sizek	= chunk_size >> 10;
	var_state.gran_sizek	= gran_size >> 10;

	memset(range_state, 0, sizeof(range_state));

	/* Write the range etc */
	for (i = 0; i < nr_range; i++)
		set_var_mtrr_range(&var_state, range[i].start,
				   range[i].end - range[i].start + 1);

	/* Write the last range */
	if (var_state.range_sizek != 0)
		range_to_mtrr_with_hole(&var_state, 0, 0);

	num_reg = var_state.reg;
	/* Clear out the extra MTRR's */
	while (var_state.reg < num_var_ranges) {
		save_var_mtrr(var_state.reg, 0, 0, 0);
		var_state.reg++;
	}

	return num_reg;
}
struct mtrr_cleanup_result {
	unsigned long gran_sizek;
	unsigned long chunk_sizek;
	unsigned long lose_cover_sizek;
	unsigned int num_reg;
	int bad;
};

/*
 * gran_size: 1M, 2M, ..., 2G (12 choices)
 * chunk size: gran_size, ..., 2G
 * so we need 12 + 11 + ... + 1 = (1 + 12) * 12 / 2 = (1 + 12) * 6 = 78 slots
 */
#define NUM_RESULT	78
#define PSHIFT		(PAGE_SHIFT - 10)

static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static struct res_range __initdata range_new[RANGE_NUM];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];
2008-04-29 10:52:33 +00:00
|
|
|
|
|
|
|
static int __init mtrr_cleanup(unsigned address_bits)
|
|
|
|
{
|
2008-04-30 03:25:58 +00:00
|
|
|
unsigned long extra_remove_base, extra_remove_size;
|
2008-04-29 10:52:33 +00:00
|
|
|
unsigned long i, base, size, def, dummy;
|
2008-04-30 03:25:58 +00:00
|
|
|
mtrr_type type;
|
2008-05-02 09:40:22 +00:00
|
|
|
int nr_range, nr_range_new;
|
|
|
|
u64 chunk_size, gran_size;
|
|
|
|
unsigned long range_sums, range_sums_new;
|
|
|
|
int index_good;
|
|
|
|
int num_reg_good;
|
2008-04-29 10:52:33 +00:00
|
|
|
|
|
|
|
/* extra one for all 0 */
|
|
|
|
int num[MTRR_NUM_TYPES + 1];
|
|
|
|
|
|
|
|
if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
|
|
|
|
return 0;
|
|
|
|
rdmsr(MTRRdefType_MSR, def, dummy);
|
|
|
|
def &= 0xff;
|
|
|
|
if (def != MTRR_TYPE_UNCACHABLE)
|
|
|
|
return 0;
|
|
|
|
|
2008-05-02 09:40:22 +00:00
|
|
|
/* get it and store it aside */
|
|
|
|
memset(range_state, 0, sizeof(range_state));
|
|
|
|
for (i = 0; i < num_var_ranges; i++) {
|
|
|
|
mtrr_if->get(i, &base, &size, &type);
|
|
|
|
range_state[i].base_pfn = base;
|
|
|
|
range_state[i].size_pfn = size;
|
|
|
|
range_state[i].type = type;
|
|
|
|
}
|
|
|
|
|
2008-04-29 10:52:33 +00:00
|
|
|
/* check entries number */
|
|
|
|
memset(num, 0, sizeof(num));
|
|
|
|
for (i = 0; i < num_var_ranges; i++) {
|
2008-05-02 09:40:22 +00:00
|
|
|
type = range_state[i].type;
|
|
|
|
size = range_state[i].size_pfn;
|
2008-04-29 10:52:33 +00:00
|
|
|
if (type >= MTRR_NUM_TYPES)
|
|
|
|
continue;
|
|
|
|
if (!size)
|
|
|
|
type = MTRR_NUM_TYPES;
|
|
|
|
num[type]++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check if we got UC entries */
|
|
|
|
if (!num[MTRR_TYPE_UNCACHABLE])
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* check if we only had WB and UC */
|
|
|
|
if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
|
|
|
|
num_var_ranges - num[MTRR_NUM_TYPES])
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
memset(range, 0, sizeof(range));
|
|
|
|
extra_remove_size = 0;
|
|
|
|
if (mtrr_tom2) {
|
|
|
|
extra_remove_base = 1 << (32 - PAGE_SHIFT);
|
2008-05-02 09:40:22 +00:00
|
|
|
extra_remove_size =
|
|
|
|
(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
|
|
|
|
}
|
|
|
|
nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
|
|
|
|
extra_remove_size);
|
|
|
|
range_sums = sum_ranges(range, nr_range);
|
|
|
|
printk(KERN_INFO "total RAM coverred: %ldM\n",
|
|
|
|
range_sums >> (20 - PAGE_SHIFT));
|
|
|
|
|
|
|
|
if (mtrr_chunk_size && mtrr_gran_size) {
|
|
|
|
int num_reg;
|
|
|
|
|
2008-09-27 07:30:06 +00:00
|
|
|
debug_print++;
|
2008-05-02 09:40:22 +00:00
|
|
|
/* convert ranges to var ranges state */
|
|
|
|
num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
|
|
|
|
mtrr_gran_size);
|
|
|
|
|
|
|
|
/* we got new setting in range_state, check it */
|
|
|
|
memset(range_new, 0, sizeof(range_new));
|
|
|
|
nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
|
|
|
|
extra_remove_base,
|
|
|
|
extra_remove_size);
|
|
|
|
range_sums_new = sum_ranges(range_new, nr_range_new);
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
result[i].chunk_sizek = mtrr_chunk_size >> 10;
|
|
|
|
result[i].gran_sizek = mtrr_gran_size >> 10;
|
|
|
|
result[i].num_reg = num_reg;
|
|
|
|
if (range_sums < range_sums_new) {
|
|
|
|
result[i].lose_cover_sizek =
|
|
|
|
(range_sums_new - range_sums) << PSHIFT;
|
|
|
|
result[i].bad = 1;
|
|
|
|
} else
|
|
|
|
result[i].lose_cover_sizek =
|
|
|
|
(range_sums - range_sums_new) << PSHIFT;
|
|
|
|
|
2008-05-10 05:40:52 +00:00
|
|
|
printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
|
|
|
|
result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10,
|
2008-05-02 09:40:22 +00:00
|
|
|
result[i].chunk_sizek >> 10);
|
|
|
|
printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n",
|
|
|
|
result[i].num_reg, result[i].bad?"-":"",
|
|
|
|
result[i].lose_cover_sizek >> 10);
|
|
|
|
if (!result[i].bad) {
|
|
|
|
set_var_mtrr_all(address_bits);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
|
|
|
|
"will find optimal one\n");
|
2008-09-27 07:30:06 +00:00
|
|
|
debug_print--;
|
2008-05-02 09:40:22 +00:00
|
|
|
memset(result, 0, sizeof(result[0]));
|
|
|
|
}
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
|
|
|
|
memset(result, 0, sizeof(result));
|
|
|
|
for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) {
|
2008-09-27 07:30:07 +00:00
|
|
|
for (chunk_size = gran_size; chunk_size < (1ULL<<32);
|
2008-05-02 09:40:22 +00:00
|
|
|
chunk_size <<= 1) {
|
|
|
|
int num_reg;
|
|
|
|
|
2008-05-10 05:40:52 +00:00
|
|
|
if (debug_print)
|
|
|
|
printk(KERN_INFO
|
2008-05-02 09:40:22 +00:00
|
|
|
"\ngran_size: %lldM chunk_size_size: %lldM\n",
|
2008-05-10 05:40:52 +00:00
|
|
|
gran_size >> 20, chunk_size >> 20);
|
2008-05-02 09:40:22 +00:00
|
|
|
if (i >= NUM_RESULT)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* convert ranges to var ranges state */
|
|
|
|
num_reg = x86_setup_var_mtrrs(range, nr_range,
|
|
|
|
chunk_size, gran_size);
|
|
|
|
|
|
|
|
/* we got new setting in range_state, check it */
|
|
|
|
memset(range_new, 0, sizeof(range_new));
|
|
|
|
nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
|
|
|
|
extra_remove_base, extra_remove_size);
|
|
|
|
range_sums_new = sum_ranges(range_new, nr_range_new);
|
|
|
|
|
|
|
|
result[i].chunk_sizek = chunk_size >> 10;
|
|
|
|
result[i].gran_sizek = gran_size >> 10;
|
|
|
|
result[i].num_reg = num_reg;
|
|
|
|
if (range_sums < range_sums_new) {
|
|
|
|
result[i].lose_cover_sizek =
|
|
|
|
(range_sums_new - range_sums) << PSHIFT;
|
|
|
|
result[i].bad = 1;
|
|
|
|
} else
|
|
|
|
result[i].lose_cover_sizek =
|
|
|
|
(range_sums - range_sums_new) << PSHIFT;
|
|
|
|
|
|
|
|
/* double check it */
|
|
|
|
if (!result[i].bad && !result[i].lose_cover_sizek) {
|
|
|
|
if (nr_range_new != nr_range ||
|
|
|
|
memcmp(range, range_new, sizeof(range)))
|
|
|
|
result[i].bad = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!result[i].bad && (range_sums - range_sums_new <
|
|
|
|
min_loss_pfn[num_reg])) {
|
|
|
|
min_loss_pfn[num_reg] =
|
|
|
|
range_sums - range_sums_new;
|
|
|
|
}
|
|
|
|
i++;
|
|
|
|
}
|
2008-04-29 10:52:33 +00:00
|
|
|
}
|
|
|
|
|
2008-05-02 09:40:22 +00:00
|
|
|
/* print out all */
|
|
|
|
for (i = 0; i < NUM_RESULT; i++) {
|
2008-05-10 05:40:52 +00:00
|
|
|
printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t",
|
2008-05-02 09:40:22 +00:00
|
|
|
result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10,
|
|
|
|
result[i].chunk_sizek >> 10);
|
2008-05-10 05:40:52 +00:00
|
|
|
printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n",
|
2008-05-02 09:40:22 +00:00
|
|
|
result[i].num_reg, result[i].bad?"-":"",
|
|
|
|
result[i].lose_cover_sizek >> 10);
|
|
|
|
}
|
2008-04-29 10:52:33 +00:00
|
|
|
|
2008-05-02 09:40:22 +00:00
|
|
|
/* try to find the optimal index */
|
|
|
|
if (nr_mtrr_spare_reg >= num_var_ranges)
|
|
|
|
nr_mtrr_spare_reg = num_var_ranges - 1;
|
|
|
|
num_reg_good = -1;
|
2008-05-10 05:40:52 +00:00
|
|
|
for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
|
2008-09-29 20:39:17 +00:00
|
|
|
if (!min_loss_pfn[i])
|
2008-05-02 09:40:22 +00:00
|
|
|
num_reg_good = i;
|
|
|
|
}
|
|
|
|
|
|
|
|
index_good = -1;
|
|
|
|
if (num_reg_good != -1) {
|
|
|
|
for (i = 0; i < NUM_RESULT; i++) {
|
|
|
|
if (!result[i].bad &&
|
|
|
|
result[i].num_reg == num_reg_good &&
|
|
|
|
!result[i].lose_cover_sizek) {
|
|
|
|
index_good = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (index_good != -1) {
|
|
|
|
printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
|
|
|
|
i = index_good;
|
2008-05-10 05:40:52 +00:00
|
|
|
printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t",
|
2008-05-02 09:40:22 +00:00
|
|
|
result[i].gran_sizek >> 10,
|
|
|
|
result[i].chunk_sizek >> 10);
|
2008-05-10 05:40:52 +00:00
|
|
|
printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n",
|
2008-05-02 09:40:22 +00:00
|
|
|
result[i].num_reg,
|
|
|
|
result[i].lose_cover_sizek >> 10);
|
|
|
|
/* convert ranges to var ranges state */
|
|
|
|
chunk_size = result[i].chunk_sizek;
|
|
|
|
chunk_size <<= 10;
|
|
|
|
gran_size = result[i].gran_sizek;
|
|
|
|
gran_size <<= 10;
|
2008-09-27 07:30:06 +00:00
|
|
|
debug_print++;
|
2008-05-02 09:40:22 +00:00
|
|
|
x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
|
2008-09-27 07:30:06 +00:00
|
|
|
debug_print--;
|
2008-05-02 09:40:22 +00:00
|
|
|
set_var_mtrr_all(address_bits);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
|
|
|
|
printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
|
|
|
|
|
|
|
|
return 0;
|
2008-04-29 10:52:33 +00:00
|
|
|
}
#else
static int __init mtrr_cleanup(unsigned address_bits)
{
	return 0;
}
#endif

static int __initdata changed_by_mtrr_cleanup;
static int disable_mtrr_trim;

static int __init disable_mtrr_trim_setup(char *str)
{
	disable_mtrr_trim = 1;
	return 0;
}
early_param("disable_mtrr_trim", disable_mtrr_trim_setup);

/*
 * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
 * for memory >4GB. Check for that here.
 * Note this won't check if the MTRRs < 4GB where the magic bit doesn't
 * apply to are wrong, but so far we don't know of any such case in the wild.
 */
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)

int __init amd_special_default_mtrr(void)
{
	u32 l, h;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;
	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
		return 0;
	/* In case some hypervisor doesn't pass SYSCFG through */
	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
		return 0;
	/*
	 * Memory between 4GB and top of mem is forced WB by this magic bit.
	 * Reserved before K8RevF, but should be zero there.
	 */
	if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
	    (Tom2Enabled | Tom2ForceMemTypeWB))
		return 1;
	return 0;
}
static u64 __init real_trim_memory(unsigned long start_pfn,
				   unsigned long limit_pfn)
{
	u64 trim_start, trim_size;

	trim_start = start_pfn;
	trim_start <<= PAGE_SHIFT;
	trim_size = limit_pfn;
	trim_size <<= PAGE_SHIFT;
	trim_size -= trim_start;

	return e820_update_range(trim_start, trim_size, E820_RAM,
				 E820_RESERVED);
}
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
/**
|
|
|
|
* mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
|
2008-02-18 21:10:44 +00:00
|
|
|
* @end_pfn: ending page frame number
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
*
|
|
|
|
* Some buggy BIOSes don't setup the MTRRs properly for systems with certain
|
|
|
|
* memory configurations. This routine checks that the highest MTRR matches
|
|
|
|
* the end of memory, to make sure the MTRRs having a write back type cover
|
|
|
|
* all of the memory the kernel is intending to use. If not, it'll trim any
|
|
|
|
* memory off the end by adjusting end_pfn, removing it from the kernel's
|
|
|
|
* allocation pools, warning the user with an obnoxious message.
|
|
|
|
*/
|
|
|
|
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
|
|
|
|
{
|
2008-02-06 21:39:45 +00:00
|
|
|
unsigned long i, base, size, highest_pfn = 0, def, dummy;
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
mtrr_type type;
|
2008-04-29 08:59:49 +00:00
|
|
|
int nr_range;
|
2008-05-02 09:40:22 +00:00
|
|
|
u64 total_trim_size;
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
|
2008-04-29 08:59:49 +00:00
|
|
|
/* extra one for all 0 */
|
|
|
|
int num[MTRR_NUM_TYPES + 1];
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
/*
|
|
|
|
* Make sure we only trim uncachable memory on machines that
|
|
|
|
* support the Intel MTRR architecture:
|
|
|
|
*/
|
2008-01-30 12:33:32 +00:00
|
|
|
if (!is_cpu(INTEL) || disable_mtrr_trim)
|
|
|
|
return 0;
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
rdmsr(MTRRdefType_MSR, def, dummy);
|
|
|
|
def &= 0xff;
|
2008-01-30 12:33:32 +00:00
|
|
|
if (def != MTRR_TYPE_UNCACHABLE)
|
|
|
|
return 0;
|
|
|
|
|
2008-05-02 09:40:22 +00:00
|
|
|
/* get it and store it aside */
|
|
|
|
memset(range_state, 0, sizeof(range_state));
|
x86, 32-bit: trim memory not covered by wb mtrrs
On some machines, buggy BIOSes don't properly setup WB MTRRs to cover all
available RAM, meaning the last few megs (or even gigs) of memory will be
marked uncached. Since Linux tends to allocate from high memory addresses
first, this causes the machine to be unusably slow as soon as the kernel
starts really using memory (i.e. right around init time).
This patch works around the problem by scanning the MTRRs at boot and
figuring out whether the current end_pfn value (setup by early e820 code)
goes beyond the highest WB MTRR range, and if so, trimming it to match. A
fairly obnoxious KERN_WARNING is printed too, letting the user know that
not all of their memory is available due to a likely BIOS bug.
Something similar could be done on i386 if needed, but the boot ordering
would be slightly different, since the MTRR code on i386 depends on the
boot_cpu_data structure being setup.
This patch fixes a bug in the last patch that caused the code to run on
non-Intel machines (AMD machines apparently don't need it and it's untested
on other non-Intel machines, so best keep it off).
Further enhancements and fixes from:
Yinghai Lu <Yinghai.Lu@Sun.COM>
Andi Kleen <ak@suse.de>
Signed-off-by: Jesse Barnes <jesse.barnes@intel.com>
Tested-by: Justin Piszcz <jpiszcz@lucidpixels.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-01-30 12:33:18 +00:00
|
|
|
for (i = 0; i < num_var_ranges; i++) {
|
|
|
|
mtrr_if->get(i, &base, &size, &type);
|
2008-05-02 09:40:22 +00:00
|
|
|
range_state[i].base_pfn = base;
|
|
|
|
range_state[i].size_pfn = size;
|
|
|
|
range_state[i].type = type;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find highest cached pfn */
|
|
|
|
for (i = 0; i < num_var_ranges; i++) {
|
|
|
|
type = range_state[i].type;
|
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		if (highest_pfn < base + size)
			highest_pfn = base + size;
	}

	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
	if (!highest_pfn) {
		WARN(!kvm_para_available(), KERN_WARNING
		     "WARNING: strange, CPU MTRRs all blank?\n");
		return 0;
	}
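	/*
	 * Worked example (editor's note): pfns count PAGE_SIZE (4KB) pages
	 * here, so a single WB range covering pfns [0, 0x140000) leaves
	 * highest_pfn at 0x140000, i.e. 5GB of write-back cacheable memory.
	 */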
	/* check entries number */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type >= MTRR_NUM_TYPES)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* no entry for WB? */
	if (!num[MTRR_TYPE_WRBACK])
		return 0;

	/* check if we only had WB and UC */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;
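	/*
	 * Worked example (editor's note): with 8 variable ranges of which
	 * three are WB, one is UC and four are empty, num[] ends up with
	 * num[MTRR_TYPE_WRBACK] = 3, num[MTRR_TYPE_UNCACHABLE] = 1 and
	 * num[MTRR_NUM_TYPES] = 4, so 3 + 1 == 8 - 4 holds and we go on.
	 * Any WC/WT/WP entry would break the equality and we would bail
	 * out above, since trimming is only safe for pure WB+UC setups.
	 */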
	memset(range, 0, sizeof(range));
	nr_range = 0;
	if (mtrr_tom2) {
		range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
		range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1;
		if (highest_pfn < range[nr_range].end + 1)
			highest_pfn = range[nr_range].end + 1;
		nr_range++;
	}
	nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
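	/*
	 * Editor's note on the arithmetic above: with PAGE_SHIFT = 12,
	 * (1ULL << (32 - PAGE_SHIFT)) is pfn 0x100000, the 4GB boundary,
	 * and mtrr_tom2 >> PAGE_SHIFT converts the byte-granular TOM2
	 * register to a pfn; the - 1 makes range[].end inclusive. A TOM2 of
	 * 0x140000000 (5GB) thus yields the pfn range [0x100000, 0x13ffff].
	 */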
	total_trim_size = 0;
	/* check the head */
	if (range[0].start)
		total_trim_size += real_trim_memory(0, range[0].start);
	/* check the holes */
	for (i = 0; i < nr_range - 1; i++) {
		if (range[i].end + 1 < range[i+1].start)
			total_trim_size += real_trim_memory(range[i].end + 1,
							    range[i+1].start);
	}
	/* check the top */
	i = nr_range - 1;
	if (range[i].end + 1 < end_pfn)
		total_trim_size += real_trim_memory(range[i].end + 1,
						    end_pfn);
	if (total_trim_size) {
		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
			" all of memory, losing %lluMB of RAM.\n",
			total_trim_size >> 20);

		if (!changed_by_mtrr_cleanup)
			WARN_ON(1);

		printk(KERN_INFO "update e820 for mtrr\n");
		update_e820();

		return 1;
	}

	return 0;
}
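/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * head/holes/top walk in mtrr_trim_uncached_memory() assumes range[] is
 * sorted by start pfn and that .end is inclusive. A hypothetical
 * stand-alone version that merely counts the uncovered pfns instead of
 * calling real_trim_memory() could look like this:
 */
struct example_range {
	unsigned long start;	/* first pfn, inclusive */
	unsigned long end;	/* last pfn, inclusive */
};

static unsigned long __maybe_unused
example_uncovered_pfns(const struct example_range *r, int nr,
		       unsigned long end_pfn)
{
	unsigned long missing = 0;
	int i;

	if (r[0].start)				/* head */
		missing += r[0].start;
	for (i = 0; i < nr - 1; i++)		/* holes */
		if (r[i].end + 1 < r[i + 1].start)
			missing += r[i + 1].start - (r[i].end + 1);
	if (r[nr - 1].end + 1 < end_pfn)	/* top */
		missing += end_pfn - (r[nr - 1].end + 1);
	return missing;
}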

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/* This is an AMD-specific CPUID leaf, but we assume (hope?)
		   that Intel will implement it too when they extend the
		   address bus of the Xeon. */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
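			/*
			 * Editor's note: EAX[7:0] of CPUID leaf 0x80000008
			 * reports the physical address width in bits (e.g.
			 * 40 on early AMD64 parts), hence the & 0xff.
			 */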
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
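			/*
			 * Worked example (editor's note): for phys_addr = 36
			 * and PAGE_SHIFT = 12, (1ULL << 24) - 1 = 0xffffff,
			 * so size_or_mask = 0xffffffffff000000 and
			 * size_and_mask = ~size_or_mask & 0xfffff00000ULL
			 * = 0xf00000; the CPUID-reported width simply
			 * replaces the provisional 36-bit masks set above.
			 */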
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/* VIA C* family have Intel style MTRRs,
			   but don't support PAE */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}
}

void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine will be called at CPU boot time,
	 * and holding the lock breaks it. This routine is called in two
	 * cases: 1. very early in software resume, when there are
	 * absolutely no MTRR entry changes; 2. at CPU hot-add time. We let
	 * mtrr_add/del_page hold the cpuhotplug lock to prevent MTRR entry
	 * changes.
	 */
	local_irq_save(flags);

	mtrr_if->set_all();

	local_irq_restore(flags);
}
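/*
 * Editor's note: mtrr_if->set_all() reprograms every MTRR with the cache
 * disabled, so it must not be interrupted halfway through; that is why
 * mtrr_ap_init() brackets it with local_irq_save()/local_irq_restore().
 */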
[PATCH] x86: Save the MTRRs of the BSP before booting an AP
Applied fix by Andrew Morton:
http://lkml.org/lkml/2007/4/8/88 - Fix `make headers_check'.
AMD and Intel x86 CPU manuals state that it is the responsibility of
system software to initialize and maintain MTRR consistency across
all processors in Multi-Processing Environments.
Quote from page 188 of the AMD64 System Programming manual (Volume 2):
7.6.5 MTRRs in Multi-Processing Environments
"In multi-processing environments, the MTRRs located in all processors must
characterize memory in the same way. Generally, this means that identical
values are written to the MTRRs used by the processors." (short omission here)
"Failure to do so may result in coherency violations or loss of atomicity.
Processor implementations do not check the MTRR settings in other processors
to ensure consistency. It is the responsibility of system software to
initialize and maintain MTRR consistency across all processors."
Current Linux MTRR code already implements the above in the case that the
BIOS does not properly initialize MTRRs on the secondary processors,
but the case where the fixed-range MTRRs of the boot processor are changed
after Linux started to boot, before the initialisation of a secondary
processor, is not handled yet.
In this case, secondary processors are currently initialized by Linux
with MTRRs which the boot processor had very early, when mtrr_bp_init()
did run, but not with the MTRRs which the boot processor uses at the
time when that secondary processor is actually booted,
causing differing MTRR contents on the secondary processors.
Such a situation happens on Acer Ferrari 1000 and 5000 notebooks where the
BIOS enables and sets AMD-specific IORR bits in the fixed-range MTRRs
of the boot processor when it transitions the system into ACPI mode.
The SMI handler of the BIOS does this in SMM, entered while Linux ACPI
code runs acpi_enable().
Other occasions where the SMI handler of the BIOS may change bits in
the MTRRs could occur as well. To initialize newly booted secondary
processors with the fixed-range MTRRs which the boot processor uses
at that time, this patch saves the fixed-range MTRRs of the boot
processor before new secondary processors are started. When the
secondary processors run their Linux initialisation code, their
fixed-range MTRRs will be updated with the saved fixed-range MTRRs.
If CONFIG_MTRR is not set, we define mtrr_save_state
as an empty statement because there is nothing to do.
Possible TODOs:
*) CPU-hotplugging outside of SMP suspend/resume is not yet tested
   with this patch.
*) If, even in this case, an AP never runs i386/do_boot_cpu or x86_64/cpu_up,
   then the calls to mtrr_save_state() could be replaced by calls to
   mtrr_save_fixed_ranges(NULL) and mtrr_save_state() would not be
   needed. That would need either verification of the CPU-hotplug code
   or at least a test on a >2 CPU machine.
*) The MTRRs of other running processors are not yet checked at this
   time but it might be interesting to synchronize the MTRRs of all
   processors before booting. That would be an incremental patch,
   but of rather low priority since there is no machine known so
   far which would require this.
AK: moved prototypes on x86-64 around to fix warnings
Signed-off-by: Bernhard Kaindl <bk@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Cc: Dave Jones <davej@codemonkey.org.uk>
2007-05-02 17:27:17 +00:00
/**
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}
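/*
 * Editor's note: smp_call_function_single(0, ...) runs
 * mtrr_save_fixed_ranges() on CPU 0, the BSP; the final argument 1 makes
 * the caller wait for completion, so the saved copy is coherent before
 * any AP is started.
 */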

static int __init mtrr_init_finialize(void)
{
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
	} else {
		/* These CPUs have no MTRRs and seem not to support SMP.
		 * They have specific drivers; we use a tricky method to
		 * support suspend/resume for them.
		 * TBD: is there any system with such a CPU that supports
		 * suspend/resume? If not, we should remove the code.
		 */
		sysdev_driver_register(&cpu_sysdev_class,
				       &mtrr_sysdev_driver);
	}
	return 0;
}

subsys_initcall(mtrr_init_finialize);