mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-23 01:40:30 +00:00)
345077cd98
Impact: fix wrong cache sharing detection on platforms supporting > 8 bit apicids

When cpuid provides the extended topology enumeration leaf 0xb, the 32-bit
extended initial_apicid in the cpuinfo_x86 struct is updated by
detect_extended_topology(). At that point we should also reinit the apicid
(which could likewise be extended to 32 bits). Without this, duplicate
apicids can end up in the per-cpu cpuinfo_x86 structs, resulting in wrong
cache sharing topology etc. being detected by init_intel_cacheinfo().

Reported-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
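Below is a minimal user-space sketch of the failure mode described above, assuming a hypothetical fake_cpuinfo struct and illustrative apicid values (this is not the kernel's code): when only the low 8 bits of the apicid are kept while leaf 0xb hands out wider x2apic ids, distinct CPUs collapse onto the same apicid, which is what confuses the cache sharing detection.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the relevant cpuinfo_x86 fields. */
struct fake_cpuinfo {
        uint32_t initial_apicid;        /* 32-bit id from leaf 0xb (EDX) */
        uint8_t  apicid;                /* legacy 8-bit id, never reinited */
};

int main(void)
{
        /* Illustrative x2apic ids above 255, as on a large system. */
        struct fake_cpuinfo cpu0 = { .initial_apicid = 0x100 };
        struct fake_cpuinfo cpu1 = { .initial_apicid = 0x200 };

        /* Without the reinit, apicid keeps only the truncated low byte. */
        cpu0.apicid = (uint8_t)cpu0.initial_apicid;     /* 0x00 */
        cpu1.apicid = (uint8_t)cpu1.initial_apicid;     /* 0x00 */

        /* Sharing detection keyed on apicid now sees duplicates. */
        printf("cpu0 apicid=%#x cpu1 apicid=%#x -> %s\n",
               cpu0.apicid, cpu1.apicid,
               cpu0.apicid == cpu1.apicid ? "collision" : "distinct");
        return 0;
}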
180 lines · 4.2 KiB · C
/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <mach_apic.h>

struct cpuid_bit {
        u16 feature;
        u8 reg;
        u8 bit;
        u32 level;
};

enum cpuid_regs {
        CR_EAX = 0,
        CR_ECX,
        CR_EDX,
        CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
        u32 max_level;
        u32 regs[4];
        const struct cpuid_bit *cb;

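        /*
         * Table of feature bits that live outside the main cpuid feature
         * words: each entry names the cpuid level to query and the
         * register/bit to test. IDA (Intel Dynamic Acceleration) is
         * reported in leaf 0x6, EAX, bit 1.
         */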
        static const struct cpuid_bit cpuid_bits[] = {
                { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
                { 0, 0, 0, 0 }
        };

        for (cb = cpuid_bits; cb->feature; cb++) {

                /* Verify that the level is valid */
                max_level = cpuid_eax(cb->level & 0xffff0000);
                if (max_level < cb->level ||
                    max_level > (cb->level | 0xffff))
                        continue;

                cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
                      &regs[CR_ECX], &regs[CR_EDX]);

                if (regs[cb->reg] & (1 << cb->bit))
                        set_cpu_cap(c, cb->feature);
        }
}

/* leaf 0xb SMT level */
#define SMT_LEVEL       0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE    0
#define SMT_TYPE        1
#define CORE_TYPE       2

#define LEAFB_SUBTYPE(ecx)              (((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)      ((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)         ((ebx) & 0xffff)
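
/*
 * Register layout of each leaf 0xb sub-leaf: EAX[4:0] is the number of
 * apicid bits to shift out to reach the next topology level, EBX[15:0]
 * is the number of logical processors at this level, ECX[15:8] is the
 * level type, and EDX is the full 32-bit x2apic id of this logical CPU.
 */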

/*
 * Check for the extended topology enumeration cpuid leaf 0xb and, if it
 * exists, use it to populate initial_apicid and to detect the cpu
 * topology.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;

        if (c->cpuid_level < 0xb)
                return;

        cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

        /*
         * Check if the cpuid leaf 0xb is actually implemented.
         */
        if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
                return;

        set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

        /*
         * initial apic id, which also represents 32-bit extended x2apic id.
         */
        c->initial_apicid = edx;

        /*
         * Populate HT related information from sub-leaf level 0.
         */
        core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
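
        /*
         * Walk the remaining sub-leaves looking for the core level.
         * Illustrative values: a package with 2 threads per core and
         * 4 cores would report shift width 1 at the SMT level and
         * shift width 3 at the core level.
         */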
        sub_index = 1;
        do {
                cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

                /*
                 * Check for the Core type in the implemented sub leaves.
                 */
                if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
                        core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
                        core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
                        break;
                }

                sub_index++;
        } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

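        /*
         * Mask selecting the core-number bits that sit between the SMT
         * bits and the package bits; e.g. with ht_mask_width = 1 and
         * core_plus_mask_width = 3 this is (~(-1 << 3)) >> 1 = 0x3.
         */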
        core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;

#ifdef CONFIG_X86_32
        c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
                                                & core_select_mask;
        c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
        c->apicid = phys_pkg_id(c->initial_apicid, 0);
#else
        c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
        c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
        c->apicid = phys_pkg_id(0);
#endif
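        /*
         * Cores per package = logical CPUs at the core level divided by
         * logical CPUs per core (e.g. 8 / 2 = 4).
         */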
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);

        printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
               c->phys_proc_id);
        if (c->x86_max_cores > 1)
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        return;
#endif
}

#ifdef CONFIG_X86_PAT
void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
{
        if (!cpu_has_pat)
                pat_disable("PAT not supported by CPU.");

        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * There is a known erratum on Pentium III and Core Solo
                 * and Core Duo CPUs.
                 * " Page with PAT set to WC while associated MTRR is UC
                 * may consolidate to UC "
                 * Because of this erratum, it is better to stick with
                 * setting WC in MTRR rather than using PAT on these CPUs.
                 *
                 * Enable PAT WC only on P4, Core 2 or later CPUs.
                 */
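                /*
                 * e.g. Core 2 (family 6, model 15) keeps PAT below,
                 * while Core Duo (family 6, model 14) falls through to
                 * pat_disable().
                 */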
                if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
                        return;

                pat_disable("PAT WC disabled due to known CPU erratum.");
                return;

        case X86_VENDOR_AMD:
        case X86_VENDOR_CENTAUR:
        case X86_VENDOR_TRANSMETA:
                return;
        }

        pat_disable("PAT disabled. Not yet verified on this CPU type.");
}
#endif