mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-28 12:25:31 +00:00
x86/tsc: Validate TSC_ADJUST after resume
Some 'feature' BIOSes fiddle with the TSC_ADJUST register during suspend/resume which renders the TSC unusable. Add sanity checks into the resume path and restore the original value if it was adjusted. Reported-and-tested-by: Roland Scheidegger <rscheidegger_lists@hispeed.ch> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Bruce Schlobohm <bruce.schlobohm@intel.com> Cc: Kevin Stanton <kevin.b.stanton@intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Allen Hung <allen_hung@dell.com> Cc: Borislav Petkov <bp@alien8.de> Link: http://lkml.kernel.org/r/20161213131211.317654500@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
31f8a651fc
commit
6a36958317
@@ -47,12 +47,12 @@ extern int tsc_clocksource_reliable;
  */
 #ifdef CONFIG_X86_TSC
 extern bool tsc_store_and_check_tsc_adjust(void);
-extern void tsc_verify_tsc_adjust(void);
+extern void tsc_verify_tsc_adjust(bool resume);
 extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 #else
 static inline bool tsc_store_and_check_tsc_adjust(void) { return false; }
-static inline void tsc_verify_tsc_adjust(void) { }
+static inline void tsc_verify_tsc_adjust(bool resume) { }
 static inline void check_tsc_sync_source(int cpu) { }
 static inline void check_tsc_sync_target(void) { }
 #endif
@@ -277,7 +277,7 @@ void exit_idle(void)

 void arch_cpu_idle_enter(void)
 {
-	tsc_verify_tsc_adjust();
+	tsc_verify_tsc_adjust(false);
 	local_touch_nmi();
 	enter_idle();
 }
@@ -1080,6 +1080,11 @@ static void detect_art(void)

 static struct clocksource clocksource_tsc;

+static void tsc_resume(struct clocksource *cs)
+{
+	tsc_verify_tsc_adjust(true);
+}
+
 /*
  * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
@@ -1112,6 +1117,7 @@ static struct clocksource clocksource_tsc = {
 	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
 	.archdata               = { .vclock_mode = VCLOCK_TSC },
+	.resume			= tsc_resume,
 };

 void mark_tsc_unstable(char *reason)
@@ -30,7 +30,7 @@ struct tsc_adjust {

 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);

-void tsc_verify_tsc_adjust(void)
+void tsc_verify_tsc_adjust(bool resume)
 {
 	struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
 	s64 curval;
@@ -39,7 +39,7 @@ void tsc_verify_tsc_adjust(void)
 		return;

 	/* Rate limit the MSR check */
-	if (time_before(jiffies, adj->nextcheck))
+	if (!resume && time_before(jiffies, adj->nextcheck))
 		return;

 	adj->nextcheck = jiffies + HZ;
@@ -51,7 +51,7 @@ void tsc_verify_tsc_adjust(void)
 	/* Restore the original value */
 	wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);

-	if (!adj->warned) {
+	if (!adj->warned || resume) {
 		pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
 			smp_processor_id(), adj->adjusted, curval);
 		adj->warned = true;
@@ -252,6 +252,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
	fix_processor_context();

 	do_fpu_end();
+	tsc_verify_tsc_adjust(true);
 	x86_platform.restore_sched_clock_state();
 	mtrr_bp_restore();
 	perf_restore_debug_store();
Loading…
Reference in New Issue
Block a user