Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "Initial round of kernel_fpu_begin/end cleanups from Oleg Nesterov,
  plus a cleanup from Borislav Petkov"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Fix math_state_restore() race with kernel_fpu_begin()
  x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()
  x86, fpu: Introduce per-cpu in_kernel_fpu state
  x86/fpu: Use a symbolic name for asm operand
Linus Torvalds 2015-02-09 18:01:52 -08:00
commit c93ecedab3
4 changed files with 42 additions and 25 deletions


@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	if (config_enabled(CONFIG_X86_32))
 		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. "m" is a random variable that should be in L1 */
+	/*
+	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+	 * pending. Clear the x87 state here by setting it to fixed values.
+	 * "m" is a random variable that should be in L1.
+	 */
 	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"

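The last patch in the series only switches the fxsaveq operand from the positional %0 form to the named %[fx] form shown above. As a standalone illustration of GCC's named asm operands, here is a minimal sketch that is not from the kernel tree (the function and variable names are invented); it compiles with GCC or Clang on x86-64, since the template uses AT&T-syntax lea:

#include <stdint.h>
#include <stdio.h>

static uint64_t add_one(uint64_t in)
{
	uint64_t out;

	/* [res] and [val] label the operands; the template refers to them
	 * by name, which stays readable if operands are added or reordered. */
	asm("lea 1(%[val]), %[res]"
	    : [res] "=r" (out)
	    : [val] "r" (in));
	return out;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)add_one(41));	/* prints 42 */
	return 0;
}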

@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
 static inline void kernel_fpu_begin(void)
 {
-	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
+	WARN_ON_ONCE(!irq_fpu_usable());
 	__kernel_fpu_begin();
 }
@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+/* Must be called with preempt disabled */
+extern void kernel_fpu_disable(void);
+extern void kernel_fpu_enable(void);
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions

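For orientation, the contract these header changes harden: any kernel-mode FPU/SIMD use has to be bracketed by kernel_fpu_begin()/kernel_fpu_end(), and the code in between must not sleep, since preemption stays disabled for the whole region. A hedged caller sketch follows; the function name is invented, and the include is an assumption reflecting the 2015-era layout where these helpers lived in <asm/i387.h>:

#include <linux/kernel.h>
#include <asm/i387.h>	/* kernel_fpu_begin()/kernel_fpu_end() in this era */

static void example_simd_copy(void *dst, const void *src, size_t len)
{
	kernel_fpu_begin();	/* disables preemption, saves the user FPU state if the task owns it */

	/* SSE/AVX instructions operating on dst/src/len would go here;
	 * the region must not sleep. */

	kernel_fpu_end();	/* restores or drops FPU ownership, re-enables preemption */
}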

@@ -19,6 +19,19 @@
 #include <asm/fpu-internal.h>
 #include <asm/user.h>
 
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+void kernel_fpu_disable(void)
+{
+	WARN_ON(this_cpu_read(in_kernel_fpu));
+	this_cpu_write(in_kernel_fpu, true);
+}
+
+void kernel_fpu_enable(void)
+{
+	this_cpu_write(in_kernel_fpu, false);
+}
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
@@ -33,6 +46,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
 	if (use_eager_fpu())
 		return __thread_has_fpu(current);
@@ -73,10 +89,10 @@ void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;
 
+	this_cpu_write(in_kernel_fpu, true);
+
 	if (__thread_has_fpu(me)) {
-		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
-		/* We do 'stts()' in __kernel_fpu_end() */
 	} else if (!use_eager_fpu()) {
 		this_cpu_write(fpu_owner_task, NULL);
 		clts();
@@ -86,19 +102,16 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 void __kernel_fpu_end(void)
 {
-	if (use_eager_fpu()) {
-		/*
-		 * For eager fpu, most the time, tsk_used_math() is true.
-		 * Restore the user math as we are done with the kernel usage.
-		 * At few instances during thread exit, signal handling etc,
-		 * tsk_used_math() is false. Those few places will take proper
-		 * actions, so we don't need to restore the math here.
-		 */
-		if (likely(tsk_used_math(current)))
-			math_state_restore();
-	} else {
+	struct task_struct *me = current;
+
+	if (__thread_has_fpu(me)) {
+		if (WARN_ON(restore_fpu_checking(me)))
+			drop_init_fpu(me);
+	} else if (!use_eager_fpu()) {
 		stts();
 	}
+
+	this_cpu_write(in_kernel_fpu, false);
 }
 EXPORT_SYMBOL(__kernel_fpu_end);

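The new in_kernel_fpu state is a single per-CPU boolean, only ever read and written on the local CPU with preemption disabled, so no atomics or locking are needed. The same pattern in isolation, as a sketch with invented names (my_busy, my_enter, my_leave), assuming only the standard per-cpu API:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/bug.h>

static DEFINE_PER_CPU(bool, my_busy);

/* Callers must have preemption disabled so the CPU cannot change under us. */
static void my_enter(void)
{
	WARN_ON(this_cpu_read(my_busy));	/* catches unbalanced nesting */
	this_cpu_write(my_busy, true);
}

static void my_leave(void)
{
	this_cpu_write(my_busy, false);
}

/* Interrupt-context code checks the flag before re-entering the section. */
static bool my_section_is_free(void)
{
	return !this_cpu_read(my_busy);
}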

@@ -859,18 +859,16 @@ void math_state_restore(void)
 		local_irq_disable();
 	}
 
+	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
+	kernel_fpu_disable();
 	__thread_fpu_begin(tsk);
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
 		drop_init_fpu(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-		return;
-	}
-
-	tsk->thread.fpu_counter++;
+	} else {
+		tsk->thread.fpu_counter++;
+	}
+	kernel_fpu_enable();
 }
EXPORT_SYMBOL_GPL(math_state_restore);
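Read together, the race the first commit title refers to appears to be the window math_state_restore() opens between __thread_fpu_begin(tsk) and the actual register restore: an interrupt in that window can pass irq_fpu_usable() (in the eager-FPU case, since the task now appears to own the FPU), and __kernel_fpu_begin() then saves the not-yet-restored register contents over the task's FPU image. A sketch of the interleaving as reconstructed from these hunks, not code from the tree:

/*
 * Hypothetical interleaving before the fix:
 *
 *   math_state_restore()                    interrupt handler
 *   --------------------                    -----------------
 *   __thread_fpu_begin(tsk);
 *          <interrupt> ------------------>  kernel_fpu_begin()
 *                                             irq_fpu_usable() -> true
 *                                             __save_init_fpu(tsk)   saves stale registers
 *                                                                    over tsk's FPU image
 *                                             ... FPU use ...
 *          <------------------------------  kernel_fpu_end()
 *   restore_fpu_checking(tsk);              restores a corrupted image
 *
 * With the fix, kernel_fpu_disable() sets in_kernel_fpu before
 * __thread_fpu_begin(); interrupted_kernel_fpu_idle() then returns false,
 * so irq_fpu_usable() reports false and the interrupt avoids the FPU.
 * kernel_fpu_enable() reopens the window once the restore is complete.
 */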