Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 FPU state handling updates from Borislav Petkov:
 "This contains work started by Rik van Riel and brought to fruition by
  Sebastian Andrzej Siewior with the main goal to optimize when to load
  FPU registers: only when returning to userspace and not on every
  context switch (while the task remains in the kernel).

  In addition, this optimization makes kernel_fpu_begin() cheaper by
  requiring registers saving only on the first invocation and skipping
  that in following ones.

  What is more, this series cleans up and streamlines many aspects of
  the already complex FPU code, hopefully making it more palatable for
  future improvements and simplifications.

  Finally, there's a __user annotations fix from Jann Horn"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  x86/fpu: Fault-in user stack if copy_fpstate_to_sigframe() fails
  x86/pkeys: Add PKRU value to init_fpstate
  x86/fpu: Restore regs in copy_fpstate_to_sigframe() in order to use the fastpath
  x86/fpu: Add a fastpath to copy_fpstate_to_sigframe()
  x86/fpu: Add a fastpath to __fpu__restore_sig()
  x86/fpu: Defer FPU state load until return to userspace
  x86/fpu: Merge the two code paths in __fpu__restore_sig()
  x86/fpu: Restore from kernel memory on the 64-bit path too
  x86/fpu: Inline copy_user_to_fpregs_zeroing()
  x86/fpu: Update xstate's PKRU value on write_pkru()
  x86/fpu: Prepare copy_fpstate_to_sigframe() for TIF_NEED_FPU_LOAD
  x86/fpu: Always store the registers in copy_fpstate_to_sigframe()
  x86/entry: Add TIF_NEED_FPU_LOAD
  x86/fpu: Eager switch PKRU state
  x86/pkeys: Don't check if PKRU is zero before writing it
  x86/fpu: Only write PKRU if it is different from current
  x86/pkeys: Provide *pkru() helpers
  x86/fpu: Use a feature number instead of mask in two more helpers
  x86/fpu: Make __raw_xsave_addr() use a feature number instead of mask
  x86/fpu: Add an __fpregs_load_activate() internal helper
  ...
commit 8ff468c29e
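Before the diff itself, a rough illustration of the scheme this series implements may help: instead of reloading the FPU registers on every context switch, the kernel now only marks the task with TIF_NEED_FPU_LOAD and performs a single restore on the way back to userspace. The following is a minimal, self-contained userspace sketch of that idea, not kernel code; every name in it (struct fpstate, need_fpu_load, the three helper functions) is an invented stand-in for the kernel's struct fpu, TIF_NEED_FPU_LOAD and the switch_fpu_*()/kernel_fpu_*() helpers.

/*
 * Hypothetical userspace model of the deferred FPU restore described in
 * the merge message above. NOT kernel code: "registers" are a plain
 * struct and the thread flag is a bool.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fpstate { double st[8]; };      /* stand-in for the XSAVE area */

static struct fpstate cpu_regs;        /* stand-in for the live FPU registers */
static struct fpstate task_fpstate;    /* per-task save area (fpu->state) */
static bool need_fpu_load;             /* models TIF_NEED_FPU_LOAD */

static void context_switch_out(void)
{
	/* switch_fpu_prepare(): save the live registers once ... */
	memcpy(&task_fpstate, &cpu_regs, sizeof(cpu_regs));
	/* switch_fpu_finish(): ... and only mark the state as "needs load". */
	need_fpu_load = true;
}

static void kernel_fpu_section(void)
{
	/* kernel_fpu_begin(): the registers were already saved above, so the
	 * kernel may clobber them without saving again. */
	cpu_regs.st[0] = -1.0;
}

static void return_to_userspace(void)
{
	/* switch_fpu_return(): restore only here, not on every switch. */
	if (need_fpu_load) {
		memcpy(&cpu_regs, &task_fpstate, sizeof(cpu_regs));
		need_fpu_load = false;
	}
}

int main(void)
{
	cpu_regs.st[0] = 3.14;       /* user task uses the FPU */
	context_switch_out();        /* task schedules out     */
	kernel_fpu_section();        /* kernel clobbers the FPU */
	return_to_userspace();       /* lazy restore            */
	printf("st0 = %.2f (expected 3.14)\n", cpu_regs.st[0]);
	return 0;
}

The benefit is visible in kernel_fpu_section(): because the registers were already saved when the task scheduled out, kernel FPU use needs no extra save, and the one restore happens in return_to_userspace().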
@ -52,7 +52,6 @@ preemption must be disabled around such regions.
|
||||
|
||||
Note, some FPU functions are already explicitly preempt safe. For example,
|
||||
kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
|
||||
However, fpu__restore() must be called with preemption disabled.
|
||||
|
||||
|
||||
RULE #3: Lock acquire and release must be performed by same task
|
||||
|
@ -25,12 +25,13 @@
|
||||
#include <linux/uprobes.h>
|
||||
#include <linux/livepatch.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include <asm/desc.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/vdso.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/fpu/api.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/syscalls.h>
|
||||
@ -196,6 +197,13 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
|
||||
if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
|
||||
exit_to_usermode_loop(regs, cached_flags);
|
||||
|
||||
/* Reload ti->flags; we may have rescheduled above. */
|
||||
cached_flags = READ_ONCE(ti->flags);
|
||||
|
||||
fpregs_assert_state_consistent();
|
||||
if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
|
||||
switch_fpu_return();
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
/*
|
||||
* Compat syscalls set TS_COMPAT. Make sure we clear it before
|
||||
|
@ -221,8 +221,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
|
||||
size_t frame_size,
|
||||
void __user **fpstate)
|
||||
{
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
unsigned long sp;
|
||||
unsigned long sp, fx_aligned, math_size;
|
||||
|
||||
/* Default to using normal stack */
|
||||
sp = regs->sp;
|
||||
@ -236,15 +235,11 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
|
||||
ksig->ka.sa.sa_restorer)
|
||||
sp = (unsigned long) ksig->ka.sa.sa_restorer;
|
||||
|
||||
if (fpu->initialized) {
|
||||
unsigned long fx_aligned, math_size;
|
||||
|
||||
sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
|
||||
*fpstate = (struct _fpstate_32 __user *) sp;
|
||||
if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
|
||||
math_size) < 0)
|
||||
return (void __user *) -1L;
|
||||
}
|
||||
sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
|
||||
*fpstate = (struct _fpstate_32 __user *) sp;
|
||||
if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
|
||||
math_size) < 0)
|
||||
return (void __user *) -1L;
|
||||
|
||||
sp -= frame_size;
|
||||
/* Align the stack pointer according to the i386 ABI,
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#ifndef _ASM_X86_FPU_API_H
|
||||
#define _ASM_X86_FPU_API_H
|
||||
#include <linux/bottom_half.h>
|
||||
|
||||
/*
|
||||
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
|
||||
@ -21,6 +22,36 @@
|
||||
extern void kernel_fpu_begin(void);
|
||||
extern void kernel_fpu_end(void);
|
||||
extern bool irq_fpu_usable(void);
|
||||
extern void fpregs_mark_activate(void);
|
||||
|
||||
/*
|
||||
* Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
|
||||
* A context switch will (and softirq might) save CPU's FPU registers to
|
||||
* fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
|
||||
* a random state.
|
||||
*/
|
||||
static inline void fpregs_lock(void)
|
||||
{
|
||||
preempt_disable();
|
||||
local_bh_disable();
|
||||
}
|
||||
|
||||
static inline void fpregs_unlock(void)
|
||||
{
|
||||
local_bh_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_DEBUG_FPU
|
||||
extern void fpregs_assert_state_consistent(void);
|
||||
#else
|
||||
static inline void fpregs_assert_state_consistent(void) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Load the task FPU state before returning to userspace.
|
||||
*/
|
||||
extern void switch_fpu_return(void);
|
||||
|
||||
/*
|
||||
* Query the presence of one or more xfeatures. Works on any legacy CPU as well.
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/compat.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/user.h>
|
||||
#include <asm/fpu/api.h>
|
||||
@ -24,14 +25,12 @@
|
||||
/*
|
||||
* High level FPU state handling functions:
|
||||
*/
|
||||
extern void fpu__initialize(struct fpu *fpu);
|
||||
extern void fpu__prepare_read(struct fpu *fpu);
|
||||
extern void fpu__prepare_write(struct fpu *fpu);
|
||||
extern void fpu__save(struct fpu *fpu);
|
||||
extern void fpu__restore(struct fpu *fpu);
|
||||
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
|
||||
extern void fpu__drop(struct fpu *fpu);
|
||||
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
|
||||
extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
|
||||
extern void fpu__clear(struct fpu *fpu);
|
||||
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
|
||||
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
|
||||
@ -122,6 +121,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
|
||||
err; \
|
||||
})
|
||||
|
||||
#define kernel_insn_err(insn, output, input...) \
|
||||
({ \
|
||||
int err; \
|
||||
asm volatile("1:" #insn "\n\t" \
|
||||
"2:\n" \
|
||||
".section .fixup,\"ax\"\n" \
|
||||
"3: movl $-1,%[err]\n" \
|
||||
" jmp 2b\n" \
|
||||
".previous\n" \
|
||||
_ASM_EXTABLE(1b, 3b) \
|
||||
: [err] "=r" (err), output \
|
||||
: "0"(0), input); \
|
||||
err; \
|
||||
})
|
||||
|
||||
#define kernel_insn(insn, output, input...) \
|
||||
asm volatile("1:" #insn "\n\t" \
|
||||
"2:\n" \
|
||||
@ -150,6 +164,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
|
||||
kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
|
||||
}
|
||||
|
||||
static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_X86_32))
|
||||
return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
|
||||
else
|
||||
return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
|
||||
}
|
||||
|
||||
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_X86_32))
|
||||
@ -163,6 +185,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
|
||||
kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
|
||||
}
|
||||
|
||||
static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
|
||||
{
|
||||
return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
|
||||
}
|
||||
|
||||
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
|
||||
{
|
||||
return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
|
||||
@ -362,6 +389,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Restore xstate from kernel space xsave area, return an error code instead of
|
||||
* an exception.
|
||||
*/
|
||||
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
|
||||
{
|
||||
u32 lmask = mask;
|
||||
u32 hmask = mask >> 32;
|
||||
int err;
|
||||
|
||||
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* These must be called with preempt disabled. Returns
|
||||
* 'true' if the FPU state is still intact and we can
|
||||
@ -486,6 +528,25 @@ static inline void fpregs_activate(struct fpu *fpu)
|
||||
trace_x86_fpu_regs_activated(fpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal helper, do not use directly. Use switch_fpu_return() instead.
|
||||
*/
|
||||
static inline void __fpregs_load_activate(void)
|
||||
{
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
if (WARN_ON_ONCE(current->mm == NULL))
|
||||
return;
|
||||
|
||||
if (!fpregs_state_valid(fpu, cpu)) {
|
||||
copy_kernel_to_fpregs(&fpu->state);
|
||||
fpregs_activate(fpu);
|
||||
fpu->last_cpu = cpu;
|
||||
}
|
||||
clear_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
}
|
||||
|
||||
/*
|
||||
* FPU state switching for scheduling.
|
||||
*
|
||||
@ -494,12 +555,23 @@ static inline void fpregs_activate(struct fpu *fpu)
|
||||
* - switch_fpu_prepare() saves the old state.
|
||||
* This is done within the context of the old process.
|
||||
*
|
||||
* - switch_fpu_finish() restores the new state as
|
||||
* necessary.
|
||||
* - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
|
||||
* will get loaded on return to userspace, or when the kernel needs it.
|
||||
*
|
||||
* If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
|
||||
* are saved in the current thread's FPU register state.
|
||||
*
|
||||
* If TIF_NEED_FPU_LOAD is set then CPU's FPU registers may not
|
||||
* hold current()'s FPU registers. It is required to load the
|
||||
* registers before returning to userland or using the content
|
||||
* otherwise.
|
||||
*
|
||||
* The FPU context is only stored/restored for a user task and
|
||||
* ->mm is used to distinguish between kernel and user threads.
|
||||
*/
|
||||
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
|
||||
{
|
||||
if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
|
||||
if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
|
||||
if (!copy_fpregs_to_fpstate(old_fpu))
|
||||
old_fpu->last_cpu = -1;
|
||||
else
|
||||
@ -507,8 +579,7 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
|
||||
|
||||
/* But leave fpu_fpregs_owner_ctx! */
|
||||
trace_x86_fpu_regs_deactivated(old_fpu);
|
||||
} else
|
||||
old_fpu->last_cpu = -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -516,36 +587,32 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
|
||||
*/
|
||||
|
||||
/*
|
||||
* Set up the userspace FPU context for the new task, if the task
|
||||
* has used the FPU.
|
||||
* Load PKRU from the FPU context if available. Delay loading of the
|
||||
* complete FPU state until the return to userland.
|
||||
*/
|
||||
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
|
||||
static inline void switch_fpu_finish(struct fpu *new_fpu)
|
||||
{
|
||||
bool preload = static_cpu_has(X86_FEATURE_FPU) &&
|
||||
new_fpu->initialized;
|
||||
u32 pkru_val = init_pkru_value;
|
||||
struct pkru_state *pk;
|
||||
|
||||
if (preload) {
|
||||
if (!fpregs_state_valid(new_fpu, cpu))
|
||||
copy_kernel_to_fpregs(&new_fpu->state);
|
||||
fpregs_activate(new_fpu);
|
||||
if (!static_cpu_has(X86_FEATURE_FPU))
|
||||
return;
|
||||
|
||||
set_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
|
||||
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
|
||||
return;
|
||||
|
||||
/*
|
||||
* PKRU state is switched eagerly because it needs to be valid before we
|
||||
* return to userland e.g. for a copy_to_user() operation.
|
||||
*/
|
||||
if (current->mm) {
|
||||
pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
|
||||
if (pk)
|
||||
pkru_val = pk->pkru;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Needs to be preemption-safe.
|
||||
*
|
||||
* NOTE! user_fpu_begin() must be used only immediately before restoring
|
||||
* the save state. It does not do any saving/restoring on its own. In
|
||||
* lazy FPU mode, it is just an optimization to avoid a #NM exception,
|
||||
* the task can lose the FPU right after preempt_enable().
|
||||
*/
|
||||
static inline void user_fpu_begin(void)
|
||||
{
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
|
||||
preempt_disable();
|
||||
fpregs_activate(fpu);
|
||||
preempt_enable();
|
||||
__write_pkru(pkru_val);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -22,7 +22,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
|
||||
|
||||
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
|
||||
struct task_struct *tsk);
|
||||
extern void convert_to_fxsr(struct task_struct *tsk,
|
||||
extern void convert_to_fxsr(struct fxregs_state *fxsave,
|
||||
const struct user_i387_ia32_struct *env);
|
||||
|
||||
unsigned long
|
||||
|
@ -293,15 +293,6 @@ struct fpu {
|
||||
*/
|
||||
unsigned int last_cpu;
|
||||
|
||||
/*
|
||||
* @initialized:
|
||||
*
|
||||
* This flag indicates whether this context is initialized: if the task
|
||||
* is not running then we can restore from this context, if the task
|
||||
* is running then we should save into this context.
|
||||
*/
|
||||
unsigned char initialized;
|
||||
|
||||
/*
|
||||
* @avx512_timestamp:
|
||||
*
|
||||
|
@ -2,9 +2,11 @@
|
||||
#ifndef __ASM_X86_XSAVE_H
|
||||
#define __ASM_X86_XSAVE_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/processor.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/user.h>
|
||||
|
||||
/* Bit 63 of XCR0 is reserved for future expansion */
|
||||
#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
|
||||
@ -46,8 +48,8 @@ extern void __init update_regset_xstate_info(unsigned int size,
|
||||
u64 xstate_mask);
|
||||
|
||||
void fpu__xstate_clear_all_cpu_caps(void);
|
||||
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
|
||||
const void *get_xsave_field_ptr(int xstate_field);
|
||||
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
|
||||
const void *get_xsave_field_ptr(int xfeature_nr);
|
||||
int using_compacted_format(void);
|
||||
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
|
||||
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
|
||||
|
@ -23,6 +23,8 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <asm/x86_init.h>
|
||||
#include <asm/fpu/xstate.h>
|
||||
#include <asm/fpu/api.h>
|
||||
|
||||
extern pgd_t early_top_pgt[PTRS_PER_PGD];
|
||||
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
|
||||
@ -127,14 +129,29 @@ static inline int pte_dirty(pte_t pte)
|
||||
static inline u32 read_pkru(void)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_OSPKE))
|
||||
return __read_pkru();
|
||||
return rdpkru();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void write_pkru(u32 pkru)
|
||||
{
|
||||
if (boot_cpu_has(X86_FEATURE_OSPKE))
|
||||
__write_pkru(pkru);
|
||||
struct pkru_state *pk;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_OSPKE))
|
||||
return;
|
||||
|
||||
pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);
|
||||
|
||||
/*
|
||||
* The PKRU value in xstate needs to be in sync with the value that is
|
||||
* written to the CPU. The FPU restore on return to userland would
|
||||
* otherwise load the previous value again.
|
||||
*/
|
||||
fpregs_lock();
|
||||
if (pk)
|
||||
pk->pkru = pkru;
|
||||
__write_pkru(pkru);
|
||||
fpregs_unlock();
|
||||
}
|
||||
|
||||
static inline int pte_young(pte_t pte)
|
||||
@ -1358,6 +1375,12 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
|
||||
#define PKRU_WD_BIT 0x2
|
||||
#define PKRU_BITS_PER_PKEY 2
|
||||
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
extern u32 init_pkru_value;
|
||||
#else
|
||||
#define init_pkru_value 0
|
||||
#endif
|
||||
|
||||
static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
|
||||
{
|
||||
int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
|
||||
|
@ -92,7 +92,7 @@ static inline void native_write_cr8(unsigned long val)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
static inline u32 __read_pkru(void)
|
||||
static inline u32 rdpkru(void)
|
||||
{
|
||||
u32 ecx = 0;
|
||||
u32 edx, pkru;
|
||||
@ -107,7 +107,7 @@ static inline u32 __read_pkru(void)
|
||||
return pkru;
|
||||
}
|
||||
|
||||
static inline void __write_pkru(u32 pkru)
|
||||
static inline void wrpkru(u32 pkru)
|
||||
{
|
||||
u32 ecx = 0, edx = 0;
|
||||
|
||||
@ -118,8 +118,21 @@ static inline void __write_pkru(u32 pkru)
|
||||
asm volatile(".byte 0x0f,0x01,0xef\n\t"
|
||||
: : "a" (pkru), "c"(ecx), "d"(edx));
|
||||
}
|
||||
|
||||
static inline void __write_pkru(u32 pkru)
|
||||
{
|
||||
/*
|
||||
* WRPKRU is relatively expensive compared to RDPKRU.
|
||||
* Avoid WRPKRU when it would not change the value.
|
||||
*/
|
||||
if (pkru == rdpkru())
|
||||
return;
|
||||
|
||||
wrpkru(pkru);
|
||||
}
|
||||
|
||||
#else
|
||||
static inline u32 __read_pkru(void)
|
||||
static inline u32 rdpkru(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -88,6 +88,7 @@ struct thread_info {
|
||||
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
|
||||
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
|
||||
#define TIF_PATCH_PENDING 13 /* pending live patching update */
|
||||
#define TIF_NEED_FPU_LOAD 14 /* load FPU on return to userspace */
|
||||
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
|
||||
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
|
||||
#define TIF_IA32 17 /* IA32 compatibility process */
|
||||
@ -117,6 +118,7 @@ struct thread_info {
|
||||
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
|
||||
#define _TIF_UPROBE (1 << TIF_UPROBE)
|
||||
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
|
||||
#define _TIF_NEED_FPU_LOAD (1 << TIF_NEED_FPU_LOAD)
|
||||
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
|
||||
#define _TIF_NOTSC (1 << TIF_NOTSC)
|
||||
#define _TIF_IA32 (1 << TIF_IA32)
|
||||
|
@ -13,22 +13,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(struct fpu *, fpu)
|
||||
__field(bool, initialized)
|
||||
__field(bool, load_fpu)
|
||||
__field(u64, xfeatures)
|
||||
__field(u64, xcomp_bv)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->fpu = fpu;
|
||||
__entry->initialized = fpu->initialized;
|
||||
__entry->load_fpu = test_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
|
||||
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
|
||||
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
|
||||
}
|
||||
),
|
||||
TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
|
||||
TP_printk("x86/fpu: %p load: %d xfeatures: %llx xcomp_bv: %llx",
|
||||
__entry->fpu,
|
||||
__entry->initialized,
|
||||
__entry->load_fpu,
|
||||
__entry->xfeatures,
|
||||
__entry->xcomp_bv
|
||||
)
|
||||
@ -64,11 +64,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated,
|
||||
TP_ARGS(fpu)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
|
||||
TP_PROTO(struct fpu *fpu),
|
||||
TP_ARGS(fpu)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
|
||||
TP_PROTO(struct fpu *fpu),
|
||||
TP_ARGS(fpu)
|
||||
|
@ -372,6 +372,8 @@ static bool pku_disabled;
|
||||
|
||||
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
|
||||
{
|
||||
struct pkru_state *pk;
|
||||
|
||||
/* check the boot processor, plus compile options for PKU: */
|
||||
if (!cpu_feature_enabled(X86_FEATURE_PKU))
|
||||
return;
|
||||
@ -382,6 +384,9 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
|
||||
return;
|
||||
|
||||
cr4_set_bits(X86_CR4_PKE);
|
||||
pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
|
||||
if (pk)
|
||||
pk->pkru = init_pkru_value;
|
||||
/*
|
||||
* Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
|
||||
* cpuid bit to be set. We need to ensure that we
|
||||
|
@ -101,24 +101,21 @@ static void __kernel_fpu_begin(void)
|
||||
|
||||
kernel_fpu_disable();
|
||||
|
||||
if (fpu->initialized) {
|
||||
/*
|
||||
* Ignore return value -- we don't care if reg state
|
||||
* is clobbered.
|
||||
*/
|
||||
copy_fpregs_to_fpstate(fpu);
|
||||
} else {
|
||||
__cpu_invalidate_fpregs_state();
|
||||
if (current->mm) {
|
||||
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
|
||||
set_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
/*
|
||||
* Ignore return value -- we don't care if reg state
|
||||
* is clobbered.
|
||||
*/
|
||||
copy_fpregs_to_fpstate(fpu);
|
||||
}
|
||||
}
|
||||
__cpu_invalidate_fpregs_state();
|
||||
}
|
||||
|
||||
static void __kernel_fpu_end(void)
|
||||
{
|
||||
struct fpu *fpu = ¤t->thread.fpu;
|
||||
|
||||
if (fpu->initialized)
|
||||
copy_kernel_to_fpregs(&fpu->state);
|
||||
|
||||
kernel_fpu_enable();
|
||||
}
|
||||
|
||||
@ -145,15 +142,17 @@ void fpu__save(struct fpu *fpu)
|
||||
{
|
||||
WARN_ON_FPU(fpu != &current->thread.fpu);
|
||||
|
||||
preempt_disable();
|
||||
fpregs_lock();
|
||||
trace_x86_fpu_before_save(fpu);
|
||||
if (fpu->initialized) {
|
||||
|
||||
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
|
||||
if (!copy_fpregs_to_fpstate(fpu)) {
|
||||
copy_kernel_to_fpregs(&fpu->state);
|
||||
}
|
||||
}
|
||||
|
||||
trace_x86_fpu_after_save(fpu);
|
||||
preempt_enable();
|
||||
fpregs_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fpu__save);
|
||||
|
||||
@ -186,11 +185,14 @@ void fpstate_init(union fpregs_state *state)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fpstate_init);
|
||||
|
||||
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
|
||||
int fpu__copy(struct task_struct *dst, struct task_struct *src)
|
||||
{
|
||||
struct fpu *dst_fpu = &dst->thread.fpu;
|
||||
struct fpu *src_fpu = &src->thread.fpu;
|
||||
|
||||
dst_fpu->last_cpu = -1;
|
||||
|
||||
if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
|
||||
if (!static_cpu_has(X86_FEATURE_FPU))
|
||||
return 0;
|
||||
|
||||
WARN_ON_FPU(src_fpu != &current->thread.fpu);
|
||||
@ -202,16 +204,23 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
|
||||
memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
|
||||
|
||||
/*
|
||||
* Save current FPU registers directly into the child
|
||||
* FPU context, without any memory-to-memory copying.
|
||||
* If the FPU registers are not current just memcpy() the state.
|
||||
* Otherwise save current FPU registers directly into the child's FPU
|
||||
* context, without any memory-to-memory copying.
|
||||
*
|
||||
* ( The function 'fails' in the FNSAVE case, which destroys
|
||||
* register contents so we have to copy them back. )
|
||||
* register contents so we have to load them back. )
|
||||
*/
|
||||
if (!copy_fpregs_to_fpstate(dst_fpu)) {
|
||||
memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
|
||||
copy_kernel_to_fpregs(&src_fpu->state);
|
||||
}
|
||||
fpregs_lock();
|
||||
if (test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
|
||||
|
||||
else if (!copy_fpregs_to_fpstate(dst_fpu))
|
||||
copy_kernel_to_fpregs(&dst_fpu->state);
|
||||
|
||||
fpregs_unlock();
|
||||
|
||||
set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
|
||||
|
||||
trace_x86_fpu_copy_src(src_fpu);
|
||||
trace_x86_fpu_copy_dst(dst_fpu);
|
||||
@ -223,20 +232,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
|
||||
* Activate the current task's in-memory FPU context,
|
||||
* if it has not been used before:
|
||||
*/
|
||||
void fpu__initialize(struct fpu *fpu)
|
||||
static void fpu__initialize(struct fpu *fpu)
|
||||
{
|
||||
WARN_ON_FPU(fpu != &current->thread.fpu);
|
||||
|
||||
if (!fpu->initialized) {
|
||||
fpstate_init(&fpu->state);
|
||||
trace_x86_fpu_init_state(fpu);
|
||||
|
||||
trace_x86_fpu_activate_state(fpu);
|
||||
/* Safe to do for the current task: */
|
||||
fpu->initialized = 1;
|
||||
}
|
||||
set_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
fpstate_init(&fpu->state);
|
||||
trace_x86_fpu_init_state(fpu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fpu__initialize);
|
||||
|
||||
/*
|
||||
* This function must be called before we read a task's fpstate.
|
||||
@ -248,32 +251,20 @@ EXPORT_SYMBOL_GPL(fpu__initialize);
|
||||
*
|
||||
* - or it's called for stopped tasks (ptrace), in which case the
|
||||
* registers were already saved by the context-switch code when
|
||||
* the task scheduled out - we only have to initialize the registers
|
||||
* if they've never been initialized.
|
||||
* the task scheduled out.
|
||||
*
|
||||
* If the task has used the FPU before then save it.
|
||||
*/
|
||||
void fpu__prepare_read(struct fpu *fpu)
|
||||
{
|
||||
if (fpu == &current->thread.fpu) {
|
||||
if (fpu == &current->thread.fpu)
|
||||
fpu__save(fpu);
|
||||
} else {
|
||||
if (!fpu->initialized) {
|
||||
fpstate_init(&fpu->state);
|
||||
trace_x86_fpu_init_state(fpu);
|
||||
|
||||
trace_x86_fpu_activate_state(fpu);
|
||||
/* Safe to do for current and for stopped child tasks: */
|
||||
fpu->initialized = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This function must be called before we write a task's fpstate.
|
||||
*
|
||||
* If the task has used the FPU before then invalidate any cached FPU registers.
|
||||
* If the task has not used the FPU before then initialize its fpstate.
|
||||
* Invalidate any cached FPU registers.
|
||||
*
|
||||
* After this function call, after registers in the fpstate are
|
||||
* modified and the child task has woken up, the child task will
|
||||
@ -290,43 +281,10 @@ void fpu__prepare_write(struct fpu *fpu)
|
||||
*/
|
||||
WARN_ON_FPU(fpu == &current->thread.fpu);
|
||||
|
||||
if (fpu->initialized) {
|
||||
/* Invalidate any cached state: */
|
||||
__fpu_invalidate_fpregs_state(fpu);
|
||||
} else {
|
||||
fpstate_init(&fpu->state);
|
||||
trace_x86_fpu_init_state(fpu);
|
||||
|
||||
trace_x86_fpu_activate_state(fpu);
|
||||
/* Safe to do for stopped child tasks: */
|
||||
fpu->initialized = 1;
|
||||
}
|
||||
/* Invalidate any cached state: */
|
||||
__fpu_invalidate_fpregs_state(fpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* 'fpu__restore()' is called to copy FPU registers from
|
||||
* the FPU fpstate to the live hw registers and to activate
|
||||
* access to the hardware registers, so that FPU instructions
|
||||
* can be used afterwards.
|
||||
*
|
||||
* Must be called with kernel preemption disabled (for example
|
||||
* with local interrupts disabled, as it is in the case of
|
||||
* do_device_not_available()).
|
||||
*/
|
||||
void fpu__restore(struct fpu *fpu)
|
||||
{
|
||||
fpu__initialize(fpu);
|
||||
|
||||
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
|
||||
kernel_fpu_disable();
|
||||
trace_x86_fpu_before_restore(fpu);
|
||||
fpregs_activate(fpu);
|
||||
copy_kernel_to_fpregs(&fpu->state);
|
||||
trace_x86_fpu_after_restore(fpu);
|
||||
kernel_fpu_enable();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fpu__restore);
|
||||
|
||||
/*
|
||||
* Drops current FPU state: deactivates the fpregs and
|
||||
* the fpstate. NOTE: it still leaves previous contents
|
||||
@ -341,17 +299,13 @@ void fpu__drop(struct fpu *fpu)
|
||||
preempt_disable();
|
||||
|
||||
if (fpu == &current->thread.fpu) {
|
||||
if (fpu->initialized) {
|
||||
/* Ignore delayed exceptions from user space */
|
||||
asm volatile("1: fwait\n"
|
||||
"2:\n"
|
||||
_ASM_EXTABLE(1b, 2b));
|
||||
fpregs_deactivate(fpu);
|
||||
}
|
||||
/* Ignore delayed exceptions from user space */
|
||||
asm volatile("1: fwait\n"
|
||||
"2:\n"
|
||||
_ASM_EXTABLE(1b, 2b));
|
||||
fpregs_deactivate(fpu);
|
||||
}
|
||||
|
||||
fpu->initialized = 0;
|
||||
|
||||
trace_x86_fpu_dropped(fpu);
|
||||
|
||||
preempt_enable();
|
||||
@ -363,6 +317,8 @@ void fpu__drop(struct fpu *fpu)
|
||||
*/
|
||||
static inline void copy_init_fpstate_to_fpregs(void)
|
||||
{
|
||||
fpregs_lock();
|
||||
|
||||
if (use_xsave())
|
||||
copy_kernel_to_xregs(&init_fpstate.xsave, -1);
|
||||
else if (static_cpu_has(X86_FEATURE_FXSR))
|
||||
@ -372,6 +328,9 @@ static inline void copy_init_fpstate_to_fpregs(void)
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_OSPKE))
|
||||
copy_init_pkru_to_fpregs();
|
||||
|
||||
fpregs_mark_activate();
|
||||
fpregs_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -389,15 +348,51 @@ void fpu__clear(struct fpu *fpu)
|
||||
/*
|
||||
* Make sure fpstate is cleared and initialized.
|
||||
*/
|
||||
if (static_cpu_has(X86_FEATURE_FPU)) {
|
||||
preempt_disable();
|
||||
fpu__initialize(fpu);
|
||||
user_fpu_begin();
|
||||
fpu__initialize(fpu);
|
||||
if (static_cpu_has(X86_FEATURE_FPU))
|
||||
copy_init_fpstate_to_fpregs();
|
||||
preempt_enable();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Load FPU context before returning to userspace.
|
||||
*/
|
||||
void switch_fpu_return(void)
|
||||
{
|
||||
if (!static_cpu_has(X86_FEATURE_FPU))
|
||||
return;
|
||||
|
||||
__fpregs_load_activate();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(switch_fpu_return);
|
||||
|
||||
#ifdef CONFIG_X86_DEBUG_FPU
|
||||
/*
|
||||
* If current FPU state according to its tracking (loaded FPU context on this
|
||||
* CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
|
||||
* loaded on return to userland.
|
||||
*/
|
||||
void fpregs_assert_state_consistent(void)
|
||||
{
|
||||
struct fpu *fpu = ¤t->thread.fpu;
|
||||
|
||||
if (test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
return;
|
||||
|
||||
WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
|
||||
#endif
|
||||
|
||||
void fpregs_mark_activate(void)
|
||||
{
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
|
||||
fpregs_activate(fpu);
|
||||
fpu->last_cpu = smp_processor_id();
|
||||
clear_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fpregs_mark_activate);
|
||||
|
||||
/*
|
||||
* x87 math exception handling:
|
||||
*/
|
||||
|
@ -239,8 +239,6 @@ static void __init fpu__init_system_ctx_switch(void)
|
||||
|
||||
WARN_ON_FPU(!on_boot_cpu);
|
||||
on_boot_cpu = 0;
|
||||
|
||||
WARN_ON_FPU(current->thread.fpu.initialized);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -15,16 +15,12 @@
|
||||
*/
|
||||
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
struct fpu *target_fpu = &target->thread.fpu;
|
||||
|
||||
return target_fpu->initialized ? regset->n : 0;
|
||||
return regset->n;
|
||||
}
|
||||
|
||||
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
|
||||
{
|
||||
struct fpu *target_fpu = &target->thread.fpu;
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
|
||||
if (boot_cpu_has(X86_FEATURE_FXSR))
|
||||
return regset->n;
|
||||
else
|
||||
return 0;
|
||||
@ -269,11 +265,10 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
|
||||
memcpy(&to[i], &from[i], sizeof(to[0]));
|
||||
}
|
||||
|
||||
void convert_to_fxsr(struct task_struct *tsk,
|
||||
void convert_to_fxsr(struct fxregs_state *fxsave,
|
||||
const struct user_i387_ia32_struct *env)
|
||||
|
||||
{
|
||||
struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
|
||||
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
|
||||
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
|
||||
int i;
|
||||
@ -350,7 +345,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
|
||||
if (!ret)
|
||||
convert_to_fxsr(target, &env);
|
||||
convert_to_fxsr(&target->thread.fpu.state.fxsave, &env);
|
||||
|
||||
/*
|
||||
* update the header bit in the xsave header, indicating the
|
||||
@ -371,16 +366,9 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
|
||||
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
struct fpu *fpu = &tsk->thread.fpu;
|
||||
int fpvalid;
|
||||
|
||||
fpvalid = fpu->initialized;
|
||||
if (fpvalid)
|
||||
fpvalid = !fpregs_get(tsk, NULL,
|
||||
0, sizeof(struct user_i387_ia32_struct),
|
||||
ufpu, NULL);
|
||||
|
||||
return fpvalid;
|
||||
return !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct),
|
||||
ufpu, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL(dump_fpu);
|
||||
|
||||
|
@ -92,13 +92,13 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
|
||||
return err;
|
||||
|
||||
err |= __put_user(FP_XSTATE_MAGIC2,
|
||||
(__u32 *)(buf + fpu_user_xstate_size));
|
||||
(__u32 __user *)(buf + fpu_user_xstate_size));
|
||||
|
||||
/*
|
||||
* Read the xfeatures which we copied (directly from the cpu or
|
||||
* from the state in task struct) to the user buffers.
|
||||
*/
|
||||
err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);
|
||||
err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
|
||||
|
||||
/*
|
||||
* For legacy compatible, we always set FP/SSE bits in the bit
|
||||
@ -113,7 +113,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
|
||||
*/
|
||||
xfeatures |= XFEATURE_MASK_FPSSE;
|
||||
|
||||
err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
|
||||
err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -144,9 +144,10 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
|
||||
* buf == buf_fx for 64-bit frames and 32-bit fsave frame.
|
||||
* buf != buf_fx for 32-bit frames with fxstate.
|
||||
*
|
||||
* If the fpu, extended register state is live, save the state directly
|
||||
* to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
|
||||
* copy the thread's fpu state to the user frame starting at 'buf_fx'.
|
||||
* Try to save it directly to the user frame with disabled page fault handler.
|
||||
* If this fails then do the slow path where the FPU state is first saved to
|
||||
* task's fpu->state and then copy it to the user frame pointed to by the
|
||||
* aligned pointer 'buf_fx'.
|
||||
*
|
||||
* If this is a 32-bit frame with fxstate, put a fsave header before
|
||||
* the aligned state at 'buf_fx'.
|
||||
@ -156,10 +157,9 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
|
||||
*/
|
||||
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
|
||||
{
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
struct xregs_state *xsave = &fpu->state.xsave;
|
||||
struct task_struct *tsk = current;
|
||||
int ia32_fxstate = (buf != buf_fx);
|
||||
int ret;
|
||||
|
||||
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
|
||||
IS_ENABLED(CONFIG_IA32_EMULATION));
|
||||
@ -172,28 +172,34 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
|
||||
sizeof(struct user_i387_ia32_struct), NULL,
|
||||
(struct _fpstate_32 __user *) buf) ? -1 : 1;
|
||||
|
||||
if (fpu->initialized || using_compacted_format()) {
|
||||
/* Save the live register state to the user directly. */
|
||||
if (copy_fpregs_to_sigframe(buf_fx))
|
||||
return -1;
|
||||
/* Update the thread's fxstate to save the fsave header. */
|
||||
if (ia32_fxstate)
|
||||
copy_fxregs_to_kernel(fpu);
|
||||
} else {
|
||||
/*
|
||||
* It is a *bug* if kernel uses compacted-format for xsave
|
||||
* area and we copy it out directly to a signal frame. It
|
||||
* should have been handled above by saving the registers
|
||||
* directly.
|
||||
*/
|
||||
if (boot_cpu_has(X86_FEATURE_XSAVES)) {
|
||||
WARN_ONCE(1, "x86/fpu: saving compacted-format xsave area to a signal frame!\n");
|
||||
return -1;
|
||||
}
|
||||
retry:
|
||||
/*
|
||||
* Load the FPU registers if they are not valid for the current task.
|
||||
* With a valid FPU state we can attempt to save the state directly to
|
||||
* userland's stack frame which will likely succeed. If it does not,
|
||||
* resolve the fault in the user memory and try again.
|
||||
*/
|
||||
fpregs_lock();
|
||||
if (test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
__fpregs_load_activate();
|
||||
|
||||
fpstate_sanitize_xstate(fpu);
|
||||
if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
|
||||
return -1;
|
||||
pagefault_disable();
|
||||
ret = copy_fpregs_to_sigframe(buf_fx);
|
||||
pagefault_enable();
|
||||
fpregs_unlock();
|
||||
|
||||
if (ret) {
|
||||
int aligned_size;
|
||||
int nr_pages;
|
||||
|
||||
aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
|
||||
nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
|
||||
|
||||
ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
|
||||
NULL, FOLL_WRITE);
|
||||
if (ret == nr_pages)
|
||||
goto retry;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Save the fsave header for the 32-bit frames. */
|
||||
@ -207,11 +213,11 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
|
||||
}
|
||||
|
||||
static inline void
|
||||
sanitize_restored_xstate(struct task_struct *tsk,
|
||||
sanitize_restored_xstate(union fpregs_state *state,
|
||||
struct user_i387_ia32_struct *ia32_env,
|
||||
u64 xfeatures, int fx_only)
|
||||
{
|
||||
struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
|
||||
struct xregs_state *xsave = &state->xsave;
|
||||
struct xstate_header *header = &xsave->header;
|
||||
|
||||
if (use_xsave()) {
|
||||
@ -238,17 +244,18 @@ sanitize_restored_xstate(struct task_struct *tsk,
|
||||
*/
|
||||
xsave->i387.mxcsr &= mxcsr_feature_mask;
|
||||
|
||||
convert_to_fxsr(tsk, ia32_env);
|
||||
if (ia32_env)
|
||||
convert_to_fxsr(&state->fxsave, ia32_env);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Restore the extended state if present. Otherwise, restore the FP/SSE state.
|
||||
*/
|
||||
static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
|
||||
static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
|
||||
{
|
||||
if (use_xsave()) {
|
||||
if ((unsigned long)buf % 64 || fx_only) {
|
||||
if (fx_only) {
|
||||
u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
|
||||
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
|
||||
return copy_user_to_fxregs(buf);
|
||||
@ -266,12 +273,15 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_
|
||||
|
||||
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
{
|
||||
struct user_i387_ia32_struct *envp = NULL;
|
||||
int state_size = fpu_kernel_xstate_size;
|
||||
int ia32_fxstate = (buf != buf_fx);
|
||||
struct task_struct *tsk = current;
|
||||
struct fpu *fpu = &tsk->thread.fpu;
|
||||
int state_size = fpu_kernel_xstate_size;
|
||||
struct user_i387_ia32_struct env;
|
||||
u64 xfeatures = 0;
|
||||
int fx_only = 0;
|
||||
int ret = 0;
|
||||
|
||||
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
|
||||
IS_ENABLED(CONFIG_IA32_EMULATION));
|
||||
@ -284,8 +294,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
if (!access_ok(buf, size))
|
||||
return -EACCES;
|
||||
|
||||
fpu__initialize(fpu);
|
||||
|
||||
if (!static_cpu_has(X86_FEATURE_FPU))
|
||||
return fpregs_soft_set(current, NULL,
|
||||
0, sizeof(struct user_i387_ia32_struct),
|
||||
@ -308,61 +316,101 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The current state of the FPU registers does not matter. By setting
|
||||
* TIF_NEED_FPU_LOAD unconditionally it is ensured that our xstate
|
||||
* is not modified on context switch and that the xstate is considered
|
||||
* to be loaded again on return to userland (overriding last_cpu avoids
|
||||
* the optimisation).
|
||||
*/
|
||||
set_thread_flag(TIF_NEED_FPU_LOAD);
|
||||
__fpu_invalidate_fpregs_state(fpu);
|
||||
|
||||
if ((unsigned long)buf_fx % 64)
|
||||
fx_only = 1;
|
||||
/*
|
||||
* For 32-bit frames with fxstate, copy the fxstate so it can be
|
||||
* reconstructed later.
|
||||
*/
|
||||
if (ia32_fxstate) {
|
||||
/*
|
||||
* For 32-bit frames with fxstate, copy the user state to the
|
||||
* thread's fpu state, reconstruct fxstate from the fsave
|
||||
* header. Validate and sanitize the copied state.
|
||||
*/
|
||||
struct user_i387_ia32_struct env;
|
||||
int err = 0;
|
||||
|
||||
/*
|
||||
* Drop the current fpu which clears fpu->initialized. This ensures
|
||||
* that any context-switch during the copy of the new state,
|
||||
* avoids the intermediate state from getting restored/saved.
|
||||
* Thus avoiding the new restored state from getting corrupted.
|
||||
* We will be ready to restore/save the state only after
|
||||
* fpu->initialized is again set.
|
||||
*/
|
||||
fpu__drop(fpu);
|
||||
|
||||
if (using_compacted_format()) {
|
||||
err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
|
||||
} else {
|
||||
err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
|
||||
|
||||
if (!err && state_size > offsetof(struct xregs_state, header))
|
||||
err = validate_xstate_header(&fpu->state.xsave.header);
|
||||
}
|
||||
|
||||
if (err || __copy_from_user(&env, buf, sizeof(env))) {
|
||||
fpstate_init(&fpu->state);
|
||||
trace_x86_fpu_init_state(fpu);
|
||||
err = -1;
|
||||
} else {
|
||||
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
|
||||
}
|
||||
|
||||
local_bh_disable();
|
||||
fpu->initialized = 1;
|
||||
fpu__restore(fpu);
|
||||
local_bh_enable();
|
||||
|
||||
return err;
|
||||
ret = __copy_from_user(&env, buf, sizeof(env));
|
||||
if (ret)
|
||||
goto err_out;
|
||||
envp = &env;
|
||||
} else {
|
||||
/*
|
||||
* For 64-bit frames and 32-bit fsave frames, restore the user
|
||||
* state to the registers directly (with exceptions handled).
|
||||
* Attempt to restore the FPU registers directly from user
|
||||
* memory. For that to succeed, the user access cannot cause
|
||||
* page faults. If it does, fall back to the slow path below,
|
||||
* going through the kernel buffer with the enabled pagefault
|
||||
* handler.
|
||||
*/
|
||||
user_fpu_begin();
|
||||
if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
|
||||
fpu__clear(fpu);
|
||||
return -1;
|
||||
fpregs_lock();
|
||||
pagefault_disable();
|
||||
ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
|
||||
pagefault_enable();
|
||||
if (!ret) {
|
||||
fpregs_mark_activate();
|
||||
fpregs_unlock();
|
||||
return 0;
|
||||
}
|
||||
fpregs_unlock();
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
if (use_xsave() && !fx_only) {
|
||||
u64 init_bv = xfeatures_mask & ~xfeatures;
|
||||
|
||||
if (using_compacted_format()) {
|
||||
ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
|
||||
} else {
|
||||
ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
|
||||
|
||||
if (!ret && state_size > offsetof(struct xregs_state, header))
|
||||
ret = validate_xstate_header(&fpu->state.xsave.header);
|
||||
}
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);
|
||||
|
||||
fpregs_lock();
|
||||
if (unlikely(init_bv))
|
||||
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
|
||||
ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures);
|
||||
|
||||
} else if (use_fxsr()) {
|
||||
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);
|
||||
|
||||
fpregs_lock();
|
||||
if (use_xsave()) {
|
||||
u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
|
||||
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
|
||||
}
|
||||
|
||||
ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
|
||||
} else {
|
||||
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
|
||||
fpregs_lock();
|
||||
ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
|
||||
}
|
||||
if (!ret)
|
||||
fpregs_mark_activate();
|
||||
fpregs_unlock();
|
||||
|
||||
err_out:
|
||||
if (ret)
|
||||
fpu__clear(fpu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int xstate_sigframe_size(void)
|
||||
|
@ -805,20 +805,18 @@ void fpu__resume_cpu(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* Given an xstate feature mask, calculate where in the xsave
|
||||
* Given an xstate feature nr, calculate where in the xsave
|
||||
* buffer the state is. Callers should ensure that the buffer
|
||||
* is valid.
|
||||
*/
|
||||
static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
|
||||
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
|
||||
{
|
||||
int feature_nr = fls64(xstate_feature_mask) - 1;
|
||||
|
||||
if (!xfeature_enabled(feature_nr)) {
|
||||
if (!xfeature_enabled(xfeature_nr)) {
|
||||
WARN_ON_FPU(1);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (void *)xsave + xstate_comp_offsets[feature_nr];
|
||||
return (void *)xsave + xstate_comp_offsets[xfeature_nr];
|
||||
}
|
||||
/*
|
||||
* Given the xsave area and a state inside, this function returns the
|
||||
@ -832,13 +830,13 @@ static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask
|
||||
*
|
||||
* Inputs:
|
||||
* xstate: the thread's storage area for all FPU data
|
||||
* xstate_feature: state which is defined in xsave.h (e.g.
|
||||
* XFEATURE_MASK_FP, XFEATURE_MASK_SSE, etc...)
|
||||
* xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
|
||||
* XFEATURE_SSE, etc...)
|
||||
* Output:
|
||||
* address of the state in the xsave area, or NULL if the
|
||||
* field is not present in the xsave buffer.
|
||||
*/
|
||||
void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
|
||||
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
|
||||
{
|
||||
/*
|
||||
* Do we even *have* xsave state?
|
||||
@ -851,11 +849,11 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
|
||||
* have not enabled. Remember that pcntxt_mask is
|
||||
* what we write to the XCR0 register.
|
||||
*/
|
||||
WARN_ONCE(!(xfeatures_mask & xstate_feature),
|
||||
WARN_ONCE(!(xfeatures_mask & BIT_ULL(xfeature_nr)),
|
||||
"get of unsupported state");
|
||||
/*
|
||||
* This assumes the last 'xsave*' instruction to
|
||||
* have requested that 'xstate_feature' be saved.
|
||||
* have requested that 'xfeature_nr' be saved.
|
||||
* If it did not, we might be seeing an old value
|
||||
* of the field in the buffer.
|
||||
*
|
||||
@ -864,10 +862,10 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
|
||||
* or because the "init optimization" caused it
|
||||
* to not be saved.
|
||||
*/
|
||||
if (!(xsave->header.xfeatures & xstate_feature))
|
||||
if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
|
||||
return NULL;
|
||||
|
||||
return __raw_xsave_addr(xsave, xstate_feature);
|
||||
return __raw_xsave_addr(xsave, xfeature_nr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_xsave_addr);
|
||||
|
||||
@ -882,25 +880,23 @@ EXPORT_SYMBOL_GPL(get_xsave_addr);
|
||||
* Note that this only works on the current task.
|
||||
*
|
||||
* Inputs:
|
||||
* @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
|
||||
* XFEATURE_MASK_SSE, etc...)
|
||||
* @xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
|
||||
* XFEATURE_SSE, etc...)
|
||||
* Output:
|
||||
* address of the state in the xsave area or NULL if the state
|
||||
* is not present or is in its 'init state'.
|
||||
*/
|
||||
const void *get_xsave_field_ptr(int xsave_state)
|
||||
const void *get_xsave_field_ptr(int xfeature_nr)
|
||||
{
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
|
||||
if (!fpu->initialized)
|
||||
return NULL;
|
||||
/*
|
||||
* fpu__save() takes the CPU's xstate registers
|
||||
* and saves them off to the 'fpu memory buffer.
|
||||
*/
|
||||
fpu__save(fpu);
|
||||
|
||||
return get_xsave_addr(&fpu->state.xsave, xsave_state);
|
||||
return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_PKEYS
|
||||
@ -1016,7 +1012,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
|
||||
* Copy only in-use xstates:
|
||||
*/
|
||||
if ((header.xfeatures >> i) & 1) {
|
||||
void *src = __raw_xsave_addr(xsave, 1 << i);
|
||||
void *src = __raw_xsave_addr(xsave, i);
|
||||
|
||||
offset = xstate_offsets[i];
|
||||
size = xstate_sizes[i];
|
||||
@ -1102,7 +1098,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
|
||||
* Copy only in-use xstates:
|
||||
*/
|
||||
if ((header.xfeatures >> i) & 1) {
|
||||
void *src = __raw_xsave_addr(xsave, 1 << i);
|
||||
void *src = __raw_xsave_addr(xsave, i);
|
||||
|
||||
offset = xstate_offsets[i];
|
||||
size = xstate_sizes[i];
|
||||
@ -1159,7 +1155,7 @@ int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
|
||||
u64 mask = ((u64)1 << i);
|
||||
|
||||
if (hdr.xfeatures & mask) {
|
||||
void *dst = __raw_xsave_addr(xsave, 1 << i);
|
||||
void *dst = __raw_xsave_addr(xsave, i);
|
||||
|
||||
offset = xstate_offsets[i];
|
||||
size = xstate_sizes[i];
|
||||
@ -1213,7 +1209,7 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
|
||||
u64 mask = ((u64)1 << i);
|
||||
|
||||
if (hdr.xfeatures & mask) {
|
||||
void *dst = __raw_xsave_addr(xsave, 1 << i);
|
||||
void *dst = __raw_xsave_addr(xsave, i);
|
||||
|
||||
offset = xstate_offsets[i];
|
||||
size = xstate_sizes[i];
|
||||
|
@ -101,7 +101,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
||||
dst->thread.vm86 = NULL;
|
||||
#endif
|
||||
|
||||
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
|
||||
return fpu__copy(dst, src);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -241,7 +241,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
|
||||
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
|
||||
|
||||
switch_fpu_prepare(prev_fpu, cpu);
|
||||
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
switch_fpu_prepare(prev_fpu, cpu);
|
||||
|
||||
/*
|
||||
* Save away %gs. No need to save %fs, as it was saved on the
|
||||
@ -274,9 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
/*
|
||||
* Leave lazy mode, flushing any hypercalls made here.
|
||||
* This must be done before restoring TLS segments so
|
||||
* the GDT and LDT are properly updated, and must be
|
||||
* done before fpu__restore(), so the TS bit is up
|
||||
* to date.
|
||||
* the GDT and LDT are properly updated.
|
||||
*/
|
||||
arch_end_context_switch(next_p);
|
||||
|
||||
@ -297,10 +296,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
if (prev->gs | next->gs)
|
||||
lazy_load_gs(next->gs);
|
||||
|
||||
switch_fpu_finish(next_fpu, cpu);
|
||||
|
||||
this_cpu_write(current_task, next_p);
|
||||
|
||||
switch_fpu_finish(next_fpu);
|
||||
|
||||
/* Load the Intel cache allocation PQR MSR. */
|
||||
resctrl_sched_in();
|
||||
|
||||
|
@ -521,7 +521,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
|
||||
this_cpu_read(irq_count) != -1);
|
||||
|
||||
switch_fpu_prepare(prev_fpu, cpu);
|
||||
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
switch_fpu_prepare(prev_fpu, cpu);
|
||||
|
||||
/* We must save %fs and %gs before load_TLS() because
|
||||
* %fs and %gs may be cleared by load_TLS().
|
||||
@ -539,9 +540,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
/*
|
||||
* Leave lazy mode, flushing any hypercalls made here. This
|
||||
* must be done after loading TLS entries in the GDT but before
|
||||
* loading segments that might reference them, and and it must
|
||||
* be done before fpu__restore(), so the TS bit is up to
|
||||
* date.
|
||||
* loading segments that might reference them.
|
||||
*/
|
||||
arch_end_context_switch(next_p);
|
||||
|
||||
@ -569,14 +568,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
|
||||
|
||||
x86_fsgsbase_load(prev, next);
|
||||
|
||||
switch_fpu_finish(next_fpu, cpu);
|
||||
|
||||
/*
|
||||
* Switch the PDA and FPU contexts.
|
||||
*/
|
||||
this_cpu_write(current_task, next_p);
|
||||
this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
|
||||
|
||||
switch_fpu_finish(next_fpu);
|
||||
|
||||
/* Reload sp0. */
|
||||
update_task_stack(next_p);
|
||||
|
||||
|
@ -205,7 +205,7 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
|
||||
put_user_ex(regs->ss, &sc->ss);
|
||||
#endif /* CONFIG_X86_32 */
|
||||
|
||||
put_user_ex(fpstate, &sc->fpstate);
|
||||
put_user_ex(fpstate, (unsigned long __user *)&sc->fpstate);
|
||||
|
||||
/* non-iBCS2 extensions.. */
|
||||
put_user_ex(mask, &sc->oldmask);
|
||||
@ -245,7 +245,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
|
||||
unsigned long sp = regs->sp;
|
||||
unsigned long buf_fx = 0;
|
||||
int onsigstack = on_sig_stack(sp);
|
||||
struct fpu *fpu = &current->thread.fpu;
|
||||
int ret;
|
||||
|
||||
/* redzone */
|
||||
if (IS_ENABLED(CONFIG_X86_64))
|
||||
@ -264,11 +264,9 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
|
||||
sp = (unsigned long) ka->sa.sa_restorer;
|
||||
}
|
||||
|
||||
if (fpu->initialized) {
|
||||
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
|
||||
&buf_fx, &math_size);
|
||||
*fpstate = (void __user *)sp;
|
||||
}
|
||||
sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
|
||||
&buf_fx, &math_size);
|
||||
*fpstate = (void __user *)sp;
|
||||
|
||||
sp = align_sigframe(sp - frame_size);
|
||||
|
||||
@ -280,8 +278,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
|
||||
return (void __user *)-1L;
|
||||
|
||||
/* save i387 and extended state */
|
||||
if (fpu->initialized &&
|
||||
copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
|
||||
ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
|
||||
if (ret < 0)
|
||||
return (void __user *)-1L;
|
||||
|
||||
return (void __user *)sp;
|
||||
@ -574,7 +572,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
|
||||
restorer = NULL;
|
||||
err |= -EFAULT;
|
||||
}
|
||||
put_user_ex(restorer, &frame->pretcode);
|
||||
put_user_ex(restorer, (unsigned long __user *)&frame->pretcode);
|
||||
} put_user_catch(err);
|
||||
|
||||
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
|
||||
@ -765,8 +763,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
|
||||
/*
|
||||
* Ensure the signal handler starts with the new fpu state.
|
||||
*/
|
||||
if (fpu->initialized)
|
||||
fpu__clear(fpu);
|
||||
fpu__clear(fpu);
|
||||
}
|
||||
signal_setup_done(failed, ksig, stepping);
|
||||
}
|
||||
|
@ -456,7 +456,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
|
||||
* which is all zeros which indicates MPX was not
|
||||
* responsible for the exception.
|
||||
*/
|
||||
bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
|
||||
bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
|
||||
if (!bndcsr)
|
||||
goto exit_trap;
|
||||
|
||||
|
@@ -6500,7 +6500,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
          */
         if (static_cpu_has(X86_FEATURE_PKU) &&
             kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
-                vcpu->arch.pkru = __read_pkru();
+                vcpu->arch.pkru = rdpkru();
                 if (vcpu->arch.pkru != vmx->host_pkru)
                         __write_pkru(vmx->host_pkru);
         }
@@ -3681,15 +3681,15 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
          */
         valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
         while (valid) {
-                u64 feature = valid & -valid;
-                int index = fls64(feature) - 1;
-                void *src = get_xsave_addr(xsave, feature);
+                u64 xfeature_mask = valid & -valid;
+                int xfeature_nr = fls64(xfeature_mask) - 1;
+                void *src = get_xsave_addr(xsave, xfeature_nr);

                 if (src) {
                         u32 size, offset, ecx, edx;
-                        cpuid_count(XSTATE_CPUID, index,
+                        cpuid_count(XSTATE_CPUID, xfeature_nr,
                                     &size, &offset, &ecx, &edx);
-                        if (feature == XFEATURE_MASK_PKRU)
+                        if (xfeature_nr == XFEATURE_PKRU)
                                 memcpy(dest + offset, &vcpu->arch.pkru,
                                        sizeof(vcpu->arch.pkru));
                         else
@@ -3697,7 +3697,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)

                 }

-                valid -= feature;
+                valid -= xfeature_mask;
         }
 }
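The loops in fill_xsave() and load_xsave() consume the xstate_bv bitmap one component at a time: valid & -valid isolates the lowest set bit, fls64() turns that single-bit mask into a feature number, and subtracting the mask clears the bit. The rename from feature/index to xfeature_mask/xfeature_nr spells out which of the two get_xsave_addr() now expects: a feature number rather than a mask. A standalone sketch of the idiom in plain C (not kernel code; the example bitmap and names are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Example bitmap with bits 2, 5 and 9 set. */
        uint64_t valid = (1ULL << 2) | (1ULL << 5) | (1ULL << 9);

        while (valid) {
                uint64_t mask = valid & -valid;        /* isolate lowest set bit */
                int nr = 63 - __builtin_clzll(mask);   /* bit number, like fls64(mask) - 1 */

                printf("xfeature %d (mask 0x%llx)\n", nr, (unsigned long long)mask);
                valid -= mask;                         /* clear the bit just handled */
        }
        return 0;
}

Running it prints the feature numbers 2, 5 and 9 in ascending order, mirroring how the kernel loop visits components from the lowest-numbered xfeature upward.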
@@ -3724,22 +3724,22 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
          */
         valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
         while (valid) {
-                u64 feature = valid & -valid;
-                int index = fls64(feature) - 1;
-                void *dest = get_xsave_addr(xsave, feature);
+                u64 xfeature_mask = valid & -valid;
+                int xfeature_nr = fls64(xfeature_mask) - 1;
+                void *dest = get_xsave_addr(xsave, xfeature_nr);

                 if (dest) {
                         u32 size, offset, ecx, edx;
-                        cpuid_count(XSTATE_CPUID, index,
+                        cpuid_count(XSTATE_CPUID, xfeature_nr,
                                     &size, &offset, &ecx, &edx);
-                        if (feature == XFEATURE_MASK_PKRU)
+                        if (xfeature_nr == XFEATURE_PKRU)
                                 memcpy(&vcpu->arch.pkru, src + offset,
                                        sizeof(vcpu->arch.pkru));
                         else
                                 memcpy(dest, src + offset, size);
                 }

-                valid -= feature;
+                valid -= xfeature_mask;
         }
 }
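Inside both loops, the size and offset of each component in the XSAVE buffer come from CPUID leaf 0xD with the feature number as the subleaf, which is why cpuid_count() can be fed the same xfeature_nr. A minimal userspace sketch of that query, compilable with GCC or Clang on x86 (the constant name and loop bounds are illustrative, not the kernel's):

#include <cpuid.h>
#include <stdio.h>

#define XSTATE_CPUID 0x0000000d   /* CPUID leaf enumerating XSAVE components */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Subleaves 0 and 1 describe the whole area; per-component data starts at 2.
         * Feature number 9, for example, is the PKRU component. */
        for (unsigned int nr = 2; nr <= 9; nr++) {
                if (!__get_cpuid_count(XSTATE_CPUID, nr, &eax, &ebx, &ecx, &edx))
                        continue;
                if (!eax)       /* component not supported on this CPU */
                        continue;
                /* EAX = size in bytes, EBX = offset in the non-compacted XSAVE area. */
                printf("xfeature %2u: size %4u offset %4u\n", nr, eax, ebx);
        }
        return 0;
}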
@@ -7899,6 +7899,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         wait_lapic_expire(vcpu);
         guest_enter_irqoff();

+        fpregs_assert_state_consistent();
+        if (test_thread_flag(TIF_NEED_FPU_LOAD))
+                switch_fpu_return();
+
         if (unlikely(vcpu->arch.switch_db_regs)) {
                 set_debugreg(0, 7);
                 set_debugreg(vcpu->arch.eff_db[0], 0);
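With register loading deferred, a task can reach this point with its FPU contents still only in memory and TIF_NEED_FPU_LOAD set, so the registers must be loaded before entering the guest, just as on the return-to-userspace path. The shape of the save-now/restore-later pattern, as a standalone sketch in which a plain bool stands in for TIF_NEED_FPU_LOAD and ordinary memory stands in for the FPU (an analogy, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ctx { double regs[8]; };

static struct ctx saved_ctx;     /* in-memory copy, like the task's fpu state */
static struct ctx live_regs;     /* the "hardware" registers */
static bool need_load;           /* stand-in for TIF_NEED_FPU_LOAD */

static void context_switch_out(void)
{
        memcpy(&saved_ctx, &live_regs, sizeof(saved_ctx));
        need_load = true;        /* defer the restore instead of doing it now */
}

static void restore_if_needed(void)      /* stand-in for switch_fpu_return() */
{
        if (!need_load)
                return;
        memcpy(&live_regs, &saved_ctx, sizeof(live_regs));
        need_load = false;
}

int main(void)
{
        live_regs.regs[0] = 42.0;
        context_switch_out();            /* state saved, restore deferred */
        restore_if_needed();             /* done only just before user/guest code runs */
        printf("%g\n", live_regs.regs[0]);
        return 0;
}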
@@ -8157,22 +8161,30 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 /* Swap (qemu) user FPU context for the guest FPU context. */
 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-        preempt_disable();
+        fpregs_lock();
+
         copy_fpregs_to_fpstate(&current->thread.fpu);
         /* PKRU is separately restored in kvm_x86_ops->run. */
         __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
                                 ~XFEATURE_MASK_PKRU);
-        preempt_enable();
+
+        fpregs_mark_activate();
+        fpregs_unlock();
+
         trace_kvm_fpu(1);
 }

 /* When vcpu_run ends, restore user space FPU context. */
 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-        preempt_disable();
+        fpregs_lock();
+
         copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
         copy_kernel_to_fpregs(&current->thread.fpu.state);
-        preempt_enable();
+
+        fpregs_mark_activate();
+        fpregs_unlock();
+
         ++vcpu->stat.fpu_reload;
         trace_kvm_fpu(0);
 }
@@ -8870,11 +8882,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
                 if (init_event)
                         kvm_put_guest_fpu(vcpu);
                 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
-                                        XFEATURE_MASK_BNDREGS);
+                                        XFEATURE_BNDREGS);
                 if (mpx_state_buffer)
                         memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
                 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
-                                        XFEATURE_MASK_BNDCSR);
+                                        XFEATURE_BNDCSR);
                 if (mpx_state_buffer)
                         memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
                 if (init_event)
@@ -113,9 +113,6 @@ void math_emulate(struct math_emu_info *info)
         unsigned long code_base = 0;
         unsigned long code_limit = 0;   /* Initialized to stop compiler warnings */
         struct desc_struct code_descriptor;
-        struct fpu *fpu = &current->thread.fpu;
-
-        fpu__initialize(fpu);

 #ifdef RE_ENTRANT_CHECKING
         if (emulating) {
@@ -142,7 +142,7 @@ int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
                 goto err_out;
         }
         /* get bndregs field from current task's xsave area */
-        bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
+        bndregs = get_xsave_field_ptr(XFEATURE_BNDREGS);
         if (!bndregs) {
                 err = -EINVAL;
                 goto err_out;
@@ -190,7 +190,7 @@ static __user void *mpx_get_bounds_dir(void)
          * The bounds directory pointer is stored in a register
          * only accessible if we first do an xsave.
          */
-        bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
+        bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
         if (!bndcsr)
                 return MPX_INVALID_BOUNDS_DIR;
@@ -376,7 +376,7 @@ static int do_mpx_bt_fault(void)
         const struct mpx_bndcsr *bndcsr;
         struct mm_struct *mm = current->mm;

-        bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
+        bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
         if (!bndcsr)
                 return -EINVAL;
         /*
@@ -18,6 +18,7 @@

 #include <asm/cpufeature.h>             /* boot_cpu_has, ...            */
 #include <asm/mmu_context.h>            /* vma_pkey()                   */
+#include <asm/fpu/internal.h>           /* init_fpstate                 */

 int __execute_only_pkey(struct mm_struct *mm)
 {
@@ -39,17 +40,12 @@ int __execute_only_pkey(struct mm_struct *mm)
          * dance to set PKRU if we do not need to. Check it
          * first and assume that if the execute-only pkey is
          * write-disabled that we do not have to set it
-         * ourselves. We need preempt off so that nobody
-         * can make fpregs inactive.
+         * ourselves.
          */
-        preempt_disable();
         if (!need_to_set_mm_pkey &&
-            current->thread.fpu.initialized &&
             !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
-                preempt_enable();
                 return execute_only_pkey;
         }
-        preempt_enable();

         /*
          * Set up PKRU so that it denies access for everything
@@ -131,7 +127,6 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
  * in the process's lifetime will not accidentally get access
  * to data which is pkey-protected later on.
  */
-static
 u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
                       PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
                       PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
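PKRU dedicates two bits to each protection key: an access-disable bit and a write-disable bit. The initializer above sets the access-disable bit for every key except key 0, so freshly allocated keys deny all access until userspace enables them. A small sketch that reproduces the value; the macro definitions mirror what the kernel uses and are restated here only for illustration:

#include <stdio.h>

/* Two bits per pkey in PKRU: bit 0 = access-disable, bit 1 = write-disable. */
#define PKRU_AD_BIT             0x1u
#define PKRU_WD_BIT             0x2u
#define PKRU_BITS_PER_PKEY      2
#define PKRU_AD_KEY(pkey)       (PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))

int main(void)
{
        unsigned int init_pkru = 0;

        /* Access-disable every key except key 0, the default key. */
        for (int pkey = 1; pkey < 16; pkey++)
                init_pkru |= PKRU_AD_KEY(pkey);

        printf("default init_pkru_value = 0x%08x\n", init_pkru);   /* 0x55555554 */
        return 0;
}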
@@ -147,13 +142,6 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
 void copy_init_pkru_to_fpregs(void)
 {
         u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value);
-        /*
-         * Any write to PKRU takes it out of the XSAVE 'init
-         * state' which increases context switch cost. Avoid
-         * writing 0 when PKRU was already 0.
-         */
-        if (!init_pkru_value_snapshot && !read_pkru())
-                return;
         /*
          * Override the PKRU state that came from 'init_fpstate'
          * with the baseline from the process.
@@ -174,6 +162,7 @@ static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
 static ssize_t init_pkru_write_file(struct file *file,
                  const char __user *user_buf, size_t count, loff_t *ppos)
 {
+        struct pkru_state *pk;
         char buf[32];
         ssize_t len;
         u32 new_init_pkru;
@@ -196,6 +185,10 @@ static ssize_t init_pkru_write_file(struct file *file,
                 return -EINVAL;

         WRITE_ONCE(init_pkru_value, new_init_pkru);
+        pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
+        if (!pk)
+                return -EINVAL;
+        pk->pkru = new_init_pkru;
         return count;
 }
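The init_pkru file above tunes the PKRU baseline that new threads inherit; individual keys are then managed from userspace through the pkey syscalls. A short example of that user-facing side (glibc 2.27+ wrappers, pku-capable hardware assumed, error handling mostly trimmed):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Allocate a key whose initial rights deny all access. */
        int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
        if (pkey < 0) {
                perror("pkey_alloc");
                return 1;
        }

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);

        pkey_set(pkey, 0);                      /* clear AD/WD bits: allow access */
        strcpy(p, "ok");
        pkey_set(pkey, PKEY_DISABLE_ACCESS);    /* touching p now faults */

        printf("wrote \"ok\" under pkey %d\n", pkey);
        pkey_free(pkey);
        return 0;
}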