mirror of https://github.com/FEX-Emu/linux.git
commit 9ec2b804e0
Most of the time we can simply use the iret instruction to exit the kernel, rather than having to use the iret hypercall - the only exception is if we're returning into vm86 mode, or from delivering an NMI (which we don't support yet).

When running native, iret has the behaviour of testing for a pending interrupt atomically with re-enabling interrupts. Unfortunately there's no way to do this with Xen, so there's a window in which we could get a recursive exception after enabling events but before actually returning to userspace.

This causes a problem: if the nested interrupt causes one of the task's TIF_WORK_MASK flags to be set, it will not be checked again before returning to userspace. This means that pending work may be left pending indefinitely, until the process enters and leaves the kernel again. The net effect is that a pending signal or reschedule event could be delayed for an unbounded amount of time.

To deal with this, the Xen event upcall handler checks to see if the EIP is within the critical section of the iret code: from the point where events are (potentially) enabled up to the iret itself. If it's within this range, it calls the iret critical section fixup, which adjusts the stack to deal with any unrestored registers, and then shifts the stack frame up to replace the previous invocation.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
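A minimal C sketch of that range check, for illustration only: the marker symbols xen_iret_start_crit/xen_iret_end_crit, the fixup routine's C signature, and the helper function are all hypothetical names assumed here; the real logic lives in assembly in entry.S.

    #include <asm/ptrace.h>

    /* Hypothetical symbols bracketing the iret critical section in
     * entry.S: from where events may be re-enabled up to the iret
     * instruction itself. */
    extern const char xen_iret_start_crit[], xen_iret_end_crit[];

    /* Hypothetical fixup: deals with any not-yet-restored registers
     * and shifts the stack frame up so the nested invocation replaces
     * the interrupted one. */
    void xen_iret_crit_fixup(struct pt_regs *regs);

    /* Called from the event upcall path with the interrupted
     * register state. */
    static void xen_check_iret_critical(struct pt_regs *regs)
    {
            unsigned long eip = regs->eip;

            /* If we interrupted the critical window, TIF_WORK_MASK
             * flags set by this upcall would otherwise go unchecked
             * before the return to userspace. */
            if (eip >= (unsigned long)xen_iret_start_crit &&
                eip < (unsigned long)xen_iret_end_crit)
                    xen_iret_crit_fixup(regs);
    }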
72 lines
2.0 KiB
C
#ifndef XEN_OPS_H
#define XEN_OPS_H

#include <linux/init.h>

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];

void xen_copy_trap_info(struct trap_info *traps);

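/* Per-CPU: this CPU's vcpu_info pointer, and the cr3 it currently
   has loaded. */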
DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
DECLARE_PER_CPU(unsigned long, xen_cr3);

extern struct start_info *xen_start_info;
extern struct shared_info *HYPERVISOR_shared_info;

char * __init xen_memory_setup(void);
void __init xen_arch_setup(void);
void __init xen_init_IRQ(void);

void xen_setup_timer(int cpu);
void xen_setup_cpu_clockevents(void);
unsigned long xen_cpu_khz(void);
void __init xen_time_init(void);
unsigned long xen_get_wallclock(void);
int xen_set_wallclock(unsigned long time);
unsigned long long xen_sched_clock(void);

void xen_mark_init_mm_pinned(void);

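/* Per-CPU paravirt lazy-mode state.  While a lazy mode is active,
   state updates can be batched and issued to the hypervisor together
   rather than as individual hypercalls. */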
DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);

static inline unsigned xen_get_lazy_mode(void)
{
	return x86_read_percpu(xen_lazy_mode);
}

void __init xen_fill_possible_map(void);

void __init xen_setup_vcpu_info_placement(void);
void xen_smp_prepare_boot_cpu(void);
void xen_smp_prepare_cpus(unsigned int max_cpus);
int xen_cpu_up(unsigned int cpu);
void xen_smp_cpus_done(unsigned int max_cpus);

void xen_smp_send_stop(void);
void xen_smp_send_reschedule(int cpu);
int xen_smp_call_function(void (*func)(void *info), void *info, int nonatomic,
			  int wait);
int xen_smp_call_function_single(int cpu, void (*func)(void *info), void *info,
				 int nonatomic, int wait);

int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
			       void *info, int wait);


/* Declare an asm function, along with symbols needed to make it
   inlineable */
#define DECL_ASM(ret, name, ...)		\
	ret name(__VA_ARGS__);			\
	extern char name##_end[];		\
	extern char name##_reloc[]

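/*
 * For illustration, the first invocation below,
 * DECL_ASM(void, xen_irq_enable_direct, void), expands to
 *
 *	void xen_irq_enable_direct(void);
 *	extern char xen_irq_enable_direct_end[];
 *	extern char xen_irq_enable_direct_reloc[]
 *
 * with the trailing semicolon supplied by the invocation itself.
 * The _end and _reloc symbols are what the comment above means by
 * "symbols needed to make it inlineable".
 */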
DECL_ASM(void, xen_irq_enable_direct, void);
DECL_ASM(void, xen_irq_disable_direct, void);
DECL_ASM(unsigned long, xen_save_fl_direct, void);
DECL_ASM(void, xen_restore_fl_direct, unsigned long);

void xen_iret_direct(void);
#endif /* XEN_OPS_H */