Fixed build for Linux 5.11+

commit 2018dcd3f4
parent 21b3a00163
Author: Tiberiu Chibici
Date: 2021-05-01 14:47:49 -07:00
6 changed files with 69 additions and 33 deletions
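Linux 5.11 removed the TIF_IA32 thread flag, so every check of the form test_thread_flag(TIF_IA32) in the module stopped compiling; on 5.11+ the bitness of a task is read from its saved user registers instead. The same release renamed fcheck_files() to files_lookup_fd_rcu() and replaced ksys_close() with close_fd(), which the kqueue changes below account for. Each affected file gains a small version-gated shim; a sketch of the pattern repeated in the hunks below (the linux/version.h include is assumed, the files already pull it in indirectly):

#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
/* Pre-5.11 kernels still carry the per-thread flag. */
# define check_64bit_mode(regs) !test_thread_flag(TIF_IA32)
#else
/* 5.11+: TIF_IA32 is gone, so ask the saved register frame instead. */
# define check_64bit_mode(regs) any_64bit_mode(regs)
#endif

/* Typical call site, as in the Mach-O loader below:
 *   commpage_address(check_64bit_mode(current_pt_regs()))
 */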

View File

@@ -54,6 +54,12 @@
// To get LINUX_SIGRTMIN
#include <rtsig.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+# define check_64bit_mode(regs) !test_thread_flag(TIF_IA32)
+#else
+# define check_64bit_mode(regs) any_64bit_mode(regs)
+#endif
extern char* task_copy_vchroot_path(task_t t);
struct load_results
@@ -222,7 +228,7 @@ int setup_space(struct linux_binprm* bprm, struct load_results* lr)
// Explanation:
// Using STACK_TOP would cause the stack to be placed just above the commpage
// and would collide with it eventually.
-unsigned long stackAddr = commpage_address(!test_thread_flag(TIF_IA32));
+unsigned long stackAddr = commpage_address(check_64bit_mode(current_pt_regs()));
setup_new_exec(bprm);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
@@ -560,7 +566,7 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
-bool ia32 = test_thread_flag(TIF_IA32);
+bool ia32 = !check_64bit_mode(regs);
start_thread_common(regs, new_ip, new_sp,
ia32 ? __USER32_CS : __USER_CS,
__USER_DS,
@@ -872,7 +878,7 @@ int macho_coredump(struct coredump_params* cprm)
#endif
// Write the Mach-O header and loader commands
-if (test_thread_flag(TIF_IA32))
+if (!check_64bit_mode(current_pt_regs()))
{
// 32-bit executables
if (!macho_dump_headers32(cprm))

View File

@@ -25,6 +25,12 @@
#include <mach/mach_types.h>
#include <duct/duct_post_xnu.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+# define check_64bit_mode(regs) !test_thread_flag(TIF_IA32)
+#else
+# define check_64bit_mode(regs) any_64bit_mode(regs)
+#endif
kern_return_t darling_host_info(host_flavor_t flavor, host_info_t host_info_out, mach_msg_type_number_t* host_info_outCnt)
{
switch (flavor)
@@ -49,7 +55,7 @@ kern_return_t darling_host_info(host_flavor_t flavor, host_info_t host_info_out, mach_msg_type_number_t* host_info_outCnt)
hinfo->cpu_type = CPU_TYPE_I386;
hinfo->cpu_subtype = CPU_SUBTYPE_I386_ALL;
#elif defined(__x86_64__)
-if (!test_thread_flag(TIF_IA32))
+if (check_64bit_mode(task_pt_regs(linux_current)))
{
hinfo->cpu_type = CPU_TYPE_I386;
hinfo->cpu_subtype = CPU_SUBTYPE_X86_64_ALL;

View File

@@ -4,7 +4,9 @@
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
+#define current linux_current
#include <linux/fdtable.h>
+#undef current
#include <linux/poll.h>
#if 0 // TODO: EVFILT_SOCK support (we've gotta fix some header collisions)
#include <linux/net.h>
@@ -32,8 +34,17 @@
#include "kqueue.h"
#include "task_registry.h"
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+# define darling_fcheck_files fcheck_files
+# define darling_close_fd ksys_close
+#else
+# define darling_fcheck_files files_lookup_fd_rcu
+# define darling_close_fd close_fd
+#endif
// re-define `fcheck` because we use `linux_current`
-#define fcheck(fd) fcheck_files(linux_current->files, fd)
+#define fcheck(fd) darling_fcheck_files(linux_current->files, fd)
struct dkqueue_pte;
typedef SLIST_HEAD(dkqueue_pte_head, dkqueue_pte) dkqueue_pte_head_t;
@@ -252,7 +263,7 @@ static struct file *__fget_files(struct files_struct *files, unsigned int fd,
rcu_read_lock();
loop:
-file = fcheck_files(files, fd);
+file = darling_fcheck_files(files, fd);
if (file) {
/* File object ref couldn't be taken.
* dup2() atomicity guarantee is the reason
@@ -1137,7 +1148,7 @@ static void dkqueue_fork_listener(int pid, void* context, darling_proc_event_t e
LIST_FOREACH(curr, &parent_proc->p_fd->kqueue_list, link) {
dkqueue_log("closing kqueue with fd %d on fork", curr->fd);
proc_fdunlock(parent_proc);
-ksys_close(curr->fd);
+darling_close_fd(curr->fd);
proc_fdlock(parent_proc);
}
proc_fdunlock(parent_proc);
@@ -1258,7 +1269,7 @@ int darling_kqueue_create(struct task* task) {
error_out:
if (fd >= 0) {
-ksys_close(fd);
+darling_close_fd(fd);
} else {
// we only cleanup the rest ourselves if the fd still hasn't been created.
// otherwise (if it *has* been created), Linux will call `dkqueue_release` on the file
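The kqueue code refers to the Linux task as linux_current rather than plain current (see the `fcheck` comment above), and the fd-table helpers it relies on were renamed in 5.11. A sketch restating the pattern from the hunks above, with explanatory comments added; the temporary alias around the include is presumably needed because fdtable.h's inline helpers refer to current:

/* Include the fd-table helpers while 'current' means the Linux task. */
#define current linux_current
#include <linux/fdtable.h>
#undef current

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
# define darling_fcheck_files fcheck_files        /* RCU fd -> struct file lookup, old name */
# define darling_close_fd     ksys_close          /* close an fd from kernel code */
#else
# define darling_fcheck_files files_lookup_fd_rcu /* 5.11+ name for fcheck_files */
# define darling_close_fd     close_fd            /* 5.11+ replacement for ksys_close */
#endif

/* fcheck() is respelled so it uses linux_current instead of plain current. */
#define fcheck(fd) darling_fcheck_files(linux_current->files, fd)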

View File

@@ -30,7 +30,9 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/eventfd.h>
+#define current linux_current
#include <linux/fdtable.h>
+#undef current
#include <linux/syscalls.h>
#include <linux/fs_struct.h>
#include <linux/moduleparam.h>
@@ -77,6 +79,12 @@
#undef kfree
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+# define check_64bit_mode(regs) !test_thread_flag(TIF_IA32)
+#else
+# define check_64bit_mode(regs) any_64bit_mode(regs)
+#endif
typedef long (*trap_handler)(task_t, ...);
static void *commpage32, *commpage64;
@@ -445,7 +453,7 @@ int mach_dev_mmap(struct file* file, struct vm_area_struct *vma)
if (vma->vm_pgoff != 0)
return -LINUX_EINVAL;
-if (test_thread_flag(TIF_IA32))
+if (!check_64bit_mode(current_pt_regs()))
{
if (length != commpage_length(false))
return -LINUX_EINVAL;
@@ -490,7 +498,7 @@ struct file* xnu_task_setup(void)
int commpage_install(struct file* xnu_task)
{
unsigned long addr;
-bool _64bit = !test_thread_flag(TIF_IA32);
+bool _64bit = check_64bit_mode(current_pt_regs());
addr = vm_mmap(xnu_task, commpage_address(_64bit), commpage_length(_64bit), PROT_READ, MAP_SHARED | MAP_FIXED, 0);
@@ -2138,7 +2146,7 @@ thread_get_state(
static int state_to_kernel(const struct thread_state* state)
{
#ifdef __x86_64__
-if (!test_thread_flag(TIF_IA32))
+if (check_64bit_mode(current_pt_regs()))
{
x86_thread_state64_t tstate;
x86_float_state64_t fstate;
@@ -2175,7 +2183,7 @@ static int state_to_kernel(const struct thread_state* state)
static int state_from_kernel(struct thread_state* state)
{
#ifdef __x86_64__
-if (!test_thread_flag(TIF_IA32))
+if (check_64bit_mode(current_pt_regs()))
{
x86_thread_state64_t tstate;
x86_float_state64_t fstate;

View File

@@ -204,8 +204,11 @@ thread_get_state_internal(
if (*state_count < x86_THREAD_STATE_COUNT)
return KERN_INVALID_ARGUMENT;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+#else
+if (user_64bit_mode(task_pt_regs(ltask)))
+#endif
{
s->tsh.flavor = flavor = x86_THREAD_STATE64;
s->tsh.count = x86_THREAD_STATE64_COUNT;
@@ -229,7 +232,7 @@ thread_get_state_internal(
if (*state_count < x86_FLOAT_STATE_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
{
s->fsh.flavor = flavor = x86_FLOAT_STATE64;
s->fsh.count = x86_FLOAT_STATE64_COUNT;
@@ -252,7 +255,7 @@ thread_get_state_internal(
if (*state_count < x86_DEBUG_STATE_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
{
s->dsh.flavor = flavor = x86_DEBUG_STATE64;
s->dsh.count = x86_DEBUG_STATE64_COUNT;
@@ -276,7 +279,7 @@ thread_get_state_internal(
{
if (*state_count < x86_THREAD_STATE32_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
x86_thread_state32_t* s = (x86_thread_state32_t*) state;
@@ -291,7 +294,7 @@ thread_get_state_internal(
{
if (*state_count < x86_FLOAT_STATE32_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
x86_float_state32_t* s = (x86_float_state32_t*) state;
@@ -317,7 +320,7 @@ thread_get_state_internal(
{
if (*state_count < x86_THREAD_STATE64_COUNT)
return KERN_INVALID_ARGUMENT;
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
x86_thread_state64_t* s = (x86_thread_state64_t*) state;
@@ -333,7 +336,7 @@ thread_get_state_internal(
{
if (*state_count < x86_DEBUG_STATE32_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
x86_debug_state32_t* s = (x86_debug_state32_t*) state;
@@ -364,7 +367,7 @@ thread_get_state_internal(
{
if (*state_count < x86_DEBUG_STATE64_COUNT)
return KERN_INVALID_ARGUMENT;
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
x86_debug_state64_t* s = (x86_debug_state64_t*) state;
@@ -492,7 +495,7 @@ thread_set_state(
if (s->tsh.flavor == x86_THREAD_STATE32)
{
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
state_count = s->tsh.count;
@@ -500,7 +503,7 @@ thread_set_state(
}
else if (s->tsh.flavor == x86_THREAD_STATE64)
{
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
state_count = s->tsh.count;
@@ -521,7 +524,7 @@ thread_set_state(
if (s->fsh.flavor == x86_FLOAT_STATE32)
{
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
state_count = s->fsh.count;
@@ -529,7 +532,7 @@ thread_set_state(
}
else if (s->fsh.flavor == x86_FLOAT_STATE64)
{
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
state_count = s->fsh.count;
@@ -550,7 +553,7 @@ thread_set_state(
if (s->dsh.flavor == x86_DEBUG_STATE32)
{
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
state_count = s->dsh.count;
@@ -558,7 +561,7 @@ thread_set_state(
}
else if (s->dsh.flavor == x86_DEBUG_STATE64)
{
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
state_count = s->dsh.count;
@@ -578,7 +581,7 @@ thread_set_state(
{
if (state_count < x86_THREAD_STATE32_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
const x86_thread_state32_t* s = (x86_thread_state32_t*) state;
@@ -590,7 +593,7 @@ thread_set_state(
{
if (state_count < x86_THREAD_STATE64_COUNT)
return KERN_INVALID_ARGUMENT;
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
const x86_thread_state64_t* s = (x86_thread_state64_t*) state;
@@ -604,7 +607,7 @@ thread_set_state(
{
if (state_count < x86_FLOAT_STATE32_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
const x86_float_state32_t* s = (x86_float_state32_t*) state;
@@ -617,7 +620,7 @@ thread_set_state(
{
if (state_count < x86_FLOAT_STATE64_COUNT)
return KERN_INVALID_ARGUMENT;
-if (!darling_is_task_64bit())
+if (user_64bit_mode(current_pt_regs()))
return KERN_INVALID_ARGUMENT;
const x86_float_state64_t* s = (x86_float_state64_t*) state;
@@ -627,7 +630,7 @@ thread_set_state(
}
case x86_DEBUG_STATE32:
{
-if (!test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
const x86_debug_state32_t* s = (x86_debug_state32_t*) state;
x86_debug_state64_t s64;
@@ -646,7 +649,7 @@ thread_set_state(
}
case x86_DEBUG_STATE64:
{
-if (test_ti_thread_flag(task_thread_info(ltask), TIF_IA32))
+if (!user_64bit_mode(task_pt_regs(ltask)))
return KERN_INVALID_ARGUMENT;
const x86_debug_state64_t* s = (x86_debug_state64_t*) state;

View File

@@ -437,8 +437,10 @@ boolean_t vm_map_copy_validate_size(vm_map_t dst_map, vm_map_copy_t copy, vm_map
int darling_is_task_64bit(void)
{
-#if __x86_64__ || __arm64__
+#if (__x86_64__ || __arm64__) && LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
return !test_thread_flag(TIF_IA32);
+#elif (__x86_64__ || __arm64__)
+return any_64bit_mode(task_pt_regs(linux_current));
#else
return 0;
#endif
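The thread_get_state_internal/thread_set_state hunks above all repeat one guard: a 32-bit state flavor is rejected for a 64-bit target thread and vice versa, with the target's bitness now read from its saved registers via user_64bit_mode(task_pt_regs(ltask)) on 5.11+ kernels. A consolidated form of that guard could look like this (hypothetical helper, not part of the commit; ltask is the Linux task backing the Mach thread, as in the hunks above):

static inline bool flavor_matches_bitness(struct task_struct* ltask, bool flavor_is_64bit)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
	/* Pre-5.11: the per-thread flag still tells 32-bit from 64-bit tasks. */
	bool task_is_64bit = !test_ti_thread_flag(task_thread_info(ltask), TIF_IA32);
#else
	/* 5.11+: inspect the target thread's saved user registers. */
	bool task_is_64bit = user_64bit_mode(task_pt_regs(ltask));
#endif
	return flavor_is_64bit == task_is_64bit;
}

/* e.g. in thread_set_state():
 *   case x86_THREAD_STATE64:
 *       if (!flavor_matches_bitness(ltask, true))
 *           return KERN_INVALID_ARGUMENT;
 */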