Many additions + stubs; basic kernel MIG RPC working

Most of the newly added functions are just stubs for MIG calls. However, we now properly initialize IPC and related subsystems, and we have a working copyin/copyout pair that allows basic `mach_msg_overwrite_trap` usage.
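
For context, a minimal sketch (not part of this commit) of the kind of message send this enables from a guest process; `mach_msg` ultimately bottoms out in `mach_msg_overwrite_trap`, and the new copyin/copyout path is what lets the server move the message in and out of the caller's address space:

#include <mach/mach.h>

// hypothetical example: send an empty message on a port we hold a send right to
static kern_return_t send_empty(mach_port_t dest) {
	mach_msg_header_t msg = {0};
	msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msg.msgh_remote_port = dest;
	msg.msgh_size = sizeof(msg);
	return mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0,
			MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}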

dyld now progresses to `getHostInfo` and successfully retrieves `host_info` with a kernel MIG call (and then dies on `mach_port_deallocate`, since that trap hasn't been updated yet).
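
What dyld's `getHostInfo` boils down to is roughly the following (a sketch, not dyld's actual code):

#include <mach/mach.h>
#include <stdio.h>

// rough sketch: fetch HOST_BASIC_INFO from the kernel with a MIG call
static void get_host_info(void) {
	host_basic_info_data_t info;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	mach_port_t host = mach_host_self();
	if (host_info(host, HOST_BASIC_INFO, (host_info_t)&info, &count) == KERN_SUCCESS) {
		printf("max_cpus: %d\n", info.max_cpus);
	}
	// releasing the host port is the mach_port_deallocate call that currently dies
	mach_port_deallocate(mach_task_self(), host);
}
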
Ariel Abreu 2022-01-20 00:11:51 -05:00
parent 3bfc652c9b
commit be93cc46b7
36 changed files with 1812 additions and 88 deletions

.gitignore (new vendored file, 1 line)

@@ -0,0 +1 @@
.gdb_history

@@ -129,6 +129,7 @@ add_compile_options(
-fblocks
-ffunction-sections
-fdata-sections
-Wno-incompatible-library-redeclaration
)
set(MIG_USER_SOURCE_SUFFIX "User.c")
@@ -196,6 +197,7 @@ add_custom_target(kernel_mig_generate
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/upl.h
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/host_notify_reply.h
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/vm32_map_server.h
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/vm32_map_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/mach_notify.h
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/mach_voucher_attr_control.h
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/memory_entry_server.h
@@ -237,6 +239,8 @@ add_library(darlingserver_duct_tape
src/thread.c
src/timer.c
src/traps.c
src/host.c
src/processor.c
xnu/libkern/os/refcnt.c
xnu/libkern/gen/OSAtomicOperations.c
@@ -261,6 +265,9 @@ add_library(darlingserver_duct_tape
xnu/osfmk/kern/timer_call.c
xnu/osfmk/kern/clock_oldops.c
xnu/osfmk/kern/sync_sema.c
xnu/osfmk/kern/sync_lock.c
xnu/osfmk/kern/syscall_emulation.c
xnu/osfmk/kern/ux_handler.c
xnu/osfmk/ipc/ipc_entry.c
xnu/osfmk/ipc/ipc_hash.c
@@ -287,6 +294,8 @@ add_library(darlingserver_duct_tape
xnu/osfmk/prng/prng_random.c
xnu/osfmk/vm/vm32_user.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/clock_priv_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/clock_reply_user.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/clock_server.c
@@ -310,6 +319,7 @@ add_library(darlingserver_duct_tape
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/task_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/thread_act_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/mach_eventlink_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/mach/vm32_map_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/device/device_server.c
${CMAKE_CURRENT_BINARY_DIR}/xnu/osfmk/UserNotification/UNDReplyServer.c
)

@@ -2,6 +2,8 @@
#define _DARLINGSERVER_DUCT_TAPE_H_
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <libsimple/lock.h>
@@ -42,6 +44,8 @@ typedef dtape_thread_handle_t (*dtape_hook_thread_create_kernel_f)(void);
typedef void (*dtape_hook_thread_start_f)(void* thread_context, dtape_thread_continuation_callback_f continuation_callback);
typedef void (*dtape_hook_current_thread_interrupt_disable_f)(void);
typedef void (*dtape_hook_current_thread_interrupt_enable_f)(void);
typedef bool (*dtape_hook_task_read_memory_f)(void* task_context, uintptr_t remote_address, void* local_buffer, size_t length);
typedef bool (*dtape_hook_task_write_memory_f)(void* task_context, uintptr_t remote_address, const void* local_buffer, size_t length);
typedef struct dtape_hooks {
dtape_hook_thread_suspend_f thread_suspend;
@@ -55,13 +59,18 @@ typedef struct dtape_hooks {
dtape_hook_thread_start_f thread_start;
dtape_hook_current_thread_interrupt_disable_f current_thread_interrupt_disable;
dtape_hook_current_thread_interrupt_enable_f current_thread_interrupt_enable;
dtape_hook_task_read_memory_f task_read_memory;
dtape_hook_task_write_memory_f task_write_memory;
} dtape_hooks_t;
void dtape_init(const dtape_hooks_t* hooks);
void dtape_deinit(void);
uint32_t dtape_task_self_trap(void);
uint32_t dtape_host_self_trap(void);
uint32_t dtape_thread_self_trap(void);
uint32_t dtape_mach_reply_port(void);
int dtape_mach_msg_overwrite(uintptr_t msg, int32_t option, uint32_t send_size, uint32_t rcv_size, uint32_t rcv_name, uint32_t timeout, uint32_t notify, uintptr_t rcv_msg, uint32_t rcv_limit);
/**
* The threshold beyond which thread IDs are considered IDs for kernel threads.

@@ -0,0 +1,33 @@
#ifndef _DARLINGSERVER_DUCT_TAPE_MEMORY_H_
#define _DARLINGSERVER_DUCT_TAPE_MEMORY_H_
#include <stdint.h>
#include <mach/vm_types.h>
#include <os/refcnt.h>
struct dtape_task;
struct _vm_map {
uint32_t dtape_page_shift;
uint64_t max_offset;
os_refcnt_t map_refcnt;
struct dtape_task* dtape_task;
};
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->dtape_page_shift : PAGE_SHIFT)
struct vm_map_copy {
int type;
uint64_t offset;
uint64_t size;
};
#define VM_MAP_COPY_ENTRY_LIST 1
#define VM_MAP_COPY_OBJECT 2
#define VM_MAP_COPY_KERNEL_BUFFER 3
void dtape_memory_init(void);
vm_map_t dtape_vm_map_create(struct dtape_task* task);
void dtape_vm_map_destroy(vm_map_t map);
#endif // _DARLINGSERVER_DUCT_TAPE_MEMORY_H_

@@ -0,0 +1,6 @@
#ifndef _DARLINGSERVER_DUCT_TAPE_PROCESSOR_H_
#define _DARLINGSERVER_DUCT_TAPE_PROCESSOR_H_
void dtape_processor_init(void);
#endif // _DARLINGSERVER_DUCT_TAPE_PROCESSOR_H_

@@ -3,10 +3,18 @@
#include <stdbool.h>
void dtape_stub_log(const char* function_name, int safety);
void dtape_stub_log(const char* function_name, int safety, const char* subsection);
#define dtape_stub() (dtape_stub_log(__FUNCTION__, 0))
#define dtape_stub_safe() (dtape_stub_log(__FUNCTION__, 1))
#define dtape_stub_unsafe() (dtape_stub_log(__FUNCTION__, -1))
// for general functions where it's unknown whether they can be safely stubbed or not
#define dtape_stub(...) (dtape_stub_log(__FUNCTION__, 0, "" __VA_ARGS__))
// for functions that have been confirmed to be okay being stubbed
#define dtape_stub_safe(...) (dtape_stub_log(__FUNCTION__, 1, "" __VA_ARGS__))
// for functions that have been confirmed to require an actual implementation (rather than a simple stub)
#define dtape_stub_unsafe(...) ({ \
dtape_stub_log(__FUNCTION__, -1, "" __VA_ARGS__); \
__builtin_unreachable(); \
})
#endif // _DARLINGSERVER_DUCT_TAPE_STUBS_H_

@@ -15,4 +15,6 @@ static dtape_task_t* dtape_task_for_xnu_task(task_t xnu_task) {
return (dtape_task_t*)((char*)xnu_task - offsetof(dtape_task_t, xnu_task));
};
void dtape_task_init(void);
#endif // _DARLINGSERVER_DUCT_TAPE_TASK_H_

duct-tape/src/host.c (new file, 157 lines)

@@ -0,0 +1,157 @@
#include <darlingserver/duct-tape/stubs.h>
#include <kern/host.h>
#include <mach_debug/mach_debug.h>
#include <libsimple/lock.h>
// Linux sysinfo (from the sysinfo man page)
struct sysinfo {
long uptime;
unsigned long loads[3];
unsigned long totalram;
unsigned long freeram;
unsigned long sharedram;
unsigned long bufferram;
unsigned long totalswap;
unsigned long freeswap;
unsigned short procs;
unsigned long totalhigh;
unsigned long freehigh;
unsigned int mem_unit;
char _f[20 - 2 * sizeof(long) - sizeof(int)];
};
int sysinfo(struct sysinfo *info);
// Linux sysconf
long sysconf(int name);
#define _SC_NPROCESSORS_CONF 83
#define _SC_NPROCESSORS_ONLN 84
static void cache_sysinfo(void* context) {
struct sysinfo* cached_sysinfo = context;
if (sysinfo(cached_sysinfo) < 0) {
panic("Failed to retrieve sysinfo");
}
};
kern_return_t host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t* count) {
static libsimple_once_t once_token = LIBSIMPLE_ONCE_INITIALIZER;
static struct sysinfo cached_sysinfo;
if (host == HOST_NULL) {
return KERN_INVALID_ARGUMENT;
}
switch (flavor) {
case HOST_BASIC_INFO: {
host_basic_info_t basic_info = (host_basic_info_t)info;
// need at least enough space for the legacy structure
if (*count < HOST_BASIC_INFO_OLD_COUNT) {
return KERN_FAILURE;
}
libsimple_once(&once_token, cache_sysinfo, &cached_sysinfo);
basic_info->memory_size = cached_sysinfo.totalram * cached_sysinfo.mem_unit; // sysinfo reports totalram in units of mem_unit
#if __x86_64__ || __i386__
basic_info->cpu_type = CPU_TYPE_X86;
basic_info->cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
#else
#error Unknown CPU type
#endif
basic_info->max_cpus = sysconf(_SC_NPROCESSORS_CONF);
basic_info->avail_cpus = sysconf(_SC_NPROCESSORS_ONLN);
// if there's room for the modern structure, fill in some additional info
if (*count >= HOST_BASIC_INFO_COUNT) {
// TODO: properly differentiate physical vs. logical cores
dtape_stub_safe("modern HOST_BASIC_INFO");
basic_info->cpu_threadtype = CPU_THREADTYPE_NONE;
basic_info->physical_cpu = basic_info->avail_cpus;
basic_info->physical_cpu_max = basic_info->max_cpus;
basic_info->logical_cpu = basic_info->avail_cpus;
basic_info->logical_cpu_max = basic_info->max_cpus;
basic_info->max_mem = basic_info->memory_size;
*count = HOST_BASIC_INFO_COUNT;
} else {
*count = HOST_BASIC_INFO_OLD_COUNT;
}
return KERN_SUCCESS;
}
case HOST_DEBUG_INFO_INTERNAL:
return KERN_NOT_SUPPORTED;
case HOST_SCHED_INFO:
dtape_stub_unsafe("HOST_SCHED_INFO");
case HOST_RESOURCE_SIZES:
dtape_stub_unsafe("HOST_RESOURCE_SIZES");
case HOST_PRIORITY_INFO:
dtape_stub_unsafe("HOST_PRIORITY_INFO");
case HOST_PREFERRED_USER_ARCH:
dtape_stub_unsafe("HOST_PREFERRED_USER_ARCH");
case HOST_CAN_HAS_DEBUGGER:
dtape_stub_unsafe("HOST_CAN_HAS_DEBUGGER");
case HOST_VM_PURGABLE:
dtape_stub_unsafe("HOST_VM_PURGABLE");
case HOST_MACH_MSG_TRAP:
case HOST_SEMAPHORE_TRAPS:
*count = 0;
return KERN_SUCCESS;
default:
return KERN_INVALID_ARGUMENT;
}
};
kern_return_t host_default_memory_manager(host_priv_t host_priv, memory_object_default_t* default_manager, memory_object_cluster_size_t cluster_size) {
dtape_stub_unsafe();
};
kern_return_t host_get_boot_info(host_priv_t host_priv, kernel_boot_info_t boot_info) {
dtape_stub_unsafe();
};
kern_return_t host_get_UNDServer(host_priv_t host_priv, UNDServerRef* serverp) {
dtape_stub_unsafe();
};
kern_return_t host_set_UNDServer(host_priv_t host_priv, UNDServerRef server) {
dtape_stub_unsafe();
};
kern_return_t host_lockgroup_info(host_t host, lockgroup_info_array_t* lockgroup_infop, mach_msg_type_number_t* lockgroup_infoCntp) {
dtape_stub_unsafe();
};
kern_return_t host_reboot(host_priv_t host_priv, int options) {
dtape_stub_unsafe();
};
kern_return_t host_security_create_task_token(host_security_t host_security, task_t parent_task, security_token_t sec_token, audit_token_t audit_token, host_priv_t host_priv, ledger_port_array_t ledger_ports, mach_msg_type_number_t num_ledger_ports, boolean_t inherit_memory, task_t* child_task) {
dtape_stub_safe();
return KERN_NOT_SUPPORTED;
};
kern_return_t host_security_set_task_token(host_security_t host_security, task_t task, security_token_t sec_token, audit_token_t audit_token, host_priv_t host_priv) {
dtape_stub_unsafe();
};
kern_return_t host_virtual_physical_table_info(host_t host, hash_info_bucket_array_t* infop, mach_msg_type_number_t* countp) {
dtape_stub_unsafe();
};
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t vm_stats(void* info, unsigned int* count) {
dtape_stub_unsafe();
};

@@ -226,6 +226,18 @@ void lck_ticket_assert_owned(lck_ticket_t* tlock) {
lck_spin_assert(&tlock->dtape_lock, LCK_ASSERT_OWNED);
};
//
// read-write lock
//
lck_rw_type_t lck_rw_done(lck_rw_t* lock) {
dtape_stub_unsafe();
};
void lck_rw_lock_exclusive(lck_rw_t* lock) {
dtape_stub_unsafe();
};
// <copied from="xnu://7195.141.2/osfmk/kern/locks.c">
/*

@@ -1,4 +1,7 @@
#include <darlingserver/duct-tape/stubs.h>
#include <darlingserver/duct-tape/memory.h>
#include <darlingserver/duct-tape/task.h>
#include <darlingserver/duct-tape/hooks.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
@@ -6,6 +9,8 @@
#include <stdlib.h>
#include <mach_debug/mach_debug.h>
struct zone {
const char* name;
vm_size_t size;
@@ -17,13 +22,14 @@ struct kalloc_heap KHEAP_DEFAULT[1];
struct kalloc_heap KHEAP_DATA_BUFFERS[1];
// stub
vm_map_t kernel_map;
vm_size_t kalloc_max_prerounded = 0;
void* calloc(size_t count, size_t size);
void* aligned_alloc(size_t alignment, size_t size);
void* mmap(void* addr, size_t length, int prot, int flags, int fd, long int offset);
int munmap(void* addr, size_t length);
long sysconf(int name);
#define MAP_ANONYMOUS 0x20
#define MAP_PRIVATE 0x02
@@ -34,6 +40,44 @@ int munmap(void* addr, size_t length);
#define MAP_FAILED ((void*)-1)
#define _SC_PAGESIZE 30
void dtape_memory_init(void) {
};
vm_map_t dtape_vm_map_create(struct dtape_task* task) {
vm_map_t map = malloc(sizeof(struct _vm_map));
if (!map) {
return map;
}
os_ref_init(&map->map_refcnt, NULL);
map->max_offset = MACH_VM_MAX_ADDRESS;
map->dtape_page_shift = __builtin_ctzl(sysconf(_SC_PAGESIZE));
map->dtape_task = task;
return map;
};
void dtape_vm_map_destroy(vm_map_t map) {
if (os_ref_release(&map->map_refcnt) != 0) {
panic("VM map still in-use at destruction");
}
free(map);
};
void vm_map_reference(vm_map_t map) {
os_ref_retain(&map->map_refcnt);
};
void vm_map_deallocate(vm_map_t map) {
os_ref_release_live(&map->map_refcnt);
};
// TODO: zone-based allocations could be optimized to not just use malloc
zone_t zone_create(const char* name, vm_size_t size, zone_create_flags_t flags) {
@@ -136,3 +180,361 @@ kern_return_t kernel_memory_allocate(vm_map_t map, vm_offset_t* addrp, vm_size_t
kern_return_t vm_deallocate(vm_map_t map, vm_offset_t start, vm_size_t size) {
return (munmap((void*)start, size) == 0) ? KERN_SUCCESS : KERN_FAILURE;
};
kern_return_t vm_allocate_kernel(vm_map_t map, vm_offset_t* addr, vm_size_t size, int flags, vm_tag_t tag) {
mach_vm_offset_t tmp;
kern_return_t status = mach_vm_allocate_kernel(map, &tmp, size, flags, tag);
if (status == KERN_SUCCESS) {
*addr = tmp;
}
return status;
};
kern_return_t kmem_alloc(vm_map_t map, vm_offset_t* addrp, vm_size_t size, vm_tag_t tag) {
return kernel_memory_allocate(map, addrp, size, 0, 0, tag);
};
void kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size) {
vm_deallocate(map, addr, size);
};
kern_return_t copyoutmap(vm_map_t map, void* fromdata, vm_map_address_t toaddr, vm_size_t length) {
if (map == kernel_map) {
memmove((void*)toaddr, fromdata, length); // copy out: kernel buffer -> destination address
return KERN_SUCCESS;
} else {
return dtape_hooks->task_write_memory(map->dtape_task->context, toaddr, fromdata, length) ? KERN_SUCCESS : KERN_FAILURE;
}
};
kern_return_t copyinmap(vm_map_t map, vm_map_offset_t fromaddr, void* todata, vm_size_t length) {
if (map == kernel_map) {
memmove(todata, (void*)fromaddr, length); // copy in: source address -> kernel buffer
return KERN_SUCCESS;
} else {
return dtape_hooks->task_read_memory(map->dtape_task->context, fromaddr, todata, length) ? KERN_SUCCESS : KERN_FAILURE;
}
};
int (copyin)(const user_addr_t user_addr, void* kernel_addr, vm_size_t nbytes) {
return (copyinmap(current_map(), user_addr, kernel_addr, nbytes) == KERN_SUCCESS) ? 0 : 1;
};
int (copyout)(const void* kernel_addr, user_addr_t user_addr, vm_size_t nbytes) {
// it doesn't actually modify kernel_addr
return (copyoutmap(current_map(), (void*)kernel_addr, user_addr, nbytes) == KERN_SUCCESS) ? 0 : 1;
};
int copyinmsg(const user_addr_t user_addr, char* kernel_addr, mach_msg_size_t nbytes) {
return (copyin)(user_addr, kernel_addr, nbytes);
};
int copyoutmsg(const char* kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes) {
return (copyout)(kernel_addr, user_addr, nbytes);
};
kern_return_t kmem_suballoc(vm_map_t parent, vm_offset_t* addr, vm_size_t size, boolean_t pageable, int flags, vm_map_kernel_flags_t vmk_flags, vm_tag_t tag, vm_map_t* new_map) {
// this is enough to satisfy ipc_init
dtape_stub();
*new_map = parent;
return KERN_SUCCESS;
};
kern_return_t _mach_make_memory_entry(vm_map_t target_map, memory_object_size_t* size, memory_object_offset_t offset, vm_prot_t permission, ipc_port_t* object_handle, ipc_port_t parent_entry) {
dtape_stub_unsafe();
};
kern_return_t mach_memory_entry_access_tracking(ipc_port_t entry_port, int* access_tracking, uint32_t* access_tracking_reads, uint32_t* access_tracking_writes) {
dtape_stub_unsafe();
};
kern_return_t mach_memory_entry_ownership(ipc_port_t entry_port, task_t owner, int ledger_tag, int ledger_flags) {
dtape_stub_unsafe();
};
kern_return_t mach_memory_entry_purgable_control(ipc_port_t entry_port, vm_purgable_t control, int* state) {
dtape_stub_unsafe();
};
kern_return_t mach_memory_info(host_priv_t host, mach_zone_name_array_t* namesp, mach_msg_type_number_t* namesCntp, mach_zone_info_array_t* infop, mach_msg_type_number_t* infoCntp, mach_memory_info_array_t* memoryInfop, mach_msg_type_number_t* memoryInfoCntp) {
dtape_stub_unsafe();
};
kern_return_t mach_memory_object_memory_entry(host_t host, boolean_t internal, vm_size_t size, vm_prot_t permission, memory_object_t pager, ipc_port_t* entry_handle) {
dtape_stub_unsafe();
};
kern_return_t mach_memory_object_memory_entry_64(host_t host, boolean_t internal, vm_object_offset_t size, vm_prot_t permission, memory_object_t pager, ipc_port_t* entry_handle) {
dtape_stub_unsafe();
};
void pmap_require(pmap_t pmap) {
dtape_stub_safe();
};
kern_return_t vm_allocate_cpm(host_priv_t host_priv, vm_map_t map, vm_address_t* addr, vm_size_t size, int flags) {
dtape_stub_unsafe();
};
kern_return_t vm32_mapped_pages_info(vm_map_t map, page_address_array_t* pages, mach_msg_type_number_t* pages_count) {
dtape_stub_unsafe();
};
kern_return_t vm32_region_info(vm_map_t map, vm32_offset_t address, vm_info_region_t* regionp, vm_info_object_array_t* objectsp, mach_msg_type_number_t* objectsCntp) {
dtape_stub_unsafe();
};
kern_return_t vm32_region_info_64(vm_map_t map, vm32_offset_t address, vm_info_region_64_t* regionp, vm_info_object_array_t* objectsp, mach_msg_type_number_t* objectsCntp) {
dtape_stub_unsafe();
};
memory_object_t convert_port_to_memory_object(mach_port_t port) {
dtape_stub_unsafe();
};
vm_map_t convert_port_entry_to_map(ipc_port_t port) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_behavior_set(vm_map_t map, mach_vm_offset_t start, mach_vm_size_t size, vm_behavior_t new_behavior) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_copy(vm_map_t map, mach_vm_address_t source_address, mach_vm_size_t size, mach_vm_address_t dest_address) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_deallocate(vm_map_t map, mach_vm_offset_t start, mach_vm_size_t size) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_inherit(vm_map_t map, mach_vm_offset_t start, mach_vm_size_t size, vm_inherit_t new_inheritance) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_machine_attribute(vm_map_t map, mach_vm_address_t addr, mach_vm_size_t size, vm_machine_attribute_t attribute, vm_machine_attribute_val_t* value) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_map_external(vm_map_t target_map, mach_vm_offset_t* address, mach_vm_size_t initial_size, mach_vm_offset_t mask, int flags, ipc_port_t port, vm_object_offset_t offset, boolean_t copy, vm_prot_t cur_protection, vm_prot_t max_protection, vm_inherit_t inheritance) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_msync(vm_map_t map, mach_vm_address_t address, mach_vm_size_t size, vm_sync_t sync_flags) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_page_info(vm_map_t map, mach_vm_address_t address, vm_page_info_flavor_t flavor, vm_page_info_t info, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_page_query(vm_map_t map, mach_vm_offset_t offset, int* disposition, int* ref_count) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_page_range_query(vm_map_t map, mach_vm_offset_t address, mach_vm_size_t size, mach_vm_address_t dispositions_addr, mach_vm_size_t* dispositions_count) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_protect(vm_map_t map, mach_vm_offset_t start, mach_vm_size_t size, boolean_t set_maximum, vm_prot_t new_protection) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_purgable_control(vm_map_t map, mach_vm_offset_t address, vm_purgable_t control, int* state) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_read(vm_map_t map, mach_vm_address_t addr, mach_vm_size_t size, pointer_t* data, mach_msg_type_number_t* data_size) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_read_list(vm_map_t map, mach_vm_read_entry_t data_list, natural_t count) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_read_overwrite(vm_map_t map, mach_vm_address_t address, mach_vm_size_t size, mach_vm_address_t data, mach_vm_size_t* data_size) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_region(vm_map_t map, mach_vm_offset_t* address, mach_vm_size_t* size, vm_region_flavor_t flavor, vm_region_info_t info, mach_msg_type_number_t* count, mach_port_t* object_name) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_region_recurse(vm_map_t map, mach_vm_address_t* address, mach_vm_size_t* size, uint32_t* depth, vm_region_recurse_info_t info, mach_msg_type_number_t* infoCnt) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_remap_external(vm_map_t target_map, mach_vm_offset_t* address, mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src_map, mach_vm_offset_t memory_address, boolean_t copy, vm_prot_t* cur_protection, vm_prot_t* max_protection, vm_inherit_t inheritance) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_remap_new_external(vm_map_t target_map, mach_vm_offset_t* address, mach_vm_size_t size, mach_vm_offset_t mask, int flags, mach_port_t src_tport, mach_vm_offset_t memory_address, boolean_t copy, vm_prot_t* cur_protection, vm_prot_t* max_protection, vm_inherit_t inheritance) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_wire_external(host_priv_t host_priv, vm_map_t map, mach_vm_offset_t start, mach_vm_size_t size, vm_prot_t access) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_write(vm_map_t map, mach_vm_address_t address, pointer_t data, mach_msg_type_number_t size) {
dtape_stub_unsafe();
};
kern_return_t mach_vm_allocate_kernel(vm_map_t map, mach_vm_offset_t* addr, mach_vm_size_t size, int flags, vm_tag_t tag) {
dtape_stub_unsafe();
};
kern_return_t mach_zone_force_gc(host_t host) {
dtape_stub_unsafe();
};
kern_return_t mach_zone_get_btlog_records(host_priv_t host, mach_zone_name_t name, zone_btrecord_array_t* recsp, mach_msg_type_number_t* recsCntp) {
dtape_stub_safe();
return KERN_FAILURE;
};
kern_return_t mach_zone_get_zlog_zones(host_priv_t host, mach_zone_name_array_t* namesp, mach_msg_type_number_t* namesCntp) {
dtape_stub_safe();
return KERN_FAILURE;
};
kern_return_t mach_zone_info(host_priv_t host, mach_zone_name_array_t* namesp, mach_msg_type_number_t* namesCntp, mach_zone_info_array_t* infop, mach_msg_type_number_t* infoCntp) {
dtape_stub_unsafe();
};
kern_return_t mach_zone_info_for_largest_zone(host_priv_t host, mach_zone_name_t* namep, mach_zone_info_t* infop) {
dtape_stub_unsafe();
};
kern_return_t mach_zone_info_for_zone(host_priv_t host, mach_zone_name_t name, mach_zone_info_t* infop) {
dtape_stub_unsafe();
};
boolean_t vm_kernel_map_is_kernel(vm_map_t map) {
dtape_stub_unsafe();
};
kern_return_t vm_map_copyin_common(vm_map_t src_map, vm_map_address_t src_addr, vm_map_size_t len, boolean_t src_destroy, boolean_t src_volatile, vm_map_copy_t* copy_result, boolean_t use_maxprot) {
dtape_stub_unsafe();
};
kern_return_t vm_map_copyout_size(vm_map_t dst_map, vm_map_address_t* dst_addr, vm_map_copy_t copy, vm_map_size_t copy_size) {
dtape_stub_unsafe();
};
kern_return_t vm_map_copy_overwrite(vm_map_t dst_map, vm_map_offset_t dst_addr, vm_map_copy_t copy, vm_map_size_t copy_size, boolean_t interruptible) {
dtape_stub_unsafe();
};
boolean_t vm_map_copy_validate_size(vm_map_t dst_map, vm_map_copy_t copy, vm_map_size_t* size) {
dtape_stub_unsafe();
};
kern_return_t vm_map_page_query_internal(vm_map_t target_map, vm_map_offset_t offset, int* disposition, int* ref_count) {
dtape_stub_unsafe();
};
kern_return_t vm_map_purgable_control(vm_map_t map, vm_map_offset_t address, vm_purgable_t control, int* state) {
dtape_stub_unsafe();
};
void vm_map_read_deallocate(vm_map_read_t map) {
dtape_stub_unsafe();
};
kern_return_t vm_map_region(vm_map_t map, vm_map_offset_t* address, vm_map_size_t* size, vm_region_flavor_t flavor, vm_region_info_t info, mach_msg_type_number_t* count, mach_port_t* object_name) {
dtape_stub_unsafe();
};
kern_return_t vm_map_region_recurse_64(vm_map_t map, vm_map_offset_t* address, vm_map_size_t* size, natural_t* nesting_depth, vm_region_submap_info_64_t submap_info, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t vm_map_unwire(vm_map_t map, vm_map_offset_t start, vm_map_offset_t end, boolean_t user_wire) {
dtape_stub_unsafe();
};
kern_return_t vm_map_wire_kernel(vm_map_t map, vm_map_offset_t start, vm_map_offset_t end, vm_prot_t caller_prot, vm_tag_t tag, boolean_t user_wire) {
dtape_stub_unsafe();
};
kern_return_t vm32__task_wire(vm_map_t map, boolean_t must_wire) {
dtape_stub_unsafe();
};
kern_return_t vm32__map_exec_lockdown(vm_map_t map) {
dtape_stub_unsafe();
};
// <copied from="xnu://7195.141.2/osfmk/vm/vm_user.c">
/*
* mach_vm_allocate allocates "zero fill" memory in the specified
* map.
*/
kern_return_t
mach_vm_allocate_external(
vm_map_t map,
mach_vm_offset_t *addr,
mach_vm_size_t size,
int flags)
{
vm_tag_t tag;
VM_GET_FLAGS_ALIAS(flags, tag);
return mach_vm_allocate_kernel(map, addr, size, flags, tag);
}
/*
* vm_wire -
* Specify that the range of the virtual address space
* of the target task must not cause page faults for
* the indicated accesses.
*
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
kern_return_t
vm_wire(
host_priv_t host_priv,
vm_map_t map,
vm_offset_t start,
vm_size_t size,
vm_prot_t access)
{
kern_return_t rc;
if (host_priv == HOST_PRIV_NULL) {
return KERN_INVALID_HOST;
}
if (map == VM_MAP_NULL) {
return KERN_INVALID_TASK;
}
if ((access & ~VM_PROT_ALL) || (start + size < start)) {
return KERN_INVALID_ARGUMENT;
}
if (size == 0) {
rc = KERN_SUCCESS;
} else if (access != VM_PROT_NONE) {
rc = vm_map_wire_kernel(map,
vm_map_trunc_page(start,
VM_MAP_PAGE_MASK(map)),
vm_map_round_page(start + size,
VM_MAP_PAGE_MASK(map)),
access, VM_KERN_MEMORY_OSFMK,
TRUE);
} else {
rc = vm_map_unwire(map,
vm_map_trunc_page(start,
VM_MAP_PAGE_MASK(map)),
vm_map_round_page(start + size,
VM_MAP_PAGE_MASK(map)),
TRUE);
}
return rc;
}
// </copied>

@@ -1,6 +1,9 @@
#include <darlingserver/duct-tape.h>
#include <darlingserver/duct-tape/hooks.h>
#include <darlingserver/duct-tape/log.h>
#include <darlingserver/duct-tape/processor.h>
#include <darlingserver/duct-tape/memory.h>
#include <darlingserver/duct-tape/task.h>
#include <kern/waitq.h>
#include <kern/clock.h>
@@ -15,35 +18,69 @@
#include <sys/types.h>
const dtape_hooks_t* dtape_hooks;
char version[] = "Darling 11.5";
int vsnprintf(char* buffer, size_t buffer_size, const char* format, va_list args);
ssize_t getrandom(void* buf, size_t buflen, unsigned int flags);
void ipc_table_init(void);
void ipc_init(void);
void mig_init(void);
void host_notify_init(void);
void user_data_attr_manager_init(void);
void ipc_voucher_init(void);
void dtape_logv(dtape_log_level_t level, const char* format, va_list args) {
char message[4096];
vsnprintf(message, sizeof(message), format, args);
dtape_hooks->log(level, message);
};
void dtape_log(dtape_log_level_t level, const char* format, ...) {
char message[4096];
va_list args;
va_start(args, format);
vsnprintf(message, sizeof(message), format, args);
dtape_logv(level, format, args);
va_end(args);
dtape_hooks->log(level, message);
};
void dtape_init(const dtape_hooks_t* hooks) {
dtape_hooks = hooks;
ipc_space_zone = zone_create("ipc spaces", sizeof(struct ipc_space), ZC_NOENCRYPT);
dtape_log_debug("dtape_processor_init");
dtape_processor_init();
ipc_table_init();
dtape_log_debug("dtape_memory_init");
dtape_memory_init();
ipc_space_zone = zone_create("ipc spaces", sizeof(struct ipc_space), ZC_NOENCRYPT);
ipc_kmsg_zone = zone_create("ipc kmsgs", IKM_SAVED_KMSG_SIZE, ZC_CACHING | ZC_ZFREE_CLEARMEM);
ipc_object_zones[IOT_PORT] = zone_create("ipc ports", sizeof(struct ipc_port), ZC_NOENCRYPT | ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_NOSEQUESTER);
ipc_object_zones[IOT_PORT_SET] = zone_create("ipc port sets", sizeof(struct ipc_pset), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM | ZC_NOSEQUESTER);
lck_mtx_init(&realhost.lock, LCK_GRP_NULL, LCK_ATTR_NULL);
dtape_log_debug("ipc_table_init");
ipc_table_init();
dtape_log_debug("ipc_voucher_init");
ipc_voucher_init();
dtape_log_debug("dtape_task_init");
dtape_task_init();
dtape_log_debug("ipc_init");
ipc_init();
dtape_log_debug("mig_init");
mig_init();
dtape_log_debug("host_notify_init");
host_notify_init();
dtape_log_debug("user_data_attr_manager_init");
user_data_attr_manager_init();
dtape_log_debug("waitq_bootstrap");
waitq_bootstrap();
@@ -70,3 +107,22 @@ void dtape_deinit(void) {
void read_frandom(void* buffer, unsigned int numBytes) {
getrandom(buffer, numBytes, 0);
};
void kprintf(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
dtape_logv(dtape_log_level_info, fmt, args);
va_end(args);
};
int scnprintf(char* buffer, size_t buffer_size, const char* format, ...) {
va_list args;
va_start(args, format);
int code = vsnprintf(buffer, buffer_size, format, args);
va_end(args);
if (code < 0) {
return code;
} else {
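// vsnprintf returns the length that would have been written; report the number of characters actually stored instead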
return strnlen(buffer, buffer_size);
}
};

duct-tape/src/processor.c (new file, 98 lines)

@@ -0,0 +1,98 @@
#include <darlingserver/duct-tape/stubs.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
processor_t processor_array[MAX_SCHED_CPUS] = {0};
struct processor_set pset0;
uint32_t processor_avail_count;
uint32_t processor_avail_count_user;
uint32_t primary_processor_avail_count;
uint32_t primary_processor_avail_count_user;
unsigned int processor_count;
simple_lock_data_t processor_list_lock;
processor_t master_processor;
void dtape_processor_init(void) {
simple_lock_init(&processor_list_lock, 0);
master_processor = kalloc(sizeof(*master_processor));
memset(master_processor, 0, sizeof(*master_processor));
};
kern_return_t processor_assign(processor_t processor, processor_set_t new_pset, boolean_t wait) {
dtape_stub_safe();
return KERN_FAILURE;
};
kern_return_t processor_control(processor_t processor, processor_info_t info, mach_msg_type_number_t count) {
dtape_stub_unsafe();
};
kern_return_t processor_exit_from_user(processor_t processor) {
dtape_stub_unsafe();
};
kern_return_t processor_get_assignment(processor_t processor, processor_set_t* pset) {
dtape_stub_unsafe();
};
kern_return_t processor_info(processor_t processor, processor_flavor_t flavor, host_t* host, processor_info_t info, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_info_count(processor_flavor_t flavor, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_set_create(host_t host, processor_set_t* new_set, processor_set_t* new_name) {
dtape_stub_unsafe();
};
kern_return_t processor_set_destroy(processor_set_t pset) {
dtape_stub_unsafe();
};
kern_return_t processor_set_info(processor_set_t pset, int flavor, host_t* host, processor_set_info_t info, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_set_max_priority(processor_set_t pset, int max_priority, boolean_t change_threads) {
dtape_stub_unsafe();
};
kern_return_t processor_set_policy_control(processor_set_t pset, int flavor, processor_set_info_t policy_info, mach_msg_type_number_t count, boolean_t change) {
dtape_stub_unsafe();
};
kern_return_t processor_set_policy_disable(processor_set_t pset, int policy, boolean_t change_threads) {
dtape_stub_unsafe();
};
kern_return_t processor_set_policy_enable(processor_set_t pset, int policy) {
dtape_stub_unsafe();
};
kern_return_t processor_set_stack_usage(processor_set_t pset, unsigned int* totalp, vm_size_t* spacep, vm_size_t* residentp, vm_size_t* maxusagep, vm_offset_t* maxstackp) {
dtape_stub_unsafe();
};
kern_return_t processor_set_statistics(processor_set_t pset, int flavor, processor_set_info_t info, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_set_tasks(processor_set_t pset, task_array_t* task_list, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_set_tasks_with_flavor(processor_set_t pset, mach_task_flavor_t flavor, task_array_t* task_list, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_set_threads(processor_set_t pset, thread_array_t* thread_list, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t processor_start_from_user(processor_t processor) {
dtape_stub_unsafe();
};

@@ -7,7 +7,17 @@
#include <sys/file_internal.h>
#include <pthread/workqueue_internal.h>
#include <kern/host.h>
#include <mach_debug/lockgroup_info.h>
unsigned int kdebug_enable = 0;
uint32_t avenrun[3] = {0};
uint32_t mach_factor[3] = {0};
vm_object_t compressor_object;
uint32_t c_segment_pages_compressed;
expired_task_statistics_t dead_task_statistics;
struct machine_info machine_info;
int sched_allow_NO_SMT_threads;
#undef panic
@@ -23,7 +33,7 @@ int fflush(FILE* stream);
#define DTAPE_FATAL_STUBS 0
#endif
void dtape_stub_log(const char* function_name, int safety) {
void dtape_stub_log(const char* function_name, int safety, const char* subsection) {
dtape_log_level_t log_level;
bool do_abort;
const char* kind_info;
@@ -46,7 +56,7 @@ void dtape_stub_log(const char* function_name, int safety) {
kind_info = " (safe)";
}
dtape_log(log_level, "stub%s: %s", kind_info, function_name);
dtape_log(log_level, "stub%s: %s%s%s", kind_info, function_name, subsection[0] == '\0' ? "" : ":", subsection);
if (do_abort) {
abort();
@@ -171,3 +181,49 @@ boolean_t machine_timeout_suspended(void) {
dtape_stub_safe();
return true;
};
boolean_t IOTaskHasEntitlement(task_t task, const char* entitlement) {
dtape_stub_safe();
return TRUE;
};
kern_return_t kmod_create(host_priv_t host_priv, vm_address_t addr, kmod_t* id) {
dtape_stub_safe();
return KERN_NOT_SUPPORTED;
};
kern_return_t kmod_destroy(host_priv_t host_priv, kmod_t id) {
dtape_stub_safe();
return KERN_NOT_SUPPORTED;
};
kern_return_t kmod_control(host_priv_t host_priv, kmod_t id, kmod_control_flavor_t flavor, kmod_args_t* data, mach_msg_type_number_t* dataCount) {
dtape_stub_safe();
return KERN_NOT_SUPPORTED;
};
kern_return_t kmod_get_info(host_t host, kmod_info_array_t* kmod_list, mach_msg_type_number_t* kmodCount) {
dtape_stub_safe();
return KERN_NOT_SUPPORTED;
};
kern_return_t kext_request(host_priv_t hostPriv, uint32_t clientLogSpec, vm_offset_t requestIn, mach_msg_type_number_t requestLengthIn, vm_offset_t* responseOut, mach_msg_type_number_t* responseLengthOut, vm_offset_t* logDataOut, mach_msg_type_number_t* logDataLengthOut, kern_return_t* op_result) {
dtape_stub_unsafe();
};
uint32_t PE_i_can_has_debugger(uint32_t* something) {
dtape_stub_unsafe();
};
bool work_interval_port_type_render_server(mach_port_name_t port_name) {
dtape_stub_safe();
return false;
};
ipc_port_t convert_suid_cred_to_port(suid_cred_t sc) {
dtape_stub_unsafe();
};
kern_return_t handle_ux_exception(thread_t thread, int exception, mach_exception_code_t code, mach_exception_subcode_t subcode) {
dtape_stub_unsafe();
};

@@ -1,17 +1,41 @@
#include <darlingserver/duct-tape/stubs.h>
#include <darlingserver/duct-tape.h>
#include <darlingserver/duct-tape/task.h>
#include <darlingserver/duct-tape/memory.h>
#include <kern/task.h>
#include <kern/ipc_tt.h>
#include <kern/policy_internal.h>
#include <ipc/ipc_importance.h>
#include <kern/restartable.h>
#include <stdlib.h>
// stub
task_t kernel_task;
task_t kernel_task = NULL;
void dtape_task_init(void) {
// this will assign to kernel_task
if (!dtape_task_create(NULL, 0, NULL)) {
panic("Failed to create kernel task");
}
};
dtape_task_handle_t dtape_task_create(dtape_task_handle_t xparent_task, uint32_t nsid, void* context) {
if (xparent_task == NULL && nsid == 0 && kernel_task) {
dtape_task_t* task = dtape_task_for_xnu_task(kernel_task);
// don't acquire an additional reference;
// the managing Task instance acquires ownership of the kernel task
//task_reference(kernel_task);
if (task->context) {
panic("The kernel task already has a context");
} else {
task->context = context;
}
return task;
}
dtape_task_t* parent_task = xparent_task;
dtape_task_t* task = malloc(sizeof(dtape_task_t));
if (!task) {
@@ -24,16 +48,26 @@ dtape_task_handle_t dtape_task_create(dtape_task_handle_t xparent_task, uint32_t
// this next section uses code adapted from XNU's task_create_internal() in osfmk/kern/task.c
os_ref_init_count(&task->xnu_task.ref_count, NULL, 1);
os_ref_init(&task->xnu_task.ref_count, NULL);
lck_mtx_init(&task->xnu_task.lock, LCK_GRP_NULL, LCK_ATTR_NULL);
queue_init(&task->xnu_task.threads);
task->xnu_task.active = true;
task->xnu_task.map = dtape_vm_map_create(task);
ipc_task_init(&task->xnu_task, parent_task ? &parent_task->xnu_task : NULL);
ipc_task_enable(&task->xnu_task);
if (xparent_task == NULL && nsid == 0) {
if (kernel_task) {
panic("Another kernel task has been created");
}
kernel_task = &task->xnu_task;
}
return task;
};
@@ -48,6 +82,8 @@ void dtape_task_destroy(dtape_task_handle_t xtask) {
ipc_task_terminate(&task->xnu_task);
dtape_vm_map_destroy(task->xnu_task.map);
lck_mtx_destroy(&task->xnu_task.lock, LCK_GRP_NULL);
};
@@ -63,6 +99,19 @@ int pid_from_task(task_t xtask) {
return task->saved_pid;
};
int proc_get_effective_task_policy(task_t task, int flavor) {
dtape_stub();
if (flavor == TASK_POLICY_ROLE) {
return TASK_UNSPECIFIED;
} else {
panic("Unimplemented proc_get_effective_task_policy flavor: %d", flavor);
}
};
int task_pid(task_t task) {
return pid_from_task(task);
};
void task_id_token_notify(mach_msg_header_t* msg) {
dtape_stub();
};
@@ -91,3 +140,398 @@ void task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_tok
void task_watchport_elem_deallocate(struct task_watchport_elem* watchport_elem) {
dtape_stub();
};
kern_return_t task_create_suid_cred(task_t task, suid_cred_path_t path, suid_cred_uid_t uid, suid_cred_t* sc_p) {
dtape_stub_unsafe();
};
kern_return_t task_create_identity_token(task_t task, task_id_token_t* tokenp) {
dtape_stub_unsafe();
};
ipc_port_t convert_task_id_token_to_port(task_id_token_t token) {
dtape_stub_unsafe();
};
task_id_token_t convert_port_to_task_id_token(ipc_port_t port) {
dtape_stub_unsafe();
};
kern_return_t task_identity_token_get_task_port(task_id_token_t token, task_flavor_t flavor, ipc_port_t* portp) {
dtape_stub_unsafe();
};
void task_id_token_release(task_id_token_t token) {
dtape_stub_unsafe();
};
kern_return_t task_dyld_process_info_notify_deregister(task_t task, mach_port_name_t rcv_name) {
dtape_stub_unsafe();
};
kern_return_t task_dyld_process_info_notify_register(task_t task, ipc_port_t sright) {
dtape_stub_unsafe();
};
kern_return_t task_generate_corpse(task_t task, ipc_port_t* corpse_task_port) {
dtape_stub_unsafe();
};
kern_return_t task_get_assignment(task_t task, processor_set_t* pset) {
dtape_stub_unsafe();
};
kern_return_t task_get_state(task_t task, int flavor, thread_state_t state, mach_msg_type_number_t* state_count) {
dtape_stub_unsafe();
};
kern_return_t task_info_from_user(mach_port_t task_port, task_flavor_t flavor, task_info_t task_info_out, mach_msg_type_number_t* task_info_count) {
dtape_stub_unsafe();
};
kern_return_t task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor, task_inspect_info_t info_out, mach_msg_type_number_t* size_in_out) {
dtape_stub_safe();
return KERN_FAILURE;
};
bool task_is_driver(task_t task) {
dtape_stub_safe();
return false;
};
kern_return_t task_map_corpse_info(task_t task, task_t corpse_task, vm_address_t* kcd_addr_begin, uint32_t* kcd_size) {
dtape_stub_unsafe();
};
kern_return_t task_map_corpse_info_64(task_t task, task_t corpse_task, mach_vm_address_t* kcd_addr_begin, mach_vm_size_t* kcd_size) {
dtape_stub_unsafe();
};
void task_name_deallocate(task_name_t task_name) {
dtape_stub_unsafe();
};
kern_return_t task_policy_get(task_t task, task_policy_flavor_t flavor, task_policy_t policy_info, mach_msg_type_number_t* count, boolean_t* get_default) {
dtape_stub_unsafe();
};
void task_policy_get_deallocate(task_policy_get_t task_policy_get) {
dtape_stub_unsafe();
};
kern_return_t task_policy_set(task_t task, task_policy_flavor_t flavor, task_policy_t policy_info, mach_msg_type_number_t count) {
dtape_stub_unsafe();
};
void task_policy_set_deallocate(task_policy_set_t task_policy_set) {
dtape_stub_unsafe();
};
kern_return_t task_purgable_info(task_t task, task_purgable_info_t* stats) {
dtape_stub_unsafe();
};
void task_read_deallocate(task_read_t task_read) {
dtape_stub_unsafe();
};
kern_return_t task_register_dyld_image_infos(task_t task, dyld_kernel_image_info_array_t infos_copy, mach_msg_type_number_t infos_len) {
dtape_stub_unsafe();
};
kern_return_t task_register_dyld_shared_cache_image_info(task_t task, dyld_kernel_image_info_t cache_img, boolean_t no_cache, boolean_t private_cache) {
dtape_stub_unsafe();
};
kern_return_t task_restartable_ranges_register(task_t task, task_restartable_range_t* ranges, mach_msg_type_number_t count) {
dtape_stub_unsafe();
};
kern_return_t task_restartable_ranges_synchronize(task_t task) {
dtape_stub_unsafe();
};
kern_return_t task_resume(task_t task) {
dtape_stub_unsafe();
};
kern_return_t task_resume2(task_suspension_token_t task) {
dtape_stub_unsafe();
};
kern_return_t task_set_exc_guard_behavior(task_t task, task_exc_guard_behavior_t behavior) {
dtape_stub_unsafe();
};
kern_return_t task_set_info(task_t task, task_flavor_t flavor, task_info_t task_info_in, mach_msg_type_number_t task_info_count) {
dtape_stub_unsafe();
};
kern_return_t task_set_phys_footprint_limit(task_t task, int new_limit_mb, int* old_limit_mb) {
dtape_stub_unsafe();
};
kern_return_t task_set_state(task_t task, int flavor, thread_state_t state, mach_msg_type_number_t state_count) {
dtape_stub_unsafe();
};
kern_return_t task_suspend(task_t task) {
dtape_stub_unsafe();
};
kern_return_t task_suspend2(task_t task, task_suspension_token_t* suspend_token) {
dtape_stub_unsafe();
};
void task_suspension_token_deallocate(task_suspension_token_t token) {
dtape_stub_unsafe();
};
kern_return_t task_terminate(task_t task) {
dtape_stub_unsafe();
};
kern_return_t task_threads_from_user(mach_port_t port, thread_act_array_t* threads_out, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
kern_return_t task_unregister_dyld_image_infos(task_t task, dyld_kernel_image_info_array_t infos_copy, mach_msg_type_number_t infos_len) {
dtape_stub_unsafe();
};
// <copied from="xnu://7195.141.2/osfmk/kern/task_policy.c">
/*
* Check if this task should donate importance.
*
* May be called without taking the task lock. In that case, donor status can change
* so you must check only once for each donation event.
*/
boolean_t
task_is_importance_donor(task_t task)
{
if (task->task_imp_base == IIT_NULL) {
return FALSE;
}
return ipc_importance_task_is_donor(task->task_imp_base);
}
/*
* task_policy
*
* Set scheduling policy and parameters, both base and limit, for
* the given task. Policy must be a policy which is enabled for the
* processor set. Change contained threads if requested.
*/
kern_return_t
task_policy(
__unused task_t task,
__unused policy_t policy_id,
__unused policy_base_t base,
__unused mach_msg_type_number_t count,
__unused boolean_t set_limit,
__unused boolean_t change)
{
return KERN_FAILURE;
}
// </copied>
// <copied from="xnu://7195.141.2/osfmk/kern/task.c">
boolean_t
task_get_filter_msg_flag(
task_t task)
{
uint32_t flags = 0;
if (!task) {
return false;
}
flags = os_atomic_load(&task->t_flags, relaxed);
return (flags & TF_FILTER_MSG) ? TRUE : FALSE;
}
/*
* task_assign:
*
* Change the assigned processor set for the task
*/
kern_return_t
task_assign(
__unused task_t task,
__unused processor_set_t new_pset,
__unused boolean_t assign_threads)
{
return KERN_FAILURE;
}
/*
* task_assign_default:
*
* Version of task_assign to assign to default processor set.
*/
kern_return_t
task_assign_default(
task_t task,
boolean_t assign_threads)
{
return task_assign(task, &pset0, assign_threads);
}
kern_return_t
task_create(
task_t parent_task,
__unused ledger_port_array_t ledger_ports,
__unused mach_msg_type_number_t num_ledger_ports,
__unused boolean_t inherit_memory,
__unused task_t *child_task) /* OUT */
{
if (parent_task == TASK_NULL) {
return KERN_INVALID_ARGUMENT;
}
/*
* No longer supported: too many calls assume that a task has a valid
* process attached.
*/
return KERN_FAILURE;
}
kern_return_t
task_get_dyld_image_infos(__unused task_t task,
__unused dyld_kernel_image_info_array_t * dyld_images,
__unused mach_msg_type_number_t * dyld_imagesCnt)
{
return KERN_NOT_SUPPORTED;
}
kern_return_t
task_get_exc_guard_behavior(
task_t task,
task_exc_guard_behavior_t *behaviorp)
{
if (task == TASK_NULL) {
return KERN_INVALID_TASK;
}
*behaviorp = task->task_exc_guard;
return KERN_SUCCESS;
}
/* Placeholders for the task set/get voucher interfaces */
kern_return_t
task_get_mach_voucher(
task_t task,
mach_voucher_selector_t __unused which,
ipc_voucher_t *voucher)
{
if (TASK_NULL == task) {
return KERN_INVALID_TASK;
}
*voucher = NULL;
return KERN_SUCCESS;
}
kern_return_t
task_set_mach_voucher(
task_t task,
ipc_voucher_t __unused voucher)
{
if (TASK_NULL == task) {
return KERN_INVALID_TASK;
}
return KERN_SUCCESS;
}
kern_return_t
task_swap_mach_voucher(
__unused task_t task,
__unused ipc_voucher_t new_voucher,
ipc_voucher_t *in_out_old_voucher)
{
/*
* Currently this function is only called from a MIG generated
* routine which doesn't release the reference on the voucher
* addressed by in_out_old_voucher. To avoid leaking this reference,
* a call to release it has been added here.
*/
ipc_voucher_release(*in_out_old_voucher);
return KERN_NOT_SUPPORTED;
}
/*
* task_inspect_deallocate:
*
* Drop a task inspection reference.
*/
void
task_inspect_deallocate(
task_inspect_t task_inspect)
{
return task_deallocate((task_t)task_inspect);
}
kern_return_t
task_register_dyld_set_dyld_state(__unused task_t task,
__unused uint8_t dyld_state)
{
return KERN_NOT_SUPPORTED;
}
kern_return_t
task_register_dyld_get_process_state(__unused task_t task,
__unused dyld_kernel_process_info_t * dyld_process_state)
{
return KERN_NOT_SUPPORTED;
}
/*
* task_set_policy
*
* Set scheduling policy and parameters, both base and limit, for
* the given task. Policy can be any policy implemented by the
* processor set, whether enabled or not. Change contained threads
* if requested.
*/
kern_return_t
task_set_policy(
__unused task_t task,
__unused processor_set_t pset,
__unused policy_t policy_id,
__unused policy_base_t base,
__unused mach_msg_type_number_t base_count,
__unused policy_limit_t limit,
__unused mach_msg_type_number_t limit_count,
__unused boolean_t change)
{
return KERN_FAILURE;
}
kern_return_t
task_set_ras_pc(
__unused task_t task,
__unused vm_offset_t pc,
__unused vm_offset_t endpc)
{
return KERN_FAILURE;
}
// </copied>
// <copied from="xnu://7195.141.2/osfmk/kern/zalloc.c">
kern_return_t
task_zone_info(
__unused task_t task,
__unused mach_zone_name_array_t *namesp,
__unused mach_msg_type_number_t *namesCntp,
__unused task_zone_info_array_t *infop,
__unused mach_msg_type_number_t *infoCntp)
{
return KERN_FAILURE;
}
// </copied>

@@ -64,6 +64,8 @@ dtape_thread_handle_t dtape_thread_create(dtape_task_handle_t xtask, uint64_t ns
thread->xnu_thread.thread_id = nsid;
thread->xnu_thread.map = task->xnu_task.map;
return thread;
};
@@ -200,6 +202,22 @@ void thread_set_thread_name(thread_t xthread, const char* name) {
thread->name = name;
};
__attribute__((noreturn))
void thread_syscall_return(kern_return_t ret) {
dtape_stub_unsafe();
};
thread_qos_t thread_get_requested_qos(thread_t thread, int* relpri) {
dtape_stub_safe();
*relpri = 0;
return THREAD_QOS_DEFAULT;
};
thread_qos_t thread_user_promotion_qos_for_pri(int priority) {
dtape_stub_safe();
return THREAD_QOS_DEFAULT;
};
void thread_guard_violation(thread_t thread, mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal) {
dtape_stub();
};
@@ -234,32 +252,94 @@ void sched_thread_unpromote_reason(thread_t thread, uint32_t reason, uintptr_t t
dtape_stub_safe();
};
#if 0
kern_return_t thread_wakeup_prim(event_t event, boolean_t one_thread, wait_result_t result) {
dtape_stub();
return KERN_FAILURE;
void thread_poll_yield(thread_t self) {
dtape_stub_safe();
};
kern_return_t thread_wakeup_thread(event_t event, thread_t thread) {
dtape_stub();
return KERN_FAILURE;
kern_return_t act_get_state_to_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t* count) {
dtape_stub_unsafe();
};
wait_result_t assert_wait_deadline(event_t event, wait_interrupt_t interruptible, uint64_t deadline) {
dtape_stub();
return THREAD_WAITING;
kern_return_t act_set_state_from_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t count) {
dtape_stub_unsafe();
};
wait_result_t assert_wait_deadline_with_leeway(event_t event, wait_interrupt_t interruptible, wait_timeout_urgency_t urgency, uint64_t deadline, uint64_t leeway) {
dtape_stub();
return THREAD_WAITING;
kern_return_t thread_abort(thread_t thread) {
dtape_stub_unsafe();
};
kern_return_t clear_wait(thread_t thread, wait_result_t result) {
dtape_stub();
return KERN_FAILURE;
kern_return_t thread_abort_safely(thread_t thread) {
dtape_stub_unsafe();
};
kern_return_t thread_convert_thread_state(thread_t thread, int direction, thread_state_flavor_t flavor, thread_state_t in_state, mach_msg_type_number_t in_state_count, thread_state_t out_state, mach_msg_type_number_t* out_state_count) {
dtape_stub_unsafe();
};
kern_return_t thread_create_from_user(task_t task, thread_t* new_thread) {
dtape_stub_unsafe();
};
kern_return_t thread_create_running_from_user(task_t task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, thread_t* new_thread) {
dtape_stub_unsafe();
};
kern_return_t thread_depress_abort_from_user(thread_t thread) {
dtape_stub_safe();
return KERN_SUCCESS;
};
kern_return_t thread_get_state_to_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t* state_count) {
dtape_stub_unsafe();
};
kern_return_t thread_info(thread_t thread, thread_flavor_t flavor, thread_info_t thread_info_out, mach_msg_type_number_t* thread_info_count) {
dtape_stub_unsafe();
};
void thread_inspect_deallocate(thread_inspect_t thread_inspect) {
dtape_stub_unsafe();
};
kern_return_t thread_policy(thread_t thread, policy_t policy, policy_base_t base, mach_msg_type_number_t count, boolean_t set_limit) {
dtape_stub_unsafe();
};
kern_return_t thread_policy_get(thread_t thread, thread_policy_flavor_t flavor, thread_policy_t policy_info, mach_msg_type_number_t* count, boolean_t* get_default) {
dtape_stub_unsafe();
};
kern_return_t thread_policy_set(thread_t thread, thread_policy_flavor_t flavor, thread_policy_t policy_info, mach_msg_type_number_t count) {
dtape_stub_unsafe();
};
void thread_read_deallocate(thread_read_t thread_read) {
dtape_stub_unsafe();
};
kern_return_t thread_resume(thread_t thread) {
dtape_stub_unsafe();
};
kern_return_t thread_set_mach_voucher(thread_t thread, ipc_voucher_t voucher) {
dtape_stub_unsafe();
};
kern_return_t thread_set_policy(thread_t thread, processor_set_t pset, policy_t policy, policy_base_t base, mach_msg_type_number_t base_count, policy_limit_t limit, mach_msg_type_number_t limit_count) {
dtape_stub_unsafe();
};
kern_return_t thread_set_state_from_user(thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t state_count) {
dtape_stub_unsafe();
};
kern_return_t thread_suspend(thread_t thread) {
dtape_stub_unsafe();
};
kern_return_t thread_wire(host_priv_t host_priv, thread_t thread, boolean_t wired) {
dtape_stub_unsafe();
};
#endif
// ignore the lock timeout
#define LockTimeOutUsec UINT32_MAX
@@ -530,3 +610,107 @@ clear_wait(
}
// </copied>
// <copied from="xnu://7195.141.2/osfmk/kern/thread.c">
kern_return_t
thread_assign(
__unused thread_t thread,
__unused processor_set_t new_pset)
{
return KERN_FAILURE;
}
/*
* thread_assign_default:
*
* Special version of thread_assign for assigning threads to default
* processor set.
*/
kern_return_t
thread_assign_default(
thread_t thread)
{
return thread_assign(thread, &pset0);
}
/*
* thread_get_assignment
*
* Return current assignment for this thread.
*/
kern_return_t
thread_get_assignment(
thread_t thread,
processor_set_t *pset)
{
if (thread == NULL) {
return KERN_INVALID_ARGUMENT;
}
*pset = &pset0;
return KERN_SUCCESS;
}
/*
* thread_get_mach_voucher - return a voucher reference for the specified thread voucher
*
* Conditions: nothing locked
*
* NOTE: At the moment, there is no distinction between the current and effective
* vouchers because we only set them at the thread level currently.
*/
kern_return_t
thread_get_mach_voucher(
thread_act_t thread,
mach_voucher_selector_t __unused which,
ipc_voucher_t *voucherp)
{
ipc_voucher_t voucher;
if (THREAD_NULL == thread) {
return KERN_INVALID_ARGUMENT;
}
thread_mtx_lock(thread);
voucher = thread->ith_voucher;
if (IPC_VOUCHER_NULL != voucher) {
ipc_voucher_reference(voucher);
thread_mtx_unlock(thread);
*voucherp = voucher;
return KERN_SUCCESS;
}
thread_mtx_unlock(thread);
*voucherp = IPC_VOUCHER_NULL;
return KERN_SUCCESS;
}
/*
* thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher
*
* Conditions: callers holds a reference on the new and presumed old voucher(s).
* nothing locked.
*
* This function is no longer supported.
*/
kern_return_t
thread_swap_mach_voucher(
__unused thread_t thread,
__unused ipc_voucher_t new_voucher,
ipc_voucher_t *in_out_old_voucher)
{
/*
* Currently this function is only called from a MIG generated
* routine which doesn't release the reference on the voucher
* addressed by in_out_old_voucher. To avoid leaking this reference,
* a call to release it has been added here.
*/
ipc_voucher_release(*in_out_old_voucher);
return KERN_NOT_SUPPORTED;
}
// </copied>

@@ -6,6 +6,29 @@ uint32_t dtape_task_self_trap(void) {
return task_self_trap(NULL);
};
uint32_t dtape_host_self_trap(void) {
return host_self_trap(NULL);
};
uint32_t dtape_thread_self_trap(void) {
return thread_self_trap(NULL);
};
uint32_t dtape_mach_reply_port(void) {
return mach_reply_port(NULL);
};
int dtape_mach_msg_overwrite(uintptr_t msg, int32_t option, uint32_t send_size, uint32_t rcv_size, uint32_t rcv_name, uint32_t timeout, uint32_t notify, uintptr_t rcv_msg, uint32_t rcv_limit) {
struct mach_msg_overwrite_trap_args args = {
.msg = msg,
.option = option,
.send_size = send_size,
.rcv_size = rcv_size,
.rcv_name = rcv_name,
.timeout = timeout,
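// XNU's mach_msg_overwrite_trap args have no notify field; the notify argument is passed in the priority slot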
.priority = notify,
.rcv_msg = rcv_msg,
// no rcv_limit
};
return mach_msg_overwrite_trap(&args);
};

@@ -152,8 +152,12 @@ SECURITY_READ_ONLY_LATE(vm_size_t) msg_ool_size_small;
* Purpose:
* Final initialization
*/
#ifdef __DARLING__
void
#else
__startup_func
static void
#endif // __DARLING__
ipc_init(void)
{
kern_return_t kr;
@@ -207,8 +211,10 @@ ipc_init(void)
panic("ipc_init: kmem_suballoc of ipc_kernel_copy_map failed");
}
#ifndef __DARLING__
ipc_kernel_copy_map->no_zero_fill = TRUE;
ipc_kernel_copy_map->wait_for_space = TRUE;
#endif // __DARLING__
/*
* As an optimization, 'small' out of line data regions using a

@@ -196,10 +196,19 @@ ipc_voucher_prepare_processing_recipe(
ipc_voucher_attr_manager_flags flags,
int *need_processing);
#ifdef __DARLING__
void
#else
__startup_func
static void
#endif // __DARLING__
ipc_voucher_init(void)
{
#ifdef __DARLING__
ipc_voucher_zone = zone_create("ipc vouchers", sizeof(struct ipc_voucher), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM | ZC_NOSEQUESTER);
ipc_voucher_attr_control_zone = zone_create("ipc voucher attr controls", sizeof(struct ipc_voucher_attr_control), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
lck_spin_init(&ivgt_lock_data, LCK_GRP_NULL, LCK_ATTR_NULL);
#endif // __DARLING__
/* initialize voucher hash */
for (iv_index_t i = 0; i < IV_HASH_BUCKETS; i++) {
queue_init(&ivht_bucket[i]);
@@ -3234,8 +3243,12 @@ user_data_release(
panic("Voucher user-data manager released");
}
#ifdef __DARLING__
void
#else
__startup_func
static void
#endif // __DARLING__
user_data_attr_manager_init(void)
{
kern_return_t kr;

@@ -68,6 +68,11 @@
#include <mach/message.h>
#include <mach/mig_log.h>
#ifdef __DARLING__
__attribute__((format(printf, 1, 2)))
int printf(const char* format, ...);
#endif // __DARLING__
int mig_tracing, mig_errors, mig_full_tracing;
/*

@@ -130,6 +130,9 @@ host_data_t realhost;
static void
get_host_vm_stats(vm_statistics64_t out)
{
#ifdef __DARLING__
memset(out, 0, sizeof(*out));
#else
out->zero_fill_count = counter_load(&vm_statistics_zero_fill_count);
out->reactivations = counter_load(&vm_statistics_reactivations);
out->pageins = counter_load(&vm_statistics_pageins);
@@ -142,6 +145,7 @@ get_host_vm_stats(vm_statistics64_t out)
out->decompressions = counter_load(&vm_statistics_decompressions);
out->swapins = counter_load(&vm_statistics_swapins);
out->swapouts = counter_load(&vm_statistics_swapouts);
#endif // __DARLING__
}
vm_extmod_statistics_data_t host_extmod_statistics;
@@ -179,6 +183,7 @@ host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_t
extern int sched_allow_NO_SMT_threads;
#ifndef __DARLING__
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
@ -393,9 +398,11 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num
default: return KERN_INVALID_ARGUMENT;
}
}
#endif // __DARLING__
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
#ifndef __DARLING__
kern_return_t
host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
@ -585,6 +592,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
default: return KERN_INVALID_ARGUMENT;
}
}
#endif // __DARLING__
extern uint32_t c_segment_pages_compressed;
@ -797,6 +805,7 @@ out:
return rate_limited;
}
#ifndef __DARLING__
kern_return_t
vm_stats(void *info, unsigned int *count)
{
@ -873,6 +882,7 @@ vm_stats(void *info, unsigned int *count)
return KERN_SUCCESS;
}
#endif // __DARLING__
kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
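
With the XNU implementations of `host_info`, `host_statistics`, and `vm_stats` compiled out, Darling has to supply replacements elsewhere. A minimal sketch of the shape such a replacement could take (the flavor handling here is an assumption, not copied from this commit):

// hypothetical replacement sketch; per-flavor handling is an assumption
kern_return_t host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t* count) {
	if (host == HOST_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	// fill in per-flavor data, zeroing anything Linux can't provide,
	// mirroring how get_host_vm_stats() above memsets its output
	switch (flavor) {
	default:
		return KERN_INVALID_ARGUMENT;
	}
}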

View File

@ -55,8 +55,12 @@ static mach_msg_id_t host_notify_replyid[HOST_NOTIFY_TYPE_MAX + 1] =
{ HOST_CALENDAR_CHANGED_REPLYID,
HOST_CALENDAR_SET_REPLYID };
#ifdef __DARLING__
void
#else
__startup_func
static void
#endif // __DARLING__
host_notify_init(void)
{
for (int i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) {

View File

@ -238,9 +238,17 @@ static const struct mig_subsystem *mig_e[] = {
(const struct mig_subsystem *)&mach_eventlink_subsystem,
};
#ifdef __DARLING__
void
#else
static void
#endif // __DARLING__
mig_init(void)
{
#ifdef __DARLING__
ipc_kobject_label_zone = zone_create("ipc kobject labels", sizeof(struct ipc_kobject_label), ZC_NONE);
#endif // __DARLING__
unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_subsystem *);
int howmany;
mach_msg_id_t j, pos, nentry, range;
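
Since `__startup_func` registration isn't available in this environment, the initializers un-static'd throughout this commit have to be invoked explicitly. A sketch of what that startup sequence might look like (the wrapper name and exact ordering are assumptions):

// hypothetical duct-tape startup sketch; name and ordering are assumptions
void dtape_ipc_subsystem_init(void) {
	ipc_init();                    // IPC zones, spaces, and the kernel copy map
	ipc_voucher_init();            // voucher zones and hash buckets
	user_data_attr_manager_init(); // user-data voucher attribute manager
	mig_init();                    // kernel MIG subsystem hash table
	host_notify_init();            // host-notify reply bookkeeping
}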

View File

@ -3015,6 +3015,7 @@ convert_port_to_map_with_flavor(
}
map = task->map;
#ifndef __DARLING__
if (map->pmap == kernel_pmap) {
if (flavor == TASK_FLAVOR_CONTROL) {
panic("userspace has control access to a "
@ -3027,6 +3028,7 @@ convert_port_to_map_with_flavor(
} else {
pmap_require(map->pmap);
}
#endif // __DARLING__
vm_map_reference(map);
task_unlock(task);

View File

@ -402,7 +402,11 @@ extern uint32_t processor_avail_count_user;
extern uint32_t primary_processor_avail_count;
extern uint32_t primary_processor_avail_count_user;
#ifdef __DARLING__
extern processor_t master_processor;
#else
#define master_processor PERCPU_GET_MASTER(processor)
#endif // __DARLING__
PERCPU_DECL(struct processor, processor);
extern processor_t current_processor(void);

View File

@ -50,6 +50,10 @@
#include <libkern/section_keywords.h>
#ifdef __DARLING__
#include <mach/host_priv_server.h>
#endif // __DARLING__
/*
* Mach kobject port to reflect Mach exceptions into Unix signals.
*

View File

@ -342,10 +342,10 @@ extern vm_offset_t vm_kernel_addrhash(vm_offset_t addr);
__END_DECLS
#ifdef __DARLING__
#define VM_KERNEL_ADDRHIDE(_v) (_v)
#define VM_KERNEL_UNSLIDE_OR_PERM(_v) (_v)
#define VM_KERNEL_UNSLIDE(_v) (_v)
#define VM_KERNEL_ADDRPERM(_v) (_v)
#define VM_KERNEL_ADDRHIDE(_v) ((vm_address_t)(_v))
#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ((vm_offset_t)(_v))
#define VM_KERNEL_UNSLIDE(_v) ((vm_offset_t)(_v))
#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)
#else
#define __DO_UNSLIDE(_v) ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) - vm_kernel_slide)
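
The casts matter because the real XNU macros yield integer types; with the bare identity versions, a pointer argument would keep its pointer type and trip format or conversion warnings in callers. A contrived illustration (`log_object` and its argument are hypothetical):

// illustration only: the casts make pointer arguments decay to integers
void log_object(const void* obj) {
	// without the cast inside VM_KERNEL_ADDRPERM, %lx would receive a pointer
	printf("object: 0x%lx\n", (unsigned long)VM_KERNEL_ADDRPERM(obj));
}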

View File

@ -67,7 +67,9 @@
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h> /* to get vm_address_t */
#ifndef __DARLING__ // causes unnecessary build issues
#include <mach/memory_object.h>
#endif // __DARLING__
#include <mach/std_types.h> /* to get pointer_t */
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
@ -549,6 +551,7 @@ vm32_make_memory_entry(
return kr;
}
#ifndef __DARLING__
kern_return_t
vm32__task_wire(
vm_map_t map,
@ -579,6 +582,6 @@ vm32__map_exec_lockdown(
return KERN_SUCCESS;
}
#endif // __DARLING__
#endif /* VM32_SUPPORT */

View File

@ -457,7 +457,12 @@ extern kern_return_t vm_map_wire_and_extract_kernel(
#endif /* XNU_KERNEL_PRIVATE */
#ifdef __DARLING__
#include <kern/task.h>
#define kernel_map (kernel_task->map)
#else
extern vm_map_t kernel_map;
#endif // __DARLING__
extern vm_map_t kernel_pageable_map;
extern vm_map_t ipc_kernel_map;
extern vm_map_t g_kext_map;

View File

@ -418,7 +418,7 @@ VME_OBJECT_SHADOW(
#define MAX_WIRE_COUNT 65535
#ifndef __DARLING__
/*
* Type: struct vm_map_header
*
@ -437,11 +437,15 @@ struct vm_map_header {
#endif
int page_shift; /* page shift */
};
#endif // __DARLING__
#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)
#ifdef __DARLING__
#include <darlingserver/duct-tape/memory.h>
#else
/*
* Type: vm_map_t [exported; contents invisible]
*
@ -527,6 +531,7 @@ struct _vm_map {
/* reserved */ pad:14;
unsigned int timestamp; /* Version number */
};
#endif // __DARLING__
#define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x))
#define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links)
@ -551,6 +556,7 @@ typedef struct vm_map_version {
unsigned int main_timestamp;
} vm_map_version_t;
#ifndef __DARLING__
/*
* Type: vm_map_copy_t [exported; contents invisible]
*
@ -599,6 +605,7 @@ struct vm_map_copy {
void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */
} c_u;
};
#endif // __DARLING__
#define cpy_hdr c_u.hdr
@ -644,6 +651,7 @@ vm_map_copy_adjust_to_target(
((map)->timestamp = 0 , \
lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))
#ifndef __DARLING__
#define vm_map_lock(map) \
MACRO_BEGIN \
DTRACE_VM(vm_map_lock_w); \
@ -656,6 +664,7 @@ vm_map_copy_adjust_to_target(
(map)->timestamp++; \
lck_rw_done(&(map)->lock); \
MACRO_END
#endif // __DARLING__
#define vm_map_lock_read(map) \
MACRO_BEGIN \
@ -1541,7 +1550,9 @@ extern kern_return_t vm_map_page_range_info_internal(
/*
* Macros for rounding and truncation of vm_map offsets and sizes
*/
#ifndef __DARLING__
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#endif // __DARLING__
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0)
@ -1597,6 +1608,7 @@ VM_MAP_POLICY_WX_STRIP_X(
return false;
}
#ifndef __DARLING__
static inline bool
VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
vm_map_t map __unused)
@ -1606,6 +1618,7 @@ VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(
}
return true;
}
#endif // __DARLING__
static inline bool
VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(

View File

@ -43,8 +43,6 @@ namespace DarlingServer {
MessageQueue& _replyQueue;
Address _replyAddress;
void _stopPending();
public:
Call(MessageQueue& replyQueue, std::shared_ptr<Thread> thread, Address replyAddress);
virtual ~Call();

View File

@ -46,8 +46,8 @@ namespace DarlingServer {
std::vector<std::weak_ptr<Thread>> _threads;
std::string _vchrootPath;
dtape_task_handle_t _dtapeTask;
std::weak_ptr<Process> _parentProcess;
bool _startSuspended = false;
void _unregisterThreads();
@ -55,6 +55,8 @@ namespace DarlingServer {
friend struct ::DTapeHooks;
bool _readOrWriteMemory(bool isWrite, uintptr_t remoteAddress, void* localBuffer, size_t length, int* errorCode) const;
public:
using ID = pid_t;
using NSID = ID;
@ -85,6 +87,12 @@ namespace DarlingServer {
std::shared_ptr<Process> parentProcess() const;
bool startSuspended() const;
void setStartSuspended(bool startSuspended);
bool readMemory(uintptr_t remoteAddress, void* localBuffer, size_t length, int* errorCode = nullptr) const;
bool writeMemory(uintptr_t remoteAddress, const void* localBuffer, size_t length, int* errorCode = nullptr) const;
static std::shared_ptr<Process> currentProcess();
static std::shared_ptr<Process> kernelProcess();
};

View File

@ -21,16 +21,44 @@ calls = [
]),
('task_self_trap', [], [
('port_name', 'unsigned int'),
('port_name', 'uint32_t'),
]),
('host_self_trap', [], [
('port_name', 'uint32_t'),
]),
('thread_self_trap', [], [
('port_name', 'uint32_t'),
]),
('mach_reply_port', [], [
('port_name', 'unsigned int'),
('port_name', 'uint32_t'),
]),
('kprintf', [
('string', 'char*', 'uint64_t'),
('string', 'const char*', 'uint64_t'),
('string_length', 'uint64_t'),
], []),
('started_suspended', [], [
('suspended', 'bool'),
]),
('get_tracer', [], [
('tracer', 'uint32_t'),
]),
('mach_msg_overwrite', [
('msg', 'void*', 'uint64_t'),
('option', 'int32_t'),
('send_size', 'uint32_t'),
('rcv_size', 'uint32_t'),
('rcv_name', 'uint32_t'),
('timeout', 'uint32_t'),
('notify', 'uint32_t'),
('rcv_msg', 'void*', 'uint64_t'),
('rcv_limit', 'uint32_t'),
], [])
]
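
In these tuples, the second element is the type exposed to callers and the optional third element is the wire type the value is transported as, so ('msg', 'void*', 'uint64_t') is declared as a pointer but serialized as a 64-bit integer. The generated client-side wrapper for the new call would then look roughly like this (the emitted name is the generator's business; this shape is an assumption):

// hypothetical shape of the generated client-side wrapper (name assumed)
int dserver_rpc_mach_msg_overwrite(void* msg, int32_t option, uint32_t send_size,
	uint32_t rcv_size, uint32_t rcv_name, uint32_t timeout, uint32_t notify,
	void* rcv_msg, uint32_t rcv_limit);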
@ -97,6 +125,7 @@ public_header.write("""\
#include <sys/types.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {

View File

@ -95,22 +95,9 @@ std::shared_ptr<DarlingServer::Thread> DarlingServer::Call::thread() const {
return _thread.lock();
};
/**
* Note that you MUST NOT have any local variables referencing `this` when this method is called.
*
* This Call object MAY be destroyed upon the return of this method.
*/
void DarlingServer::Call::_stopPending() {
// we're done processing this call; dump it
if (auto thread = _thread.lock()) {
thread->setPendingCall(nullptr);
}
};
void DarlingServer::Call::Checkin::processCall() {
// the Call creation already took care of registering the process and thread
_sendReply(0);
_stopPending();
};
void DarlingServer::Call::Checkout::processCall() {
@ -131,7 +118,6 @@ void DarlingServer::Call::Checkout::processCall() {
}
_sendReply(code);
_stopPending();
};
void DarlingServer::Call::VchrootPath::processCall() {
@ -141,22 +127,14 @@ void DarlingServer::Call::VchrootPath::processCall() {
if (auto thread = _thread.lock()) {
if (auto process = thread->process()) {
if (_body.buffer_size > 0) {
struct iovec local;
struct iovec remote;
auto tmpstr = process->vchrootPath().substr(0, _body.buffer_size - 1);
auto len = std::min(tmpstr.length() + 1, _body.buffer_size);
fullLength = process->vchrootPath().length();
// note that, despite the const cast, this is safe because the local iovec data is not modified by the call
local.iov_base = const_cast<char*>(tmpstr.c_str());
local.iov_len = len;
remote.iov_base = (void*)(uintptr_t)_body.buffer;
remote.iov_len = len;
if (process_vm_writev(process->id(), &local, 1, &remote, 1, 0) < 0) {
code = -errno;
if (!process->writeMemory(_body.buffer, tmpstr.c_str(), len, &code)) {
// writeMemory returns a positive error code, but we want a negative one
code = -code;
}
}
} else {
@ -167,7 +145,6 @@ void DarlingServer::Call::VchrootPath::processCall() {
}
_sendReply(code, fullLength);
_stopPending();
};
void DarlingServer::Call::TaskSelfTrap::processCall() {
@ -182,7 +159,34 @@ void DarlingServer::Call::TaskSelfTrap::processCall() {
callLog.debug() << "TaskSelfTrap returning port " << taskSelfPort << callLog.endLog;
_sendReply(0, taskSelfPort);
_stopPending();
};
void DarlingServer::Call::HostSelfTrap::processCall() {
if (auto thread = _thread.lock()) {
if (auto process = thread->process()) {
callLog.debug() << "Got HostSelfTrap call from " << process->nsid() << ":" << thread->nsid() << callLog.endLog;
}
}
const auto hostSelfPort = dtape_host_self_trap();
callLog.debug() << "HostSelfTrap returning port " << hostSelfPort << callLog.endLog;
_sendReply(0, hostSelfPort);
};
void DarlingServer::Call::ThreadSelfTrap::processCall() {
if (auto thread = _thread.lock()) {
if (auto process = thread->process()) {
callLog.debug() << "Got ThreadSelfTrap call from " << process->nsid() << ":" << thread->nsid() << callLog.endLog;
}
}
const auto threadSelfPort = dtape_thread_self_trap();
callLog.debug() << "ThreadSelfTrap returning port " << threadSelfPort << callLog.endLog;
_sendReply(0, threadSelfPort);
};
void DarlingServer::Call::MachReplyPort::processCall() {
@ -197,7 +201,6 @@ void DarlingServer::Call::MachReplyPort::processCall() {
callLog.debug() << "MachReplyPort returning port " << machReplyPort << callLog.endLog;
_sendReply(0, machReplyPort);
_stopPending();
};
void DarlingServer::Call::Kprintf::processCall() {
@ -209,19 +212,11 @@ void DarlingServer::Call::Kprintf::processCall() {
char* tmp = (char*)malloc(_body.string_length + 1);
if (tmp) {
struct iovec local;
struct iovec remote;
local.iov_base = tmp;
local.iov_len = _body.string_length;
remote.iov_base = (void*)(uintptr_t)_body.string;
remote.iov_len = _body.string_length;
if (process_vm_readv(process->id(), &local, 1, &remote, 1, 0) < 0) {
code = -errno;
} else {
if (process->readMemory(_body.string, tmp, _body.string_length, &code)) {
kprintfLog.info() << tmp << kprintfLog.endLog;
} else {
// readMemory returns a positive error code, but we want a negative one
code = -code;
}
free(tmp);
@ -236,5 +231,34 @@ void DarlingServer::Call::Kprintf::processCall() {
}
_sendReply(code);
_stopPending();
};
void DarlingServer::Call::StartedSuspended::processCall() {
int code = 0;
bool suspended = false;
if (auto thread = _thread.lock()) {
if (auto process = thread->process()) {
suspended = process->startSuspended();
} else {
code = -ESRCH;
}
} else {
code = -ESRCH;
}
_sendReply(code, suspended);
};
void DarlingServer::Call::GetTracer::processCall() {
int code = 0;
uint32_t tracer = 0;
callLog.warning() << "GetTracer: TODO" << callLog.endLog;
_sendReply(code, tracer);
};
void DarlingServer::Call::MachMsgOverwrite::processCall() {
_sendReply(dtape_mach_msg_overwrite(_body.msg, _body.option, _body.send_size, _body.rcv_size, _body.rcv_name, _body.timeout, _body.notify, _body.rcv_msg, _body.rcv_limit));
};

View File

@ -21,6 +21,8 @@
#include <darlingserver/registry.hpp>
#include <sys/syscall.h>
#include <unistd.h>
#include <sys/uio.h>
#include <darlingserver/logging.hpp>
#include <fstream>
@ -147,3 +149,76 @@ std::shared_ptr<DarlingServer::Process> DarlingServer::Process::kernelProcess()
}();
return process;
};
bool DarlingServer::Process::startSuspended() const {
std::shared_lock lock(_rwlock);
return _startSuspended;
};
void DarlingServer::Process::setStartSuspended(bool startSuspended) {
std::unique_lock lock(_rwlock);
_startSuspended = startSuspended;
};
bool DarlingServer::Process::_readOrWriteMemory(bool isWrite, uintptr_t remoteAddress, void* localBuffer, size_t length, int* errorCode) const {
struct iovec local;
struct iovec remote;
const auto func = isWrite ? process_vm_writev : process_vm_readv;
static DarlingServer::Log processMemoryAccessLog("procmem");
local.iov_base = localBuffer;
local.iov_len = length;
remote.iov_base = (void*)remoteAddress;
remote.iov_len = length;
if (func(id(), &local, 1, &remote, 1, 0) < 0) {
int code = errno;
processMemoryAccessLog.error()
<< "Failed to "
<< (isWrite ? "write " : "read ")
<< length
<< " byte(s) at "
<< remoteAddress
<< " in process "
<< id()
<< " ("
<< nsid()
<< "): "
<< code
<< " ("
<< strerror(code)
<< ")"
<< processMemoryAccessLog.endLog;
if (errorCode) {
*errorCode = code;
}
return false;
} else {
processMemoryAccessLog.debug()
<< "Successfully "
<< (isWrite ? "wrote " : "read ")
<< length
<< " byte(s) at "
<< remoteAddress
<< " in process "
<< id()
<< " ("
<< nsid()
<< ")"
<< processMemoryAccessLog.endLog;
if (errorCode) {
*errorCode = 0;
}
return true;
}
};
bool DarlingServer::Process::readMemory(uintptr_t remoteAddress, void* localBuffer, size_t length, int* errorCode) const {
return _readOrWriteMemory(false, remoteAddress, localBuffer, length, errorCode);
};
bool DarlingServer::Process::writeMemory(uintptr_t remoteAddress, const void* localBuffer, size_t length, int* errorCode) const {
// the const_cast is safe; when writing to a process' memory, localBuffer is not modified
return _readOrWriteMemory(true, remoteAddress, const_cast<void*>(localBuffer), length, errorCode);
};
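
As a usage sketch (the process pointer, remote address, and buffer are made up), callers can now do:

// hypothetical usage sketch
char buf[256] = {0};
int err = 0;
if (process->readMemory(remoteAddr, buf, sizeof(buf) - 1, &err)) {
	// buf holds a copy of the remote bytes
} else {
	// err holds the positive errno from process_vm_readv (e.g. ESRCH or EFAULT)
}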

View File

@ -125,6 +125,14 @@ struct DTapeHooks {
DarlingServer::Thread::interruptEnable();
};
static bool dtape_hook_task_read_memory(void* task_context, uintptr_t remote_address, void* local_buffer, size_t length) {
return static_cast<DarlingServer::Process*>(task_context)->readMemory(remote_address, local_buffer, length);
};
static bool dtape_hook_task_write_memory(void* task_context, uintptr_t remote_address, const void* local_buffer, size_t length) {
return static_cast<DarlingServer::Process*>(task_context)->writeMemory(remote_address, local_buffer, length);
};
static constexpr dtape_hooks_t dtape_hooks = {
.thread_suspend = dtape_hook_thread_suspend,
.thread_resume = dtape_hook_thread_resume,
@ -137,6 +145,8 @@ struct DTapeHooks {
.thread_start = dtape_hook_thread_start,
.current_thread_interrupt_disable = dtape_hook_current_thread_interrupt_disable,
.current_thread_interrupt_enable = dtape_hook_current_thread_interrupt_enable,
.task_read_memory = dtape_hook_task_read_memory,
.task_write_memory = dtape_hook_task_write_memory,
};
};
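
On the duct-tape side, kernel code that needs task memory can now reach back through these hooks. A sketch of a copyin-style helper, assuming the hooks table is stashed in a global pointer named `dtape_hooks` (that name is an assumption of this sketch):

// hypothetical duct-tape-side helper; the `dtape_hooks` global is an assumption
int dtape_copyin_for_task(void* task_context, uintptr_t uaddr, void* kaddr, size_t len) {
	return dtape_hooks->task_read_memory(task_context, uaddr, kaddr, len) ? 0 : EFAULT;
}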

View File

@ -205,7 +205,9 @@ static const auto microthreadLog = DarlingServer::Log("microthread");
// this runs in the context of the microthread (i.e. with the microthread's stack active)
void DarlingServer::Thread::microthreadWorker() {
currentThreadVar->pendingCall()->processCall();
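	// grab the call and clear it as pending *before* running it, so the Call
	// object may be safely released (or even destroyed) during processCall();
	// this replaces the per-call _stopPending() dance removed earlier in this commit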
auto call = currentThreadVar->pendingCall();
currentThreadVar->setPendingCall(nullptr);
call->processCall();
};
void DarlingServer::Thread::microthreadContinuation() {