It compiles! (with Clang); plus: Makefile overhaul

All the files that were building with the previous version of XNU are now building again (with Clang), however, the module does not link yet (due to missing symbols; *lots* of them). A lot of them are probably new additions to XNU, but some of them are probably from me not knowing to `ifdef` out certain things that should be.

Also, I completely overhauled the Makefile to make it simpler to manage settings and flags across files, folders, modules, and Linux/KBuild versions. I didn't add this feature (because I don't need it yet), but it can be easily extended to allow per-compiler flags.

Full list of all missing symbols reported by `MODPOST`:

task_is_driver
thread_get_state_to_user
machine_exception
catch_mach_exception_raise_state_identity
turnstile_has_waiters
mach_vm_allocate_kernel
processor_start_from_user
catch_mach_exception_raise_state
task_violated_guard
hw_atomic_test_and_set32
task_is_importance_donor
catch_mach_exception_raise
ipc_importance_task_reference
work_interval_port_notify
random_bool_init
os_ref_retain_try_internal
mach_zone_info_for_zone
processor_exit_from_user
pqueue_pair_meld
mach_vm_page_range_query
turnstile_complete
mach_vm_wire_external
mach_vm_allocate_external
vm_allocate_kernel
ux_handler_init
uext_server
turnstile_waitq_add_thread_priority_queue
thread_depress_abort_from_user
turnstile_deallocate
_Block_object_assign
turnstile_cleanup
turnstile_kernel_update_inheritor_on_wake_locked
_pthread_priority_normalize_for_ipc
filt_ipc_kqueue_turnstile
lck_spin_assert
IOTaskHasEntitlement
thread_get_requested_qos
task_watchport_elem_deallocate
os_ref_init_count_internal
lck_mtx_lock_spin_always
turnstile_update_inheritor_complete
task_inspect
thread_bootstrap_return
thread_setstatus_from_user
turnstile_recompute_priority_locked
mach_vm_remap_external
zone_require
thread_getstatus_to_user
turnstile_hash_bucket_unlock
_NSConcreteGlobalBlock
filt_machport_kqueue_has_turnstile
mach_continuous_time
ipc_importance_init
turnstile_update_inheritor_locked
turnstile_alloc
ipc_importance_send
ipc_importance_thread_call_init
bank_get_bank_ledger_thread_group_and_persona
turnstile_hash_bucket_lock
filt_machport_turnstile_prepare_lazily
turnstile_deallocate_safe
os_ref_release_barrier_internal
turnstile_reference
vm_map_wire_kernel
thread_deallocate_safe
turnstile_stats_update
thread_set_pending_block_hint
task_info_from_user
thread_inspect_deallocate
catch_exc_subsystem
ux_handler_stop
lck_mtx_assert
mach_vm_map_external
filt_machport_stash_port
ipc_importance_task_hold_internal_assertion
lck_spin_lock_grp
_Block_object_dispose
ipc_importance_check_circularity
task_restartable_subsystem
ipc_importance_task_drop_internal_assertion
random_bool_gen_bits
turnstile_update_inheritor
lck_spin_try_lock_grp
ipc_importance_task_release
kdp_lck_spin_is_acquired
ipc_importance_receive
os_ref_retain_internal
task_inspect_deallocate
_NSConcreteStackBlock
task_get_exc_guard_behavior
pid_from_task
sched_thread_unpromote_reason
memory_entry_subsystem
turnstile_prepare
act_get_state_to_user
sched_thread_promote_reason
task_set_exc_guard_behavior
ipc_importance_task_is_any_receiver_type
knote_vanish
thread_user_promotion_qos_for_pri
This commit is contained in:
Ariel Abreu 2020-09-08 10:06:49 -04:00
parent fe1ee5ff55
commit 8f105c7be5
No known key found for this signature in database
GPG Key ID: ECF8C2B9E8AD3E6B
127 changed files with 1952 additions and 969 deletions

2
.gitignore vendored
View File

@ -9,6 +9,8 @@ modules.order
.cache.mk
*.o.ur-safe
*.o.d
*.tmp
darling-mach.mod
darling-overlay.mod

View File

@ -11,13 +11,18 @@ add_definitions(
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/osfmk)
set(MIG_USER_SOURCE_SUFFIX "User.c")
set(MIG_SERVER_SOURCE_SUFFIX "Server.c")
set(MIG_SERVER_HEADER_SUFFIX "Server.h")
set(MIG_NO_XTRACE 1)
mig(osfmk/UserNotification/UNDReply.defs)
set(MIG_USER_SOURCE_SUFFIX "_user.c")
set(MIG_SERVER_SOURCE_SUFFIX "_server.c")
set(MIG_SERVER_HEADER_SUFFIX "_server.h")
set(MIG_NO_XTRACE 1)
mig(osfmk/device/device.defs)
mig(osfmk/UserNotification/UNDReply.defs)
mig(osfmk/default_pager/default_pager_alerts.defs)
mig(osfmk/default_pager/default_pager_object.defs)
mig(osfmk/kextd/kextd_mach.defs)
@ -37,12 +42,14 @@ mig(osfmk/mach/mach_port.defs)
mig(osfmk/mach/mach_vm.defs)
mig(osfmk/mach/mach_voucher.defs)
mig(osfmk/mach/mach_voucher_attr_control.defs)
mig(osfmk/mach/memory_entry.defs)
mig(osfmk/mach/memory_object_control.defs)
mig(osfmk/mach/memory_object_default.defs)
mig(osfmk/mach/memory_object.defs)
mig(osfmk/mach/processor.defs)
mig(osfmk/mach/processor_set.defs)
mig(osfmk/mach/resource_notify.defs)
mig(osfmk/mach/restartable.defs)
mig(osfmk/mach/task_access.defs)
mig(osfmk/mach/task.defs)
mig(osfmk/mach/thread_act.defs)
@ -72,12 +79,14 @@ add_custom_target(lkm_generate
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/vm32_map_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/mach_notify.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/mach_voucher_attr_control.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/memory_entry_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/memory_object_control.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/memory_object_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/memory_object_default_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/mach_voucher_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/vm_map.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/resource_notify.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/restartable_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/exc_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/mach_exc_server.h
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/notify_server.h
@ -95,7 +104,7 @@ add_custom_target(lkm_generate
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/thread_act_server.c
${CMAKE_CURRENT_BINARY_DIR}/osfmk/mach/clock_reply_user.c
${CMAKE_CURRENT_BINARY_DIR}/osfmk/device/device_server.c
${CMAKE_CURRENT_BINARY_DIR}/osfmk/UserNotification/UNDReply_server.c
${CMAKE_CURRENT_BINARY_DIR}/osfmk/UserNotification/UNDReplyServer.c
)
add_custom_target(lkm

View File

@ -21,6 +21,29 @@
*===-----------------------------------------------------------------------===
*/
#ifdef __DARLING__
#include_next <stdatomic.h>
#ifndef memory_order
#endif
#ifndef __clang__
enum memory_order {};
//#define __c11_atomic_init()
// Map Clang's __c11_atomic_* builtins onto GCC's __atomic_* builtins so the
// XNU sources (written for Clang) also build with GCC.
#define __c11_atomic_thread_fence(_order) __atomic_thread_fence(_order)
#define __c11_atomic_signal_fence(_order) __atomic_signal_fence(_order)
#define __c11_atomic_is_lock_free(_size) __atomic_is_lock_free(_size, 0)
#define __c11_atomic_store(_ptr, _val, _order) __atomic_store_n(_ptr, _val, _order)
#define __c11_atomic_load(_ptr, _order) __atomic_load_n(_ptr, _order)
#define __c11_atomic_exchange(_ptr, _val, _order) __atomic_exchange_n(_ptr, _val, _order)
#define __c11_atomic_compare_exchange_strong(_ptr, _expected, _val, _success, _fail) __atomic_compare_exchange_n(_ptr, _expected, _val, 0, _success, _fail)
#define __c11_atomic_compare_exchange_weak(_ptr, _expected, _val, _success, _fail) __atomic_compare_exchange_n(_ptr, _expected, _val, 1, _success, _fail)
// NOTE: C11's atomic_fetch_* (and Clang's __c11_atomic_fetch_*) return the
// value the object held BEFORE the operation. GCC's __atomic_OP_fetch builtins
// return the value AFTER the operation; the fetch-first equivalents are
// __atomic_fetch_OP, so those are what we must map to here.
#define __c11_atomic_fetch_add(_ptr, _val, _order) __atomic_fetch_add(_ptr, _val, _order)
#define __c11_atomic_fetch_sub(_ptr, _val, _order) __atomic_fetch_sub(_ptr, _val, _order)
#define __c11_atomic_fetch_and(_ptr, _val, _order) __atomic_fetch_and(_ptr, _val, _order)
#define __c11_atomic_fetch_or(_ptr, _val, _order) __atomic_fetch_or(_ptr, _val, _order)
#define __c11_atomic_fetch_xor(_ptr, _val, _order) __atomic_fetch_xor(_ptr, _val, _order)
#endif
#else
#ifndef __clang__
#error unsupported compiler
#endif
@ -195,4 +218,4 @@ void atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
#endif /* __STDC_HOSTED__ */
#endif /* __CLANG_STDATOMIC_H */
#endif // !__DARLING__

621
Makefile
View File

@ -5,25 +5,52 @@ $(error Darling's kernel module is now built differently. \
Please run 'make lkm' and 'make lkm_install' inside your CMake build directory)
endif
#
# ***
# general
# ***
#
asflags-y := -D__DARLING__ -D__NO_UNDERSCORES__ \
-I$(MIGDIR)/osfmk \
-I$(BUILD_ROOT)/osfmk \
-I$(BUILD_ROOT)/duct/defines
ccflags-y := -D__DARLING__ -DDARLING_DEBUG \
-I$(BUILD_ROOT)/EXTERNAL_HEADERS \
-I$(BUILD_ROOT)/EXTERNAL_HEADERS/bsd \
-I$(BUILD_ROOT)/duct/defines \
-DPAGE_SIZE_FIXED \
-DCONFIG_SCHED_TRADITIONAL \
-freorder-blocks \
WARNING_FLAGS := \
-Wno-unknown-warning-option \
-Wno-ignored-optimization-argument \
-Wno-unknown-pragmas \
-Wno-error=cast-align \
-Wno-unused-parameter \
-Wno-missing-prototypes \
-Wno-unused-variable \
-Wno-declaration-after-statement \
-Wno-undef \
-Wno-maybe-uninitialized \
-Wno-gnu-variable-sized-type-not-at-end
#-freorder-blocks
FEATURE_FLAGS := \
-fno-builtin \
-fno-common \
-fsigned-bitfields \
-fno-strict-aliasing \
-fno-keep-inline-functions \
-Wno-unknown-pragmas \
-fblocks \
-mfentry
# NOTE: we should further sort these
# e.g. something like MACH_DEFINES for definitions that enable certain behaviors in the XNU code,
# and DARLING_DEFINES for our own personal definitions, and maybe LINUX_DEFINES for definitions
# that affect Linux headers, and then OTHER_DEFINES for everything else
DEFINES := \
-D__DARLING__ \
-DDARLING_DEBUG \
-DPAGE_SIZE_FIXED \
-DCONFIG_SCHED_TRADITIONAL \
-DCONFIG_SCHED_TIMESHARE_CORE \
-DAPPLE \
-DKERNEL \
-DKERNEL_PRIVATE \
@ -32,32 +59,12 @@ ccflags-y := -D__DARLING__ -DDARLING_DEBUG \
-D__MACHO__=1 \
-Dvolatile=__volatile \
-DNEXT \
-Wno-error=cast-align \
-Wno-unused-parameter \
-Wno-missing-prototypes \
-Wno-unused-variable \
-D__LITTLE_ENDIAN__=1 \
-Wno-declaration-after-statement \
-Wno-undef \
-Wno-maybe-uninitialized \
-D__private_extern__=extern \
-D_MODE_T -D_NLINK_T -DVM32_SUPPORT=1 -DMACH_KERNEL_PRIVATE \
-I$(MIGDIR)/bsd \
-I$(BUILD_ROOT)/bsd \
-I$(BUILD_ROOT)/osfmk \
-I$(BUILD_ROOT)/iokit \
-I$(BUILD_ROOT)/libkern \
-I$(BUILD_ROOT)/libsa \
-I$(BUILD_ROOT)/libsa \
-I$(BUILD_ROOT)/pexpert \
-I$(BUILD_ROOT)/security \
-I$(BUILD_ROOT)/export-headers \
-I$(BUILD_ROOT)/osfmk/libsa \
-I$(BUILD_ROOT)/osfmk/mach_debug \
-I$(BUILD_ROOT)/ \
-I$(BUILD_ROOT)/darling \
-I$(MIGDIR)/osfmk \
-I$(MIGDIR)/../../startup \
-D_MODE_T \
-D_NLINK_T \
-DVM32_SUPPORT=1 \
-DMACH_KERNEL_PRIVATE \
-DARCH_PRIVATE \
-DDRIVER_PRIVATE \
-D_KERNEL_BUILD \
@ -133,62 +140,316 @@ ccflags-y := -D__DARLING__ -DDARLING_DEBUG \
-DCONFIG_KERNEL_0DAY_SYSCALL_HANDLER \
-DEVENTMETER \
-DCONFIG_APP_PROFILE=0 \
-DCC_USING_FENTRY=1 \
-DIMPORTANCE_INHERITANCE=1
OTHER_FLAGS := \
-std=gnu11
miggen_cflags := -include $(BUILD_ROOT)/osfmk/duct/duct.h -include $(BUILD_ROOT)/osfmk/duct/duct_pre_xnu.h
atomic_cflags := -include $(BUILD_ROOT)/clang_to_gcc_atomic.h
ccflags-y := $(WARNING_FLAGS) $(FEATURE_FLAGS) $(DEFINES) $(OTHER_FLAGS)
# This takes effect on Linux <5.4
CFLAGS_task_server.o := $(miggen_cflags)
CFLAGS_clock_server.o := $(miggen_cflags)
CFLAGS_lock_set_server.o := $(miggen_cflags)
CFLAGS_clock_priv_server.o := $(miggen_cflags)
CFLAGS_processor_server.o := $(miggen_cflags)
CFLAGS_host_priv_server.o := $(miggen_cflags)
CFLAGS_host_security_server.o := $(miggen_cflags)
CFLAGS_UNDReply_server.o := $(miggen_cflags)
CFLAGS_mach_port_server.o := $(miggen_cflags)
#CFLAGS_default_pager_object_server.o := $(miggen_cflags)
CFLAGS_mach_vm_server.o := $(miggen_cflags)
CFLAGS_mach_host_server.o := $(miggen_cflags)
CFLAGS_thread_act_server.o := $(miggen_cflags)
CFLAGS_processor_set_server.o := $(miggen_cflags)
CFLAGS_vm32_map_server.o := $(miggen_cflags)
CFLAGS_device_server.o := $(miggen_cflags)
CFLAGS_clock_reply_user.o := $(miggen_cflags)
CFLAGS_notify_user.o := $(miggen_cflags)
CFLAGS_mach_voucher_server.o := $(miggen_cflags)
CFLAGS_mach_voucher_attr_control_server.o := $(miggen_cflags)
CFLAGS_OSAtomicOperations.o := $(atomic_cflags)
CFLAGS_mach_exc_server.o := $(miggen_cflags)
CFLAGS_mach_exc_user.o := $(miggen_cflags)
CFLAGS_exc_user.o := $(miggen_cflags)
#
# ***
# darling-mach
# ***
#
# This takes effect on Linux 5.4+
CFLAGS_$(MIGDIR_REL)/osfmk/mach/task_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/clock_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/lock_set_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/clock_priv_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/processor_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/host_priv_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/host_security_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/UserNotification/UNDReply_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_port_server.o := $(miggen_cflags)
#CFLAGS_$(MIGDIR_REL)/osfmk/default_pager/default_pager_object_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_exc_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_exc_user.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/exc_user.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_vm_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_host_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_voucher_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/mach_voucher_attr_control_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/thread_act_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/processor_set_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/vm32_map_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/device/device_server.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/clock_reply_user.o := $(miggen_cflags)
CFLAGS_$(MIGDIR_REL)/osfmk/mach/notify_user.o := $(miggen_cflags)
CFLAGS_libkern/gen/OSAtomicOperations.o := $(atomic_cflags)
#
# object lists
# remember to reference subdirectories by including the subdirectory variables as part of the list
# e.g.
# OBJS_osfmk = \
# $(OBJS_osfmk/cool-subdir) \
# some_random_object.o \
# this_other_dude.o \
# $(OBJS_osfmk/whatever) \
# coolness.o
# note that order doesn't matter, although you'll probably want to keep it sorted and have subdirs separate from objects
# remember to create these as lazily-evaluated variables
#
#
# osfmk/
#
OBJS_osfmk = \
$(OBJS_osfmk/ipc) \
$(OBJS_osfmk/kern) \
$(OBJS_osfmk/duct)
OBJS_osfmk/ipc = \
osfmk/ipc/ipc_entry.o \
osfmk/ipc/ipc_hash.o \
osfmk/ipc/ipc_init.o \
osfmk/ipc/ipc_kmsg.o \
osfmk/ipc/ipc_mqueue.o \
osfmk/ipc/ipc_notify.o \
osfmk/ipc/ipc_object.o \
osfmk/ipc/ipc_port.o \
osfmk/ipc/ipc_pset.o \
osfmk/ipc/ipc_right.o \
osfmk/ipc/ipc_space.o \
osfmk/ipc/ipc_table.o \
osfmk/ipc/ipc_voucher.o \
osfmk/ipc/mach_debug.o \
osfmk/ipc/mach_kernelrpc.o \
osfmk/ipc/mach_msg.o \
osfmk/ipc/mach_port.o \
osfmk/ipc/mig_log.o
OBJS_osfmk/kern = \
osfmk/kern/clock_oldops.o \
osfmk/kern/exception.o \
osfmk/kern/host.o \
osfmk/kern/ipc_clock.o \
osfmk/kern/ipc_host.o \
osfmk/kern/ipc_kobject.o \
osfmk/kern/ipc_mig.o \
osfmk/kern/ipc_misc.o \
osfmk/kern/ipc_sync.o \
osfmk/kern/ipc_tt.o \
osfmk/kern/locks.o \
osfmk/kern/ltable.o \
osfmk/kern/mk_timer.o \
osfmk/kern/sync_sema.o \
osfmk/kern/ux_handler.c \
osfmk/kern/waitq.o
OBJS_osfmk/duct = \
osfmk/duct/darling_xnu_init.o \
osfmk/duct/duct_arm_locks_arm.o \
osfmk/duct/duct_atomic.o \
osfmk/duct/duct_ipc_importance.o \
osfmk/duct/duct_ipc_pset.o \
osfmk/duct/duct_kern_clock.o \
osfmk/duct/duct_kern_debug.o \
osfmk/duct/duct_kern_kalloc.o \
osfmk/duct/duct_kern_printf.o \
osfmk/duct/duct_kern_startup.o \
osfmk/duct/duct_kern_sysctl.o \
osfmk/duct/duct_kern_task.o \
osfmk/duct/duct_kern_thread_act.o \
osfmk/duct/duct_kern_thread_call.o \
osfmk/duct/duct_kern_thread.o \
osfmk/duct/duct_kern_timer_call.o \
osfmk/duct/duct_kern_zalloc.o \
osfmk/duct/duct_libsa.o \
osfmk/duct/duct_machine_routines.o \
osfmk/duct/duct_machine_rtclock.o \
osfmk/duct/duct_pcb.o \
osfmk/duct/duct_vm_init.o \
osfmk/duct/duct_vm_kern.o \
osfmk/duct/duct_vm_map.o \
osfmk/duct/duct_vm_user.o
#
# bsd/
#
OBJS_bsd = \
$(OBJS_bsd/uxkern) \
$(OBJS_bsd/duct)
OBJS_bsd/uxkern = \
bsd/uxkern/ux_exception.o
OBJS_bsd/duct = \
bsd/duct/duct_kern_kern_sig.o \
bsd/duct/duct_uxkern_ux_exception.o
#
# duct/
#
OBJS_duct = \
$(OBJS_duct/osfmk) \
$(OBJS_duct/bsd)
OBJS_duct/osfmk = \
duct/osfmk/dummy-kern-audit-sessionport.o \
duct/osfmk/dummy-kern-clock-oldops.o \
duct/osfmk/dummy-kern-host-notify.o \
duct/osfmk/dummy-kern-kmod.o \
duct/osfmk/dummy-kern-locks.o \
duct/osfmk/dummy-kern-machine.o \
duct/osfmk/dummy-kern-mk-sp.o \
duct/osfmk/dummy-kern-processor.o \
duct/osfmk/dummy-kern-sync-lock.o \
duct/osfmk/dummy-kern-syscall-emulation.o \
duct/osfmk/dummy-kern-task-policy.o \
duct/osfmk/dummy-kern-task.o \
duct/osfmk/dummy-kern-thread-act.o \
duct/osfmk/dummy-kern-thread-call.o \
duct/osfmk/dummy-kern-thread-policy.o \
duct/osfmk/dummy-kern-thread.o \
duct/osfmk/dummy-kern-zalloc.o \
duct/osfmk/dummy-kern.o \
duct/osfmk/dummy-locks.o \
duct/osfmk/dummy-machine.o \
duct/osfmk/dummy-misc.o \
duct/osfmk/dummy-vm-debug.o \
duct/osfmk/dummy-vm-kern.o \
duct/osfmk/dummy-vm-map.o \
duct/osfmk/dummy-vm-memory-object.o \
duct/osfmk/dummy-vm-resident.o \
duct/osfmk/dummy-vm-user.o
OBJS_duct/bsd = \
duct/bsd/dummy-init.o \
duct/bsd/dummy-kdebug.o
#
# libkern/
#
OBJS_libkern = \
$(OBJS_libkern/gen)
OBJS_libkern/gen = \
libkern/gen/OSAtomicOperations.o
#
# pexpert/
#
OBJS_pexpert = \
$(OBJS_pexpert/duct)
OBJS_pexpert/duct = \
pexpert/duct/duct_gen_bootargs.o \
pexpert/duct/duct_pe_kprintf.o
#
# <migdir>/osfmk/
#
OBJS_$(MIGDIR_REL)/osfmk = \
$(OBJS_$(MIGDIR_REL)/osfmk/mach) \
$(OBJS_$(MIGDIR_REL)/osfmk/device) \
$(OBJS_$(MIGDIR_REL)/osfmk/UserNotification)
OBJS_$(MIGDIR_REL)/osfmk/mach = \
$(MIGDIR_REL)/osfmk/mach/clock_priv_server.o \
$(MIGDIR_REL)/osfmk/mach/clock_reply_user.o \
$(MIGDIR_REL)/osfmk/mach/clock_server.o \
$(MIGDIR_REL)/osfmk/mach/exc_user.o \
$(MIGDIR_REL)/osfmk/mach/host_priv_server.o \
$(MIGDIR_REL)/osfmk/mach/host_security_server.o \
$(MIGDIR_REL)/osfmk/mach/lock_set_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_exc_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_exc_user.o \
$(MIGDIR_REL)/osfmk/mach/mach_host_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_port_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_vm_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_voucher_attr_control_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_voucher_server.o \
$(MIGDIR_REL)/osfmk/mach/notify_user.o \
$(MIGDIR_REL)/osfmk/mach/processor_server.o \
$(MIGDIR_REL)/osfmk/mach/processor_set_server.o \
$(MIGDIR_REL)/osfmk/mach/task_server.o \
$(MIGDIR_REL)/osfmk/mach/thread_act_server.o
OBJS_$(MIGDIR_REL)/osfmk/device = \
$(MIGDIR_REL)/osfmk/device/device_server.o
OBJS_$(MIGDIR_REL)/osfmk/UserNotification = \
$(MIGDIR_REL)/osfmk/UserNotification/UNDReplyServer.o
#
# darling/
#
OBJS_darling = \
darling/binfmt.o \
darling/commpage.o \
darling/continuation-asm.o \
darling/continuation.o \
darling/down_interruptible.o \
darling/evprocfd.o \
darling/evpsetfd.o \
darling/foreign_mm.o \
darling/host_info.o \
darling/module.o \
darling/psynch_support.o \
darling/pthread_kill.o \
darling/task_registry.o \
darling/traps.o
#
# full list of all objects in the darling-mach kernel module
#
DARLING_MACH_ALL_OBJS = \
$(OBJS_osfmk) \
$(OBJS_bsd) \
$(OBJS_duct) \
$(OBJS_libkern) \
$(OBJS_pexpert) \
$(OBJS_$(MIGDIR_REL)/osfmk) \
$(OBJS_darling)
#
# normal includes
# these come after any parent directory includes for each object
#
DARLING_MACH_NORMAL_INCLUDES := \
-I$(BUILD_ROOT)/EXTERNAL_HEADERS \
-I$(BUILD_ROOT)/duct/defines \
-I$(BUILD_ROOT)/osfmk \
-I$(MIGDIR)/bsd \
-I$(BUILD_ROOT)/bsd \
-I$(BUILD_ROOT)/iokit \
-I$(BUILD_ROOT)/libkern \
-I$(BUILD_ROOT)/libsa \
-I$(BUILD_ROOT)/pexpert \
-I$(BUILD_ROOT)/security \
-I$(BUILD_ROOT)/export-headers \
-I$(BUILD_ROOT)/osfmk/libsa \
-I$(BUILD_ROOT)/osfmk/mach_debug \
-I$(BUILD_ROOT)/ \
-I$(BUILD_ROOT)/darling \
-I$(MIGDIR)/osfmk \
-I$(MIGDIR)/../../startup \
-I$(BUILD_ROOT)/include
#
# special flags for the generated files in the MIG directory
#
CFLAGS_$(MIGDIR_REL) = \
-include $(BUILD_ROOT)/osfmk/duct/duct.h \
-include $(BUILD_ROOT)/osfmk/duct/duct_pre_xnu.h
INCLUDE_OVERRIDES_$(MIGDIR_REL) = \
-I$(BUILD_ROOT)/osfmk
INCLUDE_OVERRIDES_$(MIGDIR_REL)/osfmk/mach = \
-I$(BUILD_ROOT)/osfmk
#
# other special flags
#
CFLAGS_osfmk = -I$(BUILD_ROOT)/osfmk/kern
#
# darling-mach-specific flags
#
DARLING_MACH_CFLAGS =
#
# ***
# darling-overlay
# ***
#
#
# full list of all objects in the darling-overlay kernel module
#
DARLING_OVERLAY_ALL_OBJS = \
overlayfs/copy_up.o \
overlayfs/dir.o \
overlayfs/export.o \
overlayfs/file.o \
overlayfs/inode.o \
overlayfs/namei.o \
overlayfs/readdir.o \
overlayfs/super.o \
overlayfs/util.o
#
# normal includes
# these come after any parent directory includes for each object
#
DARLING_OVERLAY_NORMAL_INCLUDES := \
-I$(BUILD_ROOT)/include
#
# darling-overlay-specific flags
#
DARLING_OVERLAY_CFLAGS =
#
# ***
# KBuild setup
# ***
#
# KERNELVERSION is a dmks variable to specify the right version of the kernel.
# If this is not done like this, then when updating your kernel, you will
@ -198,143 +459,63 @@ $(info Running kernel version is $(KERNELVERSION))
# If KERNELRELEASE is defined, we've been invoked from the
# kernel build system and can use its language.
ifneq ($(KERNELRELEASE),)
$(info Invoked by kernel build system, building for $(KERNELRELEASE))
obj-m := darling-mach.o darling-overlay.o
darling-mach-objs := osfmk/ipc/ipc_entry.o \
osfmk/ipc/ipc_hash.o \
osfmk/ipc/ipc_space.o \
osfmk/ipc/ipc_kmsg.o \
osfmk/ipc/ipc_notify.o \
osfmk/ipc/ipc_object.o \
osfmk/ipc/ipc_pset.o \
osfmk/ipc/ipc_table.o \
osfmk/ipc/ipc_voucher.o \
osfmk/ipc/mig_log.o \
osfmk/ipc/mach_port.o \
osfmk/ipc/mach_msg.o \
osfmk/ipc/mach_debug.o \
osfmk/ipc/mach_kernelrpc.o \
osfmk/ipc/ipc_init.o \
osfmk/ipc/ipc_right.o \
osfmk/ipc/ipc_mqueue.o \
osfmk/ipc/ipc_port.o \
osfmk/kern/exception.o \
osfmk/kern/sync_sema.o \
bsd/uxkern/ux_exception.o \
darling/down_interruptible.o \
darling/traps.o \
darling/task_registry.o \
darling/module.o \
darling/host_info.o \
darling/evprocfd.o \
darling/evpsetfd.o \
darling/pthread_kill.o \
darling/psynch_support.o \
darling/foreign_mm.o \
darling/continuation.o \
darling/continuation-asm.o \
osfmk/duct/darling_xnu_init.o \
osfmk/duct/duct_atomic.o \
osfmk/duct/duct_ipc_pset.o \
osfmk/duct/duct_kern_clock.o \
osfmk/duct/duct_kern_debug.o \
osfmk/duct/duct_kern_kalloc.o \
osfmk/duct/duct_kern_printf.o \
osfmk/duct/duct_kern_startup.o \
osfmk/duct/duct_kern_sysctl.o \
osfmk/duct/duct_kern_task.o \
osfmk/duct/duct_kern_thread_act.o \
osfmk/duct/duct_kern_thread.o \
osfmk/duct/duct_kern_thread_call.o \
osfmk/duct/duct_kern_timer_call.o \
osfmk/duct/duct_kern_zalloc.o \
osfmk/duct/duct_ipc_importance.o \
osfmk/duct/duct_libsa.o \
osfmk/duct/duct_machine_routines.o \
osfmk/duct/duct_machine_rtclock.o \
osfmk/duct/duct_pcb.o \
osfmk/duct/duct_vm_init.o \
osfmk/duct/duct_vm_kern.o \
osfmk/duct/duct_vm_map.o \
osfmk/duct/duct_vm_user.o \
osfmk/duct/duct_arm_locks_arm.o \
osfmk/kern/clock_oldops.o \
osfmk/kern/ipc_clock.o \
osfmk/kern/ipc_tt.o \
osfmk/kern/ipc_sync.o \
osfmk/kern/ipc_misc.o \
osfmk/kern/host.o \
osfmk/kern/ipc_host.o \
osfmk/kern/ipc_kobject.o \
osfmk/kern/mk_timer.o \
osfmk/kern/ipc_mig.o \
osfmk/kern/locks.o \
osfmk/kern/ltable.o \
osfmk/kern/waitq.o \
duct/osfmk/dummy-locks.o \
duct/osfmk/dummy-misc.o \
duct/osfmk/dummy-kern.o \
duct/osfmk/dummy-machine.o \
duct/osfmk/dummy-vm-resident.o \
duct/osfmk/dummy-kern-thread-call.o \
duct/osfmk/dummy-vm-map.o \
duct/osfmk/dummy-vm-user.o \
duct/osfmk/dummy-kern-task.o \
duct/osfmk/dummy-kern-thread.o \
duct/osfmk/dummy-kern-audit-sessionport.o \
duct/osfmk/dummy-kern-processor.o \
duct/osfmk/dummy-kern-syscall-emulation.o \
duct/osfmk/dummy-kern-zalloc.o \
duct/osfmk/dummy-kern-sync-lock.o \
duct/osfmk/dummy-kern-machine.o \
duct/osfmk/dummy-kern-clock-oldops.o \
duct/osfmk/dummy-kern-thread-policy.o \
duct/osfmk/dummy-kern-task-policy.o \
duct/osfmk/dummy-kern-mk-sp.o \
duct/osfmk/dummy-vm-memory-object.o \
duct/osfmk/dummy-kern-kmod.o \
duct/osfmk/dummy-vm-kern.o \
duct/osfmk/dummy-vm-debug.o \
duct/osfmk/dummy-kern-thread-act.o \
duct/osfmk/dummy-kern-host-notify.o \
duct/bsd/dummy-kdebug.o \
duct/bsd/dummy-init.o \
libkern/gen/OSAtomicOperations.o \
$(MIGDIR_REL)/osfmk/mach/task_server.o \
$(MIGDIR_REL)/osfmk/mach/clock_server.o \
$(MIGDIR_REL)/osfmk/mach/clock_priv_server.o \
$(MIGDIR_REL)/osfmk/mach/processor_server.o \
$(MIGDIR_REL)/osfmk/mach/host_priv_server.o \
$(MIGDIR_REL)/osfmk/mach/host_security_server.o \
$(MIGDIR_REL)/osfmk/mach/lock_set_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_exc_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_exc_user.o \
$(MIGDIR_REL)/osfmk/mach/exc_user.o \
$(MIGDIR_REL)/osfmk/mach/mach_port_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_vm_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_host_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_voucher_server.o \
$(MIGDIR_REL)/osfmk/mach/mach_voucher_attr_control_server.o \
$(MIGDIR_REL)/osfmk/mach/processor_set_server.o \
$(MIGDIR_REL)/osfmk/mach/thread_act_server.o \
$(MIGDIR_REL)/osfmk/mach/clock_reply_user.o \
$(MIGDIR_REL)/osfmk/mach/notify_user.o \
$(MIGDIR_REL)/osfmk/device/device_server.o \
$(MIGDIR_REL)/osfmk/UserNotification/UNDReply_server.o \
pexpert/duct/duct_gen_bootargs.o \
pexpert/duct/duct_pe_kprintf.o \
darling/binfmt.o \
darling/commpage.o
darling-overlay-objs := overlayfs/copy_up.o \
overlayfs/dir.o \
overlayfs/export.o \
overlayfs/file.o \
overlayfs/inode.o \
overlayfs/namei.o \
overlayfs/readdir.o \
overlayfs/super.o \
overlayfs/util.o
# can't indent with tabs here or else Make will complain
$(info Invoked by kernel build system, building for $(KERNELRELEASE))
# some Make wizardry to calculate the includes to each object
# we do this to ensure that the parent directories of each object come first in the search paths
# otherwise, for e.g., when compiling an object in `osfmk`, `kern/ast.h` in bsd might be used instead of `kern/ast.h` in osfmk
# this also contains logic to allow per-directory CFLAGS to be added and cascade down
# (subdirectory flags are added after parent directory flags, so they can override parent directory flags)
# $(call do_includes,<path>) — recursively computes the -I search path (and any
# per-directory CFLAGS/INCLUDE_OVERRIDES) for <path>, walking from the object's
# own directory up toward the root so that nearer directories come first.
#
# The `basename '$(1)' | sed 's/^.*\.o$$//'` shell test yields an empty string
# for `*.o` paths and a non-empty one for directories, so the include/CFLAGS
# emission only fires for directory components of the walk.
# The `$(subst $(BUILD_ROOT)/$(MIGDIR_REL),,...)` test stops the upward
# recursion once the MIG directory root is reached; `sed -e 's/^.$$//'` maps
# dirname's `.` result to the empty string, stopping recursion at the top.
do_includes = \
$(if $(1), \
$(if $(shell basename '$(1)' | sed 's/^.*\.o$$//'), \
$(INCLUDE_OVERRIDES_$(1)) \
-I$(BUILD_ROOT)/$(1) \
) \
$(if $(subst $(BUILD_ROOT)/$(MIGDIR_REL),,$(BUILD_ROOT)/$(1)), \
$(call do_includes,$(shell dirname '$(1)' | sed -e 's/^.$$//')) \
) \
$(if $(shell basename '$(1)' | sed 's/^.*\.o$$//'), \
$(CFLAGS_$(1)) \
) \
)
# for the first foreach:
# the first eval is for Linux < 5.4
# the second eval is for Linux >= 5.4 and is the "correct" one
# for the second foreach:
# note that here we only have to add to the CFLAGS for the full path (the one used on Linux >= 5.4)
# the first loop already created lazily-evaluated CFLAGS variables for Linux < 5.4
# when those variables are used, they will automatically refer to the correct CFLAGS for the full path
# $(call iterate_objs,<PREFIX>) — wires up per-object CFLAGS for every object
# listed in $(<PREFIX>_ALL_OBJS), in two passes:
#   pass 1: lazily aliases CFLAGS_<basename>.o to CFLAGS_<full/path>.o (KBuild
#           on Linux < 5.4 keys per-object flags by basename), then attaches the
#           computed include chain from do_includes to the full-path variable.
#           NOTE(review): the non-empty branch rebuilds CFLAGS_<obj> with `:=`
#           instead of `+=` — presumably to avoid self-referential expansion of
#           the lazy alias; confirm previously-assigned per-object flags are
#           still picked up on both KBuild versions.
#   pass 2: appends the module-wide includes ($(<PREFIX>_NORMAL_INCLUDES)) and
#           flags ($(<PREFIX>_CFLAGS)) last, so per-directory overrides from
#           pass 1 take precedence in the compiler's search order.
iterate_objs = \
$(foreach OBJ,$($(1)_ALL_OBJS), \
$(eval CFLAGS_$(notdir $(OBJ)) = $$(CFLAGS_$(OBJ))) \
$(if $(strip $(CFLAGS_$(OBJ))), \
$(eval CFLAGS_$(OBJ) := $(call do_includes,$(OBJ))) \
, \
$(eval CFLAGS_$(OBJ) += $(call do_includes,$(OBJ))) \
) \
) \
$(foreach OBJ,$($(1)_ALL_OBJS), \
$(eval CFLAGS_$(OBJ) += $$($(1)_NORMAL_INCLUDES) $$($(1)_CFLAGS)) \
)
# do the CFLAGS magic for all darling-mach objects
$(call iterate_objs,DARLING_MACH)
# add MIG flags for the MIG objects
$(foreach MIG_OBJ,$(MIG_OBJS), \
$(eval CFLAGS_$(MIG_OBJ) += $$(miggen_cflags)) \
)
# same as before for darling-mach, except this one is for darling-overlay
$(call iterate_objs,DARLING_OVERLAY)
obj-m := darling-mach.o darling-overlay.o
darling-mach-objs := $(DARLING_MACH_ALL_OBJS)
darling-overlay-objs := $(DARLING_OVERLAY_ALL_OBJS)
# Otherwise we were called directly from the command
# line; invoke the kernel build system.

View File

@ -0,0 +1,30 @@
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#include <mach/exception.h>
#include <mach/mach_types.h>
#include <kern/thread.h>
#include <duct/duct_post_xnu.h>
// NOTE(@facekapow): i copied this function over from `bsd/uxkern/ux_exception.c` from the old LKM code,
// but i don't see it actually used by any code that we use now, so we might be able to just delete it
// Records `signum` as pending on `sig_actthread` when that thread is currently
// inside Darling's signal-processing path; otherwise logs that injecting new
// signals via the exception port is unsupported. XNU_SIGSTOP is intentionally
// not recorded (see TODO). `code` and `set_exitreason` are accepted for
// interface compatibility with the XNU original but are unused here.
void threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boolean_t set_exitreason)
{
	if (!sig_actthread->in_sigprocess)
	{
		printf(
			"Someone introduced a new signal by sending a message to the exception port.\n"
			"This is not supported under Darling.\n"
		);
		return;
	}

	if (signum == XNU_SIGSTOP)
	{
		// TODO: deliver LINUX_SIGSTOP directly
		return;
	}

	sig_actthread->pending_signal = signum;
}

View File

@ -0,0 +1,23 @@
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#include <mach/exception.h>
#include <mach/mach_types.h>
#include <kern/thread.h>
#include <duct/duct_post_xnu.h>
extern int ux_exception(int exception, mach_exception_code_t code, mach_exception_subcode_t subcode);
// Translates a Mach exception (plus code/subcode) into a UNIX signal number
// via ux_exception() and, when the target thread is inside Darling's
// signal-processing path, records that signal as pending on the thread.
// Always reports KERN_SUCCESS to the Mach exception machinery.
kern_return_t handle_ux_exception(thread_t thread, int exception, mach_exception_code_t code, mach_exception_subcode_t subcode) {
	/* Translate exception and code to signal type */
	int signum = ux_exception(exception, code, subcode);

	if (!thread->in_sigprocess) {
		// TODO: Introduce signal
		printf("handle_ux_exception(): TODO: introduce signal\n");
		return KERN_SUCCESS;
	}

	thread->pending_signal = signum;
	return KERN_SUCCESS;
}

View File

@ -34,6 +34,15 @@
#ifndef _KERN_AST_H_
#define _KERN_AST_H_
#ifdef __DARLING__
// hack/workaround for the wrong file getting picked up when including `osfmk/` headers in `bsd/`
//
// obviously, this shouldn't be necessary because Apple doesn't have to do this normally for XNU,
// so we should revisit this someday and properly set up include paths, but it works for now
#undef _KERN_AST_H_
#include_next <kern/ast.h>
#endif
#include <kern/thread.h>
extern void act_set_astbsd(thread_t);

View File

@ -67,6 +67,10 @@
#ifndef _CDEFS_H_
#define _CDEFS_H_
#ifdef __DARLING__
#include <duct/compiler/gcc/has-builtin.h>
#endif
#if defined(__cplusplus)
#define __BEGIN_DECLS extern "C" {
#define __END_DECLS }
@ -184,7 +188,9 @@
* __kpi_deprecated() specifically indicates deprecation of kernel programming
* interfaces in Kernel.framework used by KEXTs.
*/
#ifndef __DARLING__
#define __deprecated __attribute__((__deprecated__))
#endif
#if __has_extension(attribute_deprecated_with_message) || \
(defined(__GNUC__) && ((__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5))))

View File

@ -1095,7 +1095,12 @@ struct filterops {
#define filter_call(_ops, call) \
((_ops)->f_extended_codes ? (_ops)->call : !!((_ops)->call))
#ifdef __DARLING__
struct evpsetfd_ctx;
SLIST_HEAD(klist, evpsetfd_ctx);
#else
SLIST_HEAD(klist, knote);
#endif
extern void knote_init(void);
extern void klist_init(struct klist *list);

View File

@ -441,6 +441,10 @@
#define F_VOLPOSMODE 4 /* specify volume starting postion */
#endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */
#ifdef __DARLING__
#define flock xnu_flock
#endif
/*
* Advisory file segment locking data type -
* information passed to system by user

View File

@ -56,7 +56,9 @@ void uuid_clear(uuid_t uu);
int uuid_compare(const uuid_t uu1, const uuid_t uu2);
#ifndef __DARLING__
void uuid_copy(uuid_t dst, const uuid_t src);
#endif
void uuid_generate(uuid_t out);
void uuid_generate_random(uuid_t out);
@ -64,9 +66,11 @@ void uuid_generate_time(uuid_t out);
void uuid_generate_early_random(uuid_t out);
#ifndef __DARLING__
int uuid_is_null(const uuid_t uu);
int uuid_parse(const uuid_string_t in, uuid_t uu);
#endif
void uuid_unparse(const uuid_t uu, uuid_string_t out);
void uuid_unparse_lower(const uuid_t uu, uuid_string_t out);

View File

@ -32,19 +32,43 @@
* the terms and conditions for use and redistribution.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#ifndef __DARLING__
#include <sys/param.h>
#endif
#include <mach/boolean.h>
#include <mach/exception.h>
#include <mach/kern_return.h>
#ifndef __DARLING__
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/vmparam.h> /* MAXSSIZ */
#endif
#include <sys/ux_exception.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#define SIGSTOP XNU_SIGSTOP
#define SIGSEGV XNU_SIGSEGV
#define SIGBUS XNU_SIGBUS
#define SIGILL XNU_SIGILL
#define SIGFPE XNU_SIGFPE
#define SIGSYS XNU_SIGSYS
#define SIGPIPE XNU_SIGPIPE
#define SIGABRT XNU_SIGABRT
#define SIGKILL XNU_SIGKILL
#define SIGTRAP XNU_SIGTRAP
#endif
/*
* Translate Mach exceptions to UNIX signals.
*
@ -52,7 +76,11 @@
* a signal. Calls machine_exception (machine dependent)
* to attempt translation first.
*/
#ifdef __DARLING__
int
#else
static int
#endif
ux_exception(int exception,
mach_exception_code_t code,
mach_exception_subcode_t subcode)
@ -78,8 +106,10 @@ ux_exception(int exception,
case EXC_ARITHMETIC:
return SIGFPE;
#ifndef __DARLING__
case EXC_EMULATION:
return SIGEMT;
#endif
case EXC_SOFTWARE:
switch (code) {
@ -101,6 +131,8 @@ ux_exception(int exception,
return 0;
}
// we have our own duct-taped version of it
#ifndef __DARLING__
/*
* Sends the corresponding UNIX signal to a thread that has triggered a Mach exception.
*/
@ -178,3 +210,4 @@ handle_ux_exception(thread_t thread,
return KERN_SUCCESS;
}
#endif

View File

@ -1,20 +0,0 @@
#ifndef _CLANG_TO_GCC_ATOMIC_H
#define _CLANG_TO_GCC_ATOMIC_H

/*
 * Compatibility shim: map Clang's __c11_atomic_* builtins onto GCC's
 * generic __atomic_* builtins so code written against Clang's C11-atomics
 * interface also builds with GCC.
 *
 * Bug fix: the guard previously tested `__clang` (no trailing underscores),
 * which no compiler predefines, so these fallback macros were installed
 * even when compiling with Clang. The correct predefined macro is
 * `__clang__`.
 */
#ifndef __clang__

/*
 * Clang's "strong" compare-exchange builtin has no weak/strong flag;
 * GCC's __atomic_compare_exchange_n does, so pass 0 (strong) explicitly.
 */
#define __c11_atomic_compare_exchange_strong(addr, exp, desired, success_memorder, failure_memorder) \
__atomic_compare_exchange_n(addr, exp, desired, 0, success_memorder, failure_memorder)

#define __c11_atomic_fetch_add __atomic_fetch_add
#define __c11_atomic_fetch_and __atomic_fetch_and
#define __c11_atomic_fetch_or __atomic_fetch_or
#define __c11_atomic_fetch_xor __atomic_fetch_xor

/* Map the C11 memory-order names onto GCC's __ATOMIC_* constants. */
#define memory_order_acq_rel __ATOMIC_ACQ_REL
#define memory_order_relaxed __ATOMIC_RELAXED

#endif
#endif

View File

@ -1,5 +1,7 @@
#ifndef _BINFMT_H
#define _BINFMT_H
#include <duct/compiler/clang/asm-inline.h>
#include <linux/binfmts.h>
extern struct linux_binfmt macho_format;

View File

@ -17,6 +17,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <duct/compiler/clang/asm-inline.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>

View File

@ -1,3 +1,4 @@
#include <duct/compiler/clang/asm-inline.h>
#include <linux/semaphore.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)

View File

@ -1,5 +1,6 @@
#ifndef _LINUX_DOWN_INTERRUPTIBLE_H
#define _LINUX_DOWN_INTERRUPTIBLE_H
#include <duct/compiler/clang/asm-inline.h>
#include <linux/semaphore.h>
int down_interruptible_timeout(struct semaphore *sem, long timeout);

View File

@ -18,6 +18,7 @@
*/
#include "evprocfd.h"
#include "task_registry.h"
#include <duct/compiler/clang/asm-inline.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>

View File

@ -18,7 +18,7 @@
*/
#include <duct/duct.h>
#include "evpsetfd.h"
#include <mach/mach_types.h>
#include <duct/compiler/clang/asm-inline.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <duct/duct_pre_xnu.h>
#include <duct/duct_kern_waitqueue.h>
#include <mach/mach_types.h>
#include <osfmk/ipc/ipc_types.h>
#include <osfmk/ipc/ipc_object.h>
#include <osfmk/ipc/ipc_space.h>

View File

@ -17,6 +17,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <duct/compiler/clang/asm-inline.h>
#include "foreign_mm.h"
#include <linux/kthread.h>

View File

@ -1,3 +1,4 @@
#include <duct/compiler/clang/asm-inline.h>
#include <linux/module.h>
#include <linux/cred.h>
#include <linux/sched.h>

View File

@ -37,6 +37,7 @@
#include <kern/task.h>
#include <kern/thread_call.h>
#include <kern/kern_types.h>
#include <kern/ipc_tt.h>
#include <duct/duct_kern_printf.h>
#include <duct/duct_post_xnu.h>
@ -113,6 +114,7 @@ static int __test_prints__;
DEFINE_MUTEX(pthread_list_mlock);
#define LIST_ENTRY(where) struct list_head
#define LIST_INIT INIT_LIST_HEAD
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)
#define TAILQ_FIRST(head) (list_empty(head) ? NULL : list_first_entry(head, struct ksyn_waitq_element, kwe_list))
#define TAILQ_LAST(head, member) list_last_entry(head, struct ksyn_waitq_element, kwe_list)
@ -441,7 +443,6 @@ int ksyn_wqfind(user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint32_t rw_wc,
void ksyn_wqrelease(ksyn_wait_queue_t mkwq, ksyn_wait_queue_t ckwq, int qfreenow, int wqtype);
extern int ksyn_findobj(uint64_t mutex, uint64_t * object, uint64_t * offset);
static void UPDATE_CVKWQ(ksyn_wait_queue_t kwq, uint32_t mgen, uint32_t ugen, uint32_t rw_wc, uint64_t tid, int wqtype);
extern thread_t port_name_to_thread(mach_port_name_t port_name);
kern_return_t ksyn_block_thread_locked(ksyn_wait_queue_t kwq, uint64_t abstime, ksyn_waitq_element_t kwe, int log, thread_continue_t, void * parameter);
kern_return_t ksyn_wakeup_thread(ksyn_wait_queue_t kwq, ksyn_waitq_element_t kwe);
@ -1054,7 +1055,7 @@ psynch_cvsignal(__unused proc_t p, struct psynch_cvsignal_args * uap, uint32_t *
/* If we are looking for a specific thread, grab a reference for it */
if (threadport != 0) {
th = (thread_t)port_name_to_thread((mach_port_name_t)threadport);
th = (thread_t)port_name_to_thread((mach_port_name_t)threadport, PORT_TO_THREAD_NONE);
if (th == THREAD_NULL) {
error = LINUX_ESRCH;
goto out;

View File

@ -55,7 +55,7 @@ int pthread_kill_trap(task_t task,
if (copy_from_user(&args, in_args, sizeof(args)))
return -LINUX_EFAULT;
thread = port_name_to_thread(args.thread_port);
thread = port_name_to_thread(args.thread_port, PORT_TO_THREAD_NONE);
if (thread == THREAD_NULL || !thread->linux_task)
return -LINUX_ESRCH;

View File

@ -16,6 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <duct/compiler/clang/asm-inline.h>
#include <linux/types.h>
#include "task_registry.h"
#include "evprocfd.h"

View File

@ -1213,7 +1213,7 @@ int pid_for_task_entry(task_t task, struct pid_for_task* in_args)
int tid_for_thread_entry(task_t task, void* tport_in)
{
int tid;
thread_t t = port_name_to_thread((int)(long) tport_in);
thread_t t = port_name_to_thread((int)(long) tport_in, PORT_TO_THREAD_NONE);
if (!t || !t->linux_task)
return -1;
@ -1405,7 +1405,7 @@ int set_tracer_entry(task_t self, struct set_tracer_args* in_args)
int pthread_markcancel_entry(task_t task, void* tport_in)
{
// mark as canceled if cancelable
thread_t t = port_name_to_thread((int)(long) tport_in);
thread_t t = port_name_to_thread((int)(long) tport_in, PORT_TO_THREAD_NONE);
if (!t)
return -LINUX_ESRCH;
@ -1986,7 +1986,9 @@ int fileport_makefd_entry(task_t task, void* port_in)
kern_return_t kr;
int err;
kr = ipc_object_copyin(task->itk_space, send, MACH_MSG_TYPE_COPY_SEND, (ipc_object_t*) &port);
// NOTE(@facekapow): `ipc_object_copyin` got a few extra parameters in the last update; i just followed suit with what the XNU code passes in for them
// we might need to revisit them later on (particularly `IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND`), but they seem pretty harmless
kr = ipc_object_copyin(task->itk_space, send, MACH_MSG_TYPE_COPY_SEND, (ipc_object_t*) &port, 0, NULL, IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND);
if (kr != KERN_SUCCESS)
{

View File

@ -99,7 +99,7 @@ typedef struct alarm alarm_data_t;
#define ALARM_DONE 4 /* alarm has expired */
/* local data declarations */
decl_simple_lock_data(static,alarm_lock) /* alarm synchronization */
decl_simple_lock_data(static,alarm_lock); /* alarm synchronization */
static struct zone *alarm_zone; /* zone for user alarms */
static struct alarm *alrmfree; /* alarm free list pointer */
static struct alarm *alrmdone; /* alarm done list pointer */

View File

@ -40,7 +40,7 @@
#include "mach/host_notify_reply.h"
decl_lck_mtx_data(,host_notify_lock)
decl_lck_mtx_data(,host_notify_lock);
lck_mtx_ext_t host_notify_lock_ext;
lck_grp_t host_notify_lock_grp;

View File

@ -0,0 +1,29 @@
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#include <kern/locks.h>
#include <duct/duct_post_xnu.h>
void kprintf(const char* fmt, ...);
// Stub: blocking on a contended lck_mtx is not implemented yet; we only
// log the call so unimplemented paths show up in traces.
void lck_mtx_lock_wait(lck_mtx_t* lck, thread_t holder, struct turnstile** ts)
{
	(void)lck;
	(void)holder;
	(void)ts;
	kprintf("lck_mtx_lock_wait() not yet implemented\n");
}
// Stub: post-wait mutex acquisition is not implemented yet; log and report 0.
// Bug fix: the log line previously said "lck_mtx_lock_wait()" (copied from
// the stub above), which made the trace output point at the wrong function.
int lck_mtx_lock_acquire(lck_mtx_t* lck, struct turnstile* ts)
{
	(void)lck;
	(void)ts;
	kprintf("lck_mtx_lock_acquire() not yet implemented\n");
	return 0;
}
// Stub: no waiter bookkeeping exists yet, so always report "nobody woken".
boolean_t lck_mtx_unlock_wakeup(lck_mtx_t* lck, thread_t holder)
{
	(void)lck;
	(void)holder;
	kprintf("lck_mtx_unlock_wakeup() not yet implemented\n");
	return false;
}
// Stub: spin-lock variant of the unlock wakeup; log only.
void lck_mtx_unlockspin_wakeup(lck_mtx_t* lck)
{
	(void)lck;
	kprintf("lck_mtx_unlockspin_wakeup() not yet implemented\n");
}
// Stub: yielding while holding a mutex is not implemented; log only.
void lck_mtx_yield(lck_mtx_t* lck)
{
	(void)lck;
	kprintf("lck_mtx_yield() not yet implemented\n");
}

View File

@ -89,19 +89,19 @@
struct processor_set pset0;
struct pset_node pset_node0;
decl_simple_lock_data(static,pset_node_lock)
decl_simple_lock_data(static,pset_node_lock);
queue_head_t tasks;
queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */
int tasks_count;
queue_head_t threads;
int threads_count;
decl_lck_mtx_data(,tasks_threads_lock)
decl_lck_mtx_data(,tasks_threads_lock);
processor_t processor_list;
unsigned int processor_count;
static processor_t processor_list_tail;
decl_simple_lock_data(,processor_list_lock)
decl_simple_lock_data(,processor_list_lock);
uint32_t processor_avail_count;
@ -149,14 +149,14 @@ processor_pset(
processor_t processor)
{
kprintf("not implemented: processor_pset()\n");
return KERN_FAILURE;
return PROCESSOR_SET_NULL;
}
pset_node_t
pset_node_root(void)
{
kprintf("not implemented: pset_node_root()\n");
return KERN_FAILURE;
return PSET_NODE_NULL;
}
processor_set_t
@ -164,7 +164,7 @@ pset_create(
pset_node_t node)
{
kprintf("not implemented: pset_create()\n");
return KERN_FAILURE;
return PROCESSOR_SET_NULL;
}
/*

View File

@ -207,7 +207,8 @@ task_backing_store_privileged(
void
task_set_64bit(
task_t task,
boolean_t is64bit)
boolean_t is64bit,
boolean_t is_64bit_data)
{
kprintf("not implemented: task_set_64bit()\n");
}
@ -739,14 +740,15 @@ task_pidresume(
*/
kern_return_t
task_freeze(
register task_t task,
uint32_t *purgeable_count,
uint32_t *wired_count,
uint32_t *clean_count,
uint32_t *dirty_count,
uint32_t dirty_budget,
boolean_t *shared,
boolean_t walk_only)
task_t task,
uint32_t* purgeable_count,
uint32_t* wired_count,
uint32_t* clean_count,
uint32_t* dirty_count,
uint32_t dirty_budget,
uint32_t* shared_count,
int* freezer_error_code,
boolean_t eval_only)
{
kprintf("not implemented: task_freeze()\n");
return 0;

View File

@ -146,10 +146,10 @@ static lck_grp_attr_t thread_lck_grp_attr;
// lck_attr_t thread_lck_attr;
// lck_grp_t thread_lck_grp;
decl_simple_lock_data(static,thread_stack_lock)
decl_simple_lock_data(static,thread_stack_lock);
static queue_head_t thread_stack_queue;
decl_simple_lock_data(static,thread_terminate_lock)
decl_simple_lock_data(static,thread_terminate_lock);
static queue_head_t thread_terminate_queue;
static struct thread thread_template, init_thread;
@ -194,12 +194,12 @@ thread_terminate_enqueue(
kprintf("not implemented: thread_terminate_enqueue()\n");
}
void thread_recompute_sched_pri(thread_t thread, boolean_t override_depress)
void thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options)
{
kprintf("not implemented: thread_recompute_sched_pri()\n");
}
void thread_guard_violation(thread_t thread, unsigned type)
void thread_guard_violation(thread_t thread, mach_exception_data_type_t code, mach_exception_data_type_t subcode, boolean_t fatal)
{
kprintf("not implemented: thread_guard_violation()\n");
}
@ -392,7 +392,8 @@ void
thread_read_times(
thread_t thread,
time_value_t *user_time,
time_value_t *system_time)
time_value_t *system_time,
time_value_t *runnable_time)
{
kprintf("not implemented: thread_read_times()\n");
}

View File

@ -334,7 +334,7 @@ struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);
/*
* Exclude more than one concurrent garbage collection
*/
decl_lck_mtx_data(, zone_gc_lock)
decl_lck_mtx_data(, zone_gc_lock);
lck_attr_t zone_lck_attr;
lck_grp_t zone_lck_grp;
@ -355,7 +355,7 @@ lck_mtx_ext_t zone_lck_ext;
* Protects first_zone, last_zone, num_zones,
* and the next_zone field of zones.
*/
decl_simple_lock_data(, all_zones_lock)
decl_simple_lock_data(, all_zones_lock);
zone_t first_zone;
zone_t *last_zone;
unsigned int num_zones;
@ -993,9 +993,11 @@ struct {
* zones that are marked collectable looking for reclaimable
* pages. zone_gc is called by consider_zone_gc when the system
* begins to run out of memory.
*
* We should ensure that zone_gc never blocks.
*/
void
zone_gc(void)
zone_gc(boolean_t consider_jetsams)
{
kprintf("not implemented: zone_gc()\n");
}

View File

@ -97,7 +97,7 @@ UNDAlertCompletedWithResult_rpc (
return 0;
}
void set_sched_pri(thread_t thread, int priority)
void set_sched_pri(thread_t thread, int priority, set_sched_pri_options_t options)
{
kprintf("not implemented: set_sched_pri\n");
}
@ -231,6 +231,9 @@ mach_memory_info(
mach_msg_type_number_t *memoryInfoCntp)
{
kprintf("not implemented: mach_memory_info()");
// NOTE(@facekapow): i'm adding `return 0` because that's what all the neighboring functions are doing,
// but maybe we (and maybe all the other functions) should be returning `KERN_FAILURE` instead
return 0;
}
#if 0

View File

@ -320,6 +320,8 @@ kmem_suballoc(
vm_size_t size,
boolean_t pageable,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_map_t *new_map)
{
kprintf("not implemented: kmem_suballoc()\n");

View File

@ -424,6 +424,8 @@ vm_map_find_space(
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_map_entry_t *o_entry) /* OUT */
{
kprintf("not implemented: vm_map_find_space()\n");
@ -489,6 +491,8 @@ vm_map_enter_mem_object(
vm_map_size_t initial_size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
ipc_port_t port,
vm_object_offset_t offset,
boolean_t copy,
@ -509,6 +513,8 @@ vm_map_enter_mem_object_control(
vm_map_size_t initial_size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
memory_object_control_t control,
vm_object_offset_t offset,
boolean_t copy,
@ -1073,7 +1079,8 @@ vm_map_exec(
task_t task,
boolean_t is64bit,
void *fsroot,
cpu_type_t cpu)
cpu_type_t cpu,
cpu_subtype_t cpu_subtype)
{
kprintf("not implemented: vm_map_exec()\n");
return 0;
@ -1213,7 +1220,8 @@ vm_map_region_walk(
vm_object_offset_t offset,
vm_object_size_t range,
vm_region_extended_info_t extended,
boolean_t look_for_pages)
boolean_t look_for_pages,
mach_msg_type_number_t count)
{
kprintf("not implemented: vm_map_region_walk()\n");
}
@ -1310,9 +1318,12 @@ vm_map_entry_t vm_map_entry_insert(
unsigned wired_count,
boolean_t no_cache,
boolean_t permanent,
boolean_t no_copy_on_read,
unsigned int superpage_size,
boolean_t clear_map_aligned,
boolean_t is_submap)
boolean_t is_submap,
boolean_t used_for_jit,
int alias)
{
kprintf("not implemented: vm_map_entry_insert()\n");
return 0;
@ -1341,6 +1352,8 @@ vm_map_remap(
vm_map_size_t size,
vm_map_offset_t mask,
int flags,
vm_map_kernel_flags_t vmk_flags,
vm_tag_t tag,
vm_map_t src_map,
vm_map_offset_t memory_address,
boolean_t copy,
@ -1710,13 +1723,15 @@ kern_return_t vm_map_freeze_walk(
return 0;
}
kern_return_t vm_map_freeze(
vm_map_t map,
task_t task,
unsigned int *purgeable_count,
unsigned int *wired_count,
unsigned int *clean_count,
unsigned int *dirty_count,
unsigned int dirty_budget,
boolean_t *has_shared)
unsigned int *shared_count,
int *freezer_error_code,
boolean_t eval_only)
{
kprintf("not implemented: vm_map_freeze()\n");
return 0;

View File

@ -114,7 +114,7 @@
memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(, memory_manager_default_lock)
decl_lck_mtx_data(, memory_manager_default_lock);
/*
@ -448,7 +448,8 @@ kern_return_t memory_object_iopl_request(
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
upl_control_flags_t *flags)
upl_control_flags_t *flags,
vm_tag_t tag)
{
kprintf("not implemented: memory_object_iopl_request()\n");
return 0;
@ -471,7 +472,8 @@ memory_object_upl_request(
upl_t *upl_ptr,
upl_page_info_array_t user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
int cntrl_flags,
int tag)
{
kprintf("not implemented: memory_object_upl_request()\n");
return 0;
@ -497,7 +499,8 @@ memory_object_super_upl_request(
upl_t *upl,
upl_page_info_t *user_page_list,
unsigned int *page_list_count,
int cntrl_flags)
int cntrl_flags,
int tag)
{
kprintf("not implemented: memory_object_super_upl_request()\n");
return 0;

View File

@ -1135,7 +1135,7 @@ upl_range_needed(
* It would be nice to be able to encrypt and decrypt in physical
* mode but that might not always be more efficient...
*/
decl_simple_lock_data(,vm_paging_lock)
decl_simple_lock_data(,vm_paging_lock);
#define VM_PAGING_NUM_PAGES 64
vm_map_offset_t vm_paging_base_address = 0;
boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };

View File

@ -241,7 +241,7 @@ unsigned int vm_page_free_count_minimum; /* debugging */
*/
zone_t vm_page_zone;
vm_locks_array_t vm_page_locks;
decl_lck_mtx_data(,vm_page_alloc_lock)
decl_lck_mtx_data(,vm_page_alloc_lock);
lck_mtx_ext_t vm_page_alloc_lock_ext;
unsigned int io_throttle_zero_fill;
@ -260,7 +260,7 @@ struct vplq *vm_page_local_q = NULL;
* For debugging, this should be a strange value
* that the pmap module can recognize in assertions.
*/
ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
/*
* Guard pages are not accessible so they don't
@ -270,7 +270,7 @@ ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
* we don't use a real physical page with that
* physical address.
*/
ppnum_t vm_page_guard_addr = (ppnum_t) -2;
const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
/*
* Resident page structures are also chained on
@ -581,10 +581,9 @@ vm_page_lookup(
*/
void
vm_page_rename(
register vm_page_t mem,
register vm_object_t new_object,
vm_object_offset_t new_offset,
boolean_t encrypted_ok)
vm_page_t mem,
vm_object_t new_object,
vm_object_offset_t new_offset)
{
kprintf("not implemented: vm_page_rename()\n");
}

View File

@ -118,6 +118,8 @@
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <duct/duct_vm_user.h>
vm_size_t upl_offset_to_pagelist = 0;
#if VM_CPM
@ -1180,6 +1182,7 @@ kern_return_t vm_map_get_upl(
upl_page_info_array_t page_info,
unsigned int *page_infoCnt,
upl_control_flags_t *flags,
vm_tag_t tag,
int force_data_sync)
{
kprintf("not implemented: vm_map_get_upl()\n");

View File

@ -0,0 +1,5 @@
// workaround for Clang choking on Linux headers that use `asm __inline(...)`
// NOTE(review): presumably newer Linux kernels define `asm_inline` (in
// <linux/compiler_types.h>) as `asm __inline(...)` when the compiler claims
// support, and Clang builds of this module reject that spelling — confirm
// against the target kernel version. Redefining it to plain `asm` drops the
// inline hint but keeps the statements valid.
// This header must be included BEFORE any Linux header that uses asm_inline
// so the replacement definition wins.
#ifdef __clang__
#undef asm_inline
#define asm_inline asm
#endif

View File

@ -0,0 +1,69 @@
#ifndef _DARLING_LKM_DUCT_GCC_HAS_BUILTIN_H_
#define _DARLING_LKM_DUCT_GCC_HAS_BUILTIN_H_
// Shim for GCC builds: emulate Clang's `__has_builtin(x)` by maintaining a
// table of `_hasbin_<builtin>` marker macros, each defined only when the GCC
// version in use actually provides that builtin. `__has_builtin(x)` then
// reduces to a `defined(_hasbin_x)` check.
#ifndef __clang__
#ifndef __has_builtin
// GCC < 10 needs a shim for __has_builtin
// list of builtins that Mach checks for
// __builtin_assume
// __builtin_add_overflow
// __builtin_sub_overflow
// __builtin_mul_overflow
// __builtin_dynamic_object_size
// __builtin___memcpy_chk
// __builtin___memmove_chk
// __builtin___strncpy_chk
// __builtin___strncat_chk
// __builtin___strlcat_chk
// __builtin___strlcpy_chk
// __builtin___strcpy_chk
// __builtin___strcat_chk
// __builtin___memmove_chk
// __builtin_ia32_rdpmc
// GCC does not have `__builtin_assume`, so its marker stays undefined
//#define _hasbin___builtin_assume 1
#if __GNUC__ >= 5
// GCC 5+ has builtin safe math functions
#define _hasbin___builtin_add_overflow 1
#define _hasbin___builtin_sub_overflow 1
#define _hasbin___builtin_mul_overflow 1
#endif
// GCC does not have `__builtin_dynamic_object_size`, so its marker stays undefined
//#define _hasbin___builtin_dynamic_object_size 1
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
// GCC 4.1+ has builtin `chk` variants
// (maybe before 4.1 they're available too, but I couldn't find them
// in the older GCC source)
#define _hasbin___builtin___memcpy_chk 1
#define _hasbin___builtin___memmove_chk 1
#define _hasbin___builtin___strncpy_chk 1
#define _hasbin___builtin___strncat_chk 1
#define _hasbin___builtin___strlcat_chk 1
#define _hasbin___builtin___strlcpy_chk 1
#define _hasbin___builtin___strcpy_chk 1
#define _hasbin___builtin___strcat_chk 1
#define _hasbin___builtin___memmove_chk 1
#endif
#if __GNUC__ >= 7
// GCC 7+ has builtin 32-bit `rdpmc`
// (same as with the `chk` builtins: might be available in older GCC,
// but I couldn't find it in older GCC sources)
#define _hasbin___builtin_ia32_rdpmc 1
#endif
// NOTE(review): producing `defined(...)` via macro expansion is undefined
// behavior per the C standard; GCC accepts it (it may warn under
// -Wexpansion-to-defined) — confirm this is acceptable for every GCC
// version this module supports.
#define __has_builtin_internal(x) defined(_hasbin_ ## x)
#define __has_builtin(x) __has_builtin_internal(x)
#endif
#endif // !__clang__
#endif // _DARLING_LKM_DUCT_GCC_HAS_BUILTIN_H_

View File

@ -0,0 +1 @@
../../bsd/sys_private/kdebug_private.h

View File

@ -45,6 +45,9 @@
#include <stdbool.h>
#include <os/base.h>
#ifdef __DARLING__
static
#endif
bool __header_always_inline OS_WARN_RESULT
__os_warn_unused(__const bool x)
{

View File

@ -45,6 +45,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* WC - todo shouldn't be here */
#define fsid_t linux_fsid_t
#undef asm_inline // shut up a compiler warning
#define asm_inline asm
#include <linux/module.h>
#include <linux/kernel.h>
@ -435,4 +437,43 @@ static inline void linux_spin_unlock(spinlock_t* l)
#undef spin_unlock
#define spin_unlock linux_spin_unlock
// for xnu/bsd/sys/fcntl.h
#undef O_RDONLY
#undef O_WRONLY
#undef O_RDWR
#undef O_ACCMODE
#undef O_NONBLOCK
#undef O_APPEND
#undef O_NOFOLLOW
#undef O_CREAT
#undef O_TRUNC
#undef O_EXCL
#undef AT_FDCWD
#undef AT_SYMLINK_NOFOLLOW
#undef AT_SYMLINK_FOLLOW
#undef AT_REMOVEDIR
#undef O_NOCTTY
#undef O_DIRECTORY
#undef O_CLOEXEC
#undef FASYNC
#undef F_GETOWN
#undef F_SETOWN
#undef F_GETLK
#undef F_SETLK
#undef F_SETLKW
#undef F_OFD_SETLK
#undef F_OFD_SETLKW
#undef F_OFD_GETLK
#undef F_DUPFD_CLOEXEC
#undef F_RDLCK
#undef F_WRLCK
#undef LOCK_SH
#undef LOCK_EX
#undef LOCK_NB
#undef LOCK_UN
// for xnu/sys/queue.h
#undef LIST_HEAD
#endif // DUCT_H

View File

@ -66,6 +66,8 @@ lck_attr_t task_lck_attr;
lck_grp_t task_lck_grp;
lck_grp_attr_t task_lck_grp_attr;
os_refgrp_decl(static, task_refgrp, "task", NULL);
extern void duct_vm_map_deallocate(vm_map_t map);
void duct_task_init (void)
@ -139,228 +141,51 @@ void duct_task_destroy(task_t task)
kern_return_t duct_task_create_internal (task_t parent_task, boolean_t inherit_memory, boolean_t is_64bit, task_t * child_task, struct task_struct* ltask)
{
task_t new_task;
// this function is pretty much a heavy trim of the real `task_create_internal`,
// with a few Darling-specific additions
task_t new_task;
#if defined (__DARLING__)
#else
vm_shared_region_t shared_region;
new_task = (task_t) zalloc(task_zone);
ledger_t ledger = NULL;
#endif
if (new_task == TASK_NULL) {
return KERN_RESOURCE_SHORTAGE;
}
new_task = (task_t) duct_zalloc(task_zone);
// take advantage of C99 and initialize everything to zero
// allows us to eliminate a lot of unnecessary initialization
*new_task = (struct task){0};
if (ltask != NULL && ltask->mm != NULL)
pth_proc_hashinit(new_task);
/* one ref for just being alive; one for our caller */
os_ref_init_count(&new_task->ref_count, &task_refgrp, 2);
// printk (KERN_NOTICE "task create internal's new task: 0x%x", (unsigned int) new_task);
#if defined(CONFIG_SCHED_MULTIQ)
new_task->sched_group = sched_group_create();
#endif
if (new_task == TASK_NULL)
return(KERN_RESOURCE_SHORTAGE);
lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
queue_init(&new_task->threads);
new_task->active = TRUE;
new_task->returnwait_inheritor = current_thread();
/* one ref for just being alive; one for our caller */
new_task->ref_count = 2;
new_task->vchroot = NULL;
new_task->vchroot_path = (char*) __get_free_page(GFP_KERNEL);
new_task->sigexc = FALSE;
queue_init(&new_task->semaphore_list);
// /* allocate with active entries */
// assert(task_ledger_template != NULL);
// if ((ledger = ledger_instantiate(task_ledger_template,
// LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
// zfree(task_zone, new_task);
// return(KERN_RESOURCE_SHORTAGE);
// }
// new_task->ledger = ledger;
ipc_task_init(new_task, parent_task);
if (parent_task != TASK_NULL) {
new_task->sec_token = parent_task->sec_token;
new_task->audit_token = parent_task->audit_token;
} else {
new_task->sec_token = KERNEL_SECURITY_TOKEN;
new_task->audit_token = KERNEL_AUDIT_TOKEN;
}
#if defined (__DARLING__)
#else
// /* if inherit_memory is true, parent_task MUST not be NULL */
// if (inherit_memory)
// new_task->map = vm_map_fork(ledger, parent_task->map);
// else
// new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
// (vm_map_offset_t)(VM_MIN_ADDRESS),
// (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
//
// /* Inherit memlock limit from parent */
// if (parent_task)
// vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
#endif
//
lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
queue_init(&new_task->threads);
new_task->suspend_count = 0;
new_task->thread_count = 0;
new_task->active_thread_count = 0;
new_task->user_stop_count = 0;
// new_task->role = TASK_UNSPECIFIED;
new_task->active = TRUE;
new_task->halting = FALSE;
new_task->user_data = NULL;
new_task->faults = 0;
new_task->cow_faults = 0;
new_task->pageins = 0;
new_task->messages_sent = 0;
new_task->messages_received = 0;
new_task->syscalls_mach = 0;
new_task->priv_flags = 0;
new_task->syscalls_unix=0;
new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
// new_task->taskFeatures[0] = 0; /* Init task features */
// new_task->taskFeatures[1] = 0; /* Init task features */
ipc_task_enable(new_task);
// zinfo_task_init(new_task);
// Darling-specific code
new_task->vchroot_path = (char*)__get_free_page(GFP_KERNEL);
// #ifdef MACH_BSD
// new_task->bsd_info = NULL;
// #endif /* MACH_BSD */
// #if defined(__i386__) || defined(__x86_64__)
// new_task->i386_ldt = 0;
// new_task->task_debug = NULL;
// #endif
queue_init(&new_task->semaphore_list);
// queue_init(&new_task->lock_set_list);
new_task->semaphores_owned = 0;
// new_task->lock_sets_owned = 0;
if (ltask != NULL && ltask->mm != NULL)
{
new_task->map = duct_vm_map_create(ltask);
}
#if CONFIG_MACF_MACH
new_task->label = labelh_new(1);
mac_task_label_init (&new_task->maclabel);
#endif
new_task->t_flags = 0;
ipc_task_init(new_task, parent_task);
new_task->total_user_time = 0;
new_task->total_system_time = 0;
new_task->vtimers = 0;
new_task->shared_region = NULL;
new_task->affinity_space = NULL;
//
// #if CONFIG_COUNTERS
// new_task->t_chud = 0U;
// #endif
//
new_task->pidsuspended = FALSE;
// new_task->frozen = FALSE;
// new_task->rusage_cpu_flags = 0;
// new_task->rusage_cpu_percentage = 0;
// new_task->rusage_cpu_interval = 0;
// new_task->rusage_cpu_deadline = 0;
// new_task->rusage_cpu_callt = NULL;
// new_task->proc_terminate = 0;
// #if CONFIG_EMBEDDED
// queue_init(&new_task->task_watchers);
// new_task->appstate = TASK_APPSTATE_ACTIVE;
// new_task->num_taskwatchers = 0;
// new_task->watchapplying = 0;
// #endif /* CONFIG_EMBEDDED */
// new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
//
if (parent_task != TASK_NULL) {
new_task->sec_token = parent_task->sec_token;
new_task->audit_token = parent_task->audit_token;
// printk (KERN_NOTICE "- new task audit[5]: 0x%x\n", new_task->audit_token.val[5]);
#if defined (__DARLING__)
#else
/* inherit the parent's shared region */
// shared_region = vm_shared_region_get(parent_task);
// vm_shared_region_set(new_task, shared_region);
#endif
//
// if(task_has_64BitAddr(parent_task))
// task_set_64BitAddr(new_task);
// new_task->all_image_info_addr = parent_task->all_image_info_addr;
// new_task->all_image_info_size = parent_task->all_image_info_size;
//
// #if defined (__DARLING__)
// #else
// // #if defined(__i386__) || defined(__x86_64__)
// // if (inherit_memory && parent_task->i386_ldt)
// // new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
// // #endif
// if (inherit_memory && parent_task->affinity_space)
// task_affinity_create(parent_task, new_task);
// #endif
//
// #if defined (__DARLING__)
// #else
// new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
// #endif
//
// new_task->policystate = parent_task->policystate;
// /* inherit the self action state */
// new_task->appliedstate = parent_task->appliedstate;
// new_task->ext_policystate = parent_task->ext_policystate;
// #if NOTYET
// /* till the child lifecycle is cleared do not inherit external action */
// new_task->ext_appliedstate = parent_task->ext_appliedstate;
// #else
// new_task->ext_appliedstate = default_task_null_policy;
// #endif
}
else {
new_task->sec_token = KERNEL_SECURITY_TOKEN;
new_task->audit_token = KERNEL_AUDIT_TOKEN;
// // #ifdef __LP64__
// // if(is_64bit)
// // task_set_64BitAddr(new_task);
// // #endif
// new_task->all_image_info_addr = (mach_vm_address_t)0;
// new_task->all_image_info_size = (mach_vm_size_t)0;
//
// new_task->pset_hint = PROCESSOR_SET_NULL;
// new_task->policystate = default_task_proc_policy;
// new_task->ext_policystate = default_task_proc_policy;
// new_task->appliedstate = default_task_null_policy;
// new_task->ext_appliedstate = default_task_null_policy;
}
//
// if (kernel_task == TASK_NULL) {
// new_task->priority = BASEPRI_KERNEL;
// new_task->max_priority = MAXPRI_KERNEL;
// }
// else {
// new_task->priority = BASEPRI_DEFAULT;
// new_task->max_priority = MAXPRI_USER;
// }
//
// bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
// new_task->task_timer_wakeups_bin_1 = new_task->task_timer_wakeups_bin_2 = 0;
//
// lck_mtx_lock(&tasks_threads_lock);
// queue_enter(&tasks, new_task, task_t, tasks);
// tasks_count++;
// lck_mtx_unlock(&tasks_threads_lock);
//
#if defined (__DARLING__)
#else
// if (vm_backing_store_low && parent_task != NULL)
// new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
#endif
ipc_task_enable (new_task);
*child_task = new_task;
return(KERN_SUCCESS);
*child_task = new_task;
return KERN_SUCCESS;
}
@ -448,39 +273,6 @@ static void __thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut
*st = cputime.stime;
}
#ifndef TASK_VM_INFO
// Copied from newer XNU
#define TASK_VM_INFO 22
#define TASK_VM_INFO_PURGEABLE 23
struct task_vm_info {
mach_vm_size_t virtual_size; /* virtual memory size (bytes) */
integer_t region_count; /* number of memory regions */
integer_t page_size;
mach_vm_size_t resident_size; /* resident memory size (bytes) */
mach_vm_size_t resident_size_peak; /* peak resident size (bytes) */
mach_vm_size_t device;
mach_vm_size_t device_peak;
mach_vm_size_t internal;
mach_vm_size_t internal_peak;
mach_vm_size_t external;
mach_vm_size_t external_peak;
mach_vm_size_t reusable;
mach_vm_size_t reusable_peak;
mach_vm_size_t purgeable_volatile_pmap;
mach_vm_size_t purgeable_volatile_resident;
mach_vm_size_t purgeable_volatile_virtual;
mach_vm_size_t compressed;
mach_vm_size_t compressed_peak;
mach_vm_size_t compressed_lifetime;
};
typedef struct task_vm_info task_vm_info_data_t;
typedef struct task_vm_info *task_vm_info_t;
#define TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
(sizeof (task_vm_info_data_t) / sizeof (natural_t)))
#endif
kern_return_t
task_info(
task_t task,
@ -1081,51 +873,33 @@ kern_return_t
task_suspend(
task_t task)
{
kern_return_t kr;
mach_port_t port, send, old_notify;
mach_port_name_t name;
kern_return_t kr;
mach_port_t port;
mach_port_name_t name;
if (task == TASK_NULL || task == kernel_task)
return (KERN_INVALID_ARGUMENT);
if (task == TASK_NULL || task == kernel_task) {
return KERN_INVALID_ARGUMENT;
}
task_lock(task);
/*
* Claim a send right on the task resume port, and request a no-senders
* notification on that port (if none outstanding).
*/
if (task->itk_resume == IP_NULL) {
task->itk_resume = ipc_port_alloc_kernel();
if (!IP_VALID(task->itk_resume))
panic("failed to create resume port");
ipc_kobject_set(task->itk_resume, (ipc_kobject_t)task, IKOT_TASK_RESUME);
}
port = task->itk_resume;
ip_lock(port);
assert(ip_active(port));
send = ipc_port_make_send_locked(port);
assert(IP_VALID(send));
if (port->ip_nsrequest == IP_NULL) {
ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
assert(old_notify == IP_NULL);
/* port unlocked */
} else {
ip_unlock(port);
}
/*
* place a legacy hold on the task.
*/
kr = place_task_hold(task, TASK_HOLD_LEGACY);
if (kr != KERN_SUCCESS) {
task_unlock(task);
ipc_port_release_send(send);
return kr;
}
/*
* Claim a send right on the task resume port, and request a no-senders
* notification on that port (if none outstanding).
*/
(void)ipc_kobject_make_send_lazy_alloc_port(&task->itk_resume,
(ipc_kobject_t)task, IKOT_TASK_RESUME);
port = task->itk_resume;
task_unlock(task);
/*
@ -1133,17 +907,17 @@ task_suspend(
* but we'll look it up when calling a traditional resume. Any IPC operations that
* deallocate the send right will auto-release the suspension.
*/
if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, (ipc_object_t)send,
MACH_MSG_TYPE_MOVE_SEND, &name)) != KERN_SUCCESS) {
if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, ip_to_object(port),
MACH_MSG_TYPE_MOVE_SEND, NULL, NULL, &name)) != KERN_SUCCESS) {
#ifndef __DARLING__
printf("warning: %s(%d) failed to copyout suspension token for pid %d with error: %d\n",
proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
task_pid(task), kr);
#endif
return (kr);
return kr;
}
return (kr);
return kr;
}
/*

View File

@ -43,6 +43,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/ipc_tt.h>
#include <kern/policy_internal.h>
#include "duct_post_xnu.h"
#include <darling/task_registry.h>
@ -76,6 +77,8 @@ static lck_grp_attr_t thread_lck_grp_attr;
lck_attr_t thread_lck_attr;
lck_grp_t thread_lck_grp;
os_refgrp_decl(static, thread_refgrp, "thread", NULL);
// decl_simple_lock_data(static,thread_stack_lock)
// static queue_head_t thread_stack_queue;
//
@ -109,129 +112,107 @@ static uint64_t thread_unique_id = 0;
// static ledger_template_t thread_ledger_template = NULL;
// void init_thread_ledgers(void);
static kern_return_t duct_thread_create_internal (task_t parent_task, integer_t priority, thread_continue_t continuation, int options, thread_t * out_thread);
static kern_return_t duct_thread_create_internal2 (task_t task, thread_t * new_thread, boolean_t from_user);
static kern_return_t duct_thread_create_internal (task_t parent_task, integer_t priority, thread_continue_t continuation, void* parameter, int options, thread_t * out_thread);
static kern_return_t duct_thread_create_internal2 (task_t task, thread_t * new_thread, boolean_t from_user, thread_continue_t continuation);
static void thread_deallocate_complete(thread_t thread);
kern_return_t duct_thread_terminate (thread_t thread);
void duct_thread_deallocate (thread_t thread);
void duct_thread_bootstrap (void)
{
/*
* Fill in a template thread for fast initialization.
*/
// WC - the following is not necessary
/*
* Fill in a template thread for fast initialization.
*/
// Note for Darling, since static (and global) variables are always initialized to `0`,
// we can avoid unnecessarily copying lots of code (stuff like `thread_template.<blah> = 0`)
thread_template.runq = PROCESSOR_NULL;
thread_template.runq = PROCESSOR_NULL;
thread_template.ref_count = 2;
thread_template.reason = AST_NONE;
thread_template.at_safe_point = FALSE;
thread_template.wait_event = NO_EVENT64;
thread_template.waitq = NULL;
thread_template.wait_result = THREAD_WAITING;
thread_template.options = THREAD_ABORTSAFE;
thread_template.state = TH_WAIT | TH_UNINT;
thread_template.wake_active = FALSE;
thread_template.continuation = THREAD_CONTINUE_NULL;
thread_template.parameter = NULL;
thread_template.reason = 0;
thread_template.at_safe_point = FALSE;
thread_template.wait_event = NO_EVENT64;
thread_template.waitq = NULL;
thread_template.wait_result = THREAD_WAITING;
thread_template.options = THREAD_ABORTSAFE;
thread_template.state = TH_WAIT | TH_UNINT;
thread_template.wake_active = FALSE;
thread_template.continuation = THREAD_CONTINUE_NULL;
thread_template.parameter = NULL;
thread_template.sched_mode = TH_MODE_NONE;
thread_template.saved_mode = TH_MODE_NONE;
thread_template.th_sched_bucket = TH_BUCKET_RUN;
thread_template.importance = 0;
thread_template.sched_mode = TH_MODE_NONE;
thread_template.sched_flags = 0;
thread_template.saved_mode = TH_MODE_NONE;
thread_template.safe_release = 0;
thread_template.sfi_class = SFI_CLASS_UNSPECIFIED;
thread_template.sfi_wait_class = SFI_CLASS_UNSPECIFIED;
// thread_template.priority = 0;
thread_template.sched_pri = 0;
thread_template.max_priority = 0;
thread_template.task_priority = 0;
thread_template.promotions = 0;
thread_template.pending_promoter_index = 0;
thread_template.pending_promoter[0] =
thread_template.pending_promoter[1] = NULL;
thread_template.base_pri = BASEPRI_DEFAULT;
thread_template.waiting_for_mutex = NULL;
thread_template.realtime.deadline = UINT64_MAX;
thread_template.realtime.deadline = UINT64_MAX;
// thread_template.current_quantum = 0;
thread_template.last_run_time = 0;
// thread_template.last_quantum_refill_time = 0;
thread_template.last_made_runnable_time = THREAD_NOT_RUNNABLE;
thread_template.last_basepri_change_time = THREAD_NOT_RUNNABLE;
thread_template.computation_metered = 0;
thread_template.computation_epoch = 0;
thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0;
thread_template.bound_processor = PROCESSOR_NULL;
thread_template.last_processor = PROCESSOR_NULL;
thread_template.sched_call = sched_call_null;
#if defined (__DARLING__)
#else
timer_init(&thread_template.user_timer);
timer_init(&thread_template.system_timer);
#endif
thread_template.user_timer_save = 0;
thread_template.system_timer_save = 0;
thread_template.vtimer_user_save = 0;
thread_template.vtimer_prof_save = 0;
thread_template.vtimer_rlim_save = 0;
thread_template.wait_timer_is_set = FALSE;
thread_template.wait_timer_active = 0;
thread_template.depress_timer_active = 0;
//thread_template.special_handler.handler = special_handler;
//thread_template.special_handler.next = NULL;
//thread_template.funnel_lock = THR_FUNNEL_NULL;
//thread_template.funnel_state = 0;
thread_template.recover = (vm_offset_t)NULL;
thread_template.map = VM_MAP_NULL;
#if CONFIG_DTRACE
thread_template.t_dtrace_predcache = 0;
thread_template.t_dtrace_vtime = 0;
thread_template.t_dtrace_tracing = 0;
#endif /* CONFIG_DTRACE */
// thread_template.t_chud = 0;
thread_template.t_page_creation_count = 0;
thread_template.t_page_creation_time = 0;
thread_template.affinity_set = NULL;
thread_template.syscalls_unix = 0;
thread_template.syscalls_mach = 0;
thread_template.t_ledger = LEDGER_NULL;
thread_template.t_threadledger = LEDGER_NULL;
#if 0
thread_template.appliedstate = default_task_null_policy;
thread_template.ext_appliedstate = default_task_null_policy;
thread_template.policystate = default_task_proc_policy;
thread_template.ext_policystate = default_task_proc_policy;
#if defined(CONFIG_SCHED_TIMESHARE_CORE)
thread_template.pri_shift = INT8_MAX;
#endif
#if CONFIG_EMBEDDED
thread_template.taskwatch = NULL;
thread_template.saved_importance = 0;
#endif /* CONFIG_EMBEDDED */
init_thread = thread_template;
thread_template.bound_processor = PROCESSOR_NULL;
thread_template.last_processor = PROCESSOR_NULL;
thread_template.sched_call = NULL;
thread_template.wait_timer_is_set = FALSE;
thread_template.recover = (vm_offset_t)NULL;
thread_template.map = VM_MAP_NULL;
#if DEVELOPMENT || DEBUG
thread_template.pmap_footprint_suspended = FALSE;
#endif /* DEVELOPMENT || DEBUG */
#if KPC
thread_template.kpc_buf = NULL;
#endif
#if HYPERVISOR
thread_template.hv_thread_target = NULL;
#endif /* HYPERVISOR */
thread_template.affinity_set = NULL;
thread_template.t_ledger = LEDGER_NULL;
thread_template.t_threadledger = LEDGER_NULL;
thread_template.t_bankledger = LEDGER_NULL;
thread_template.requested_policy = (struct thread_requested_policy) {};
thread_template.effective_policy = (struct thread_effective_policy) {};
bzero(&thread_template.overrides, sizeof(thread_template.overrides));
thread_template.iotier_override = THROTTLE_LEVEL_NONE;
thread_template.thread_io_stats = NULL;
#if CONFIG_EMBEDDED
thread_template.taskwatch = NULL;
#endif /* CONFIG_EMBEDDED */
thread_template.ith_voucher_name = MACH_PORT_NULL;
thread_template.ith_voucher = IPC_VOUCHER_NULL;
thread_template.th_work_interval = NULL;
init_thread = thread_template;
/* fiddle with init thread to skip asserts in set_sched_pri */
init_thread.sched_pri = MAXPRI_KERNEL;
#warning Init thread initialization disabled!
#if defined (__DARLING__) && 0
// machine_set_current_thread(&init_thread);
linux_current->mach_thread = (void *) &init_thread;
init_thread.linux_task = linux_current;
#endif
#if 0
linux_current->mach_thread = (void*)&init_thread;
init_thread.linux_task = linux_current;
#endif
}
@ -262,241 +243,145 @@ void duct_thread_init (void)
}
#define TH_OPTION_NONE 0x00
#define TH_OPTION_NOCRED 0x01
#define TH_OPTION_NOSUSP 0x02
#define TH_OPTION_WORKQ 0x04
kern_return_t duct_thread_create (task_t task, thread_t * new_thread)
#ifdef current_thread
#undef current_thread
#endif
static kern_return_t duct_thread_create_internal (task_t parent_task, integer_t priority, thread_continue_t continuation, void* parameter, int options, thread_t * out_thread)
{
return duct_thread_create_internal2 (task, new_thread, FALSE);
}
thread_t new_thread;
static thread_t first_thread = THREAD_NULL;
static kern_return_t duct_thread_create_internal (task_t parent_task, integer_t priority, thread_continue_t continuation, int options, thread_t * out_thread)
{
#define TH_OPTION_NONE 0x00
#define TH_OPTION_NOCRED 0x01
#define TH_OPTION_NOSUSP 0x02
/*
* Allocate a thread and initialize static fields
*/
if (first_thread == THREAD_NULL) {
new_thread = first_thread = current_thread();
} else {
new_thread = (thread_t)duct_zalloc(thread_zone);
}
if (new_thread == THREAD_NULL) {
return KERN_RESOURCE_SHORTAGE;
}
thread_t new_thread;
static thread_t first_thread = THREAD_NULL;
if (new_thread != first_thread) {
*new_thread = thread_template;
}
/*
* Allocate a thread and initialize static fields
*/
if (first_thread == THREAD_NULL)
new_thread = first_thread = current_thread();
os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2);
new_thread = (thread_t) duct_zalloc(thread_zone);
if (new_thread == THREAD_NULL)
return (KERN_RESOURCE_SHORTAGE);
new_thread->task = parent_task;
if (new_thread != first_thread)
*new_thread = thread_template;
// WC - todo: compat_uthread_alloc
#warning compat_uthread disabled
#if 0
new_thread->compat_uthread = (void *) compat_uthread_alloc (parent_task, new_thread);
// Darling addition
#ifdef MACH_ASSERT
new_thread->thread_magic = THREAD_MAGIC;
#endif
// #ifdef MACH_BSD
// new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
// if (new_thread->uthread == NULL) {
// zfree(thread_zone, new_thread);
// return (KERN_RESOURCE_SHORTAGE);
// }
// #endif /* MACH_BSD */
thread_lock_init(new_thread);
// if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
// #ifdef MACH_BSD
// void *ut = new_thread->uthread;
//
// new_thread->uthread = NULL;
// /* cred free may not be necessary */
// uthread_cleanup(parent_task, ut, parent_task->bsd_info);
// uthread_cred_free(ut);
// uthread_zone_free(ut);
// #endif /* MACH_BSD */
//
// zfree(thread_zone, new_thread);
// return (KERN_FAILURE);
// }
lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);
new_thread->task = parent_task;
new_thread->ref_count = 2;
new_thread->waitq = NULL;
new_thread->thread_magic = THREAD_MAGIC;
new_thread->dispatch_qaddr = 0;
ipc_thread_init(new_thread);
thread_lock_init(new_thread);
// wake_lock_init(new_thread);
new_thread->continuation = continuation;
new_thread->parameter = parameter;
new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE;
priority_queue_init(&new_thread->sched_inheritor_queue, PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
priority_queue_init(&new_thread->base_inheritor_queue, PRIORITY_QUEUE_BUILTIN_MAX_HEAP);
#if CONFIG_SCHED_CLUTCH
priority_queue_entry_init(&new_thread->sched_clutchpri_link);
#endif /* CONFIG_SCHED_CLUTCH */
lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);
// do we still need this in Darling?
/* Allocate I/O Statistics structure */
new_thread->thread_io_stats = (io_stat_info_t)duct_kalloc(sizeof(struct io_stat_info));
assert(new_thread->thread_io_stats != NULL);
bzero(new_thread->thread_io_stats, sizeof(struct io_stat_info));
ipc_thread_init(new_thread);
#if CONFIG_IOSCHED
/* Clear out the I/O Scheduling info for AppleFSCompression */
new_thread->decmp_upl = NULL;
#endif /* CONFIG_IOSCHED */
#if defined (__DARLING__)
#else
// queue_init(&new_thread->held_ulocks);
// new_thread->continuation = continuation;
#endif
lck_mtx_lock(&tasks_threads_lock);
task_lock(parent_task);
lck_mtx_lock(&tasks_threads_lock);
/*
* Fail thread creation if parent task is being torn down or has too many threads
* If the caller asked for TH_OPTION_NOSUSP, also fail if the parent task is suspended
*/
if (parent_task->active == 0 || parent_task->halting ||
(parent_task->suspend_count > 0 && (options & TH_OPTION_NOSUSP) != 0) ||
(parent_task->thread_count >= task_threadmax && parent_task != kernel_task)) {
task_unlock(parent_task);
lck_mtx_unlock(&tasks_threads_lock);
task_lock(parent_task);
ipc_thread_disable(new_thread);
ipc_thread_terminate(new_thread);
kfree(new_thread->thread_io_stats, sizeof(struct io_stat_info));
lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
zfree(thread_zone, new_thread);
return KERN_FAILURE;
}
if (!parent_task->active || parent_task->halting ||
((options & TH_OPTION_NOSUSP) != 0 &&
parent_task->suspend_count > 0) ||
(parent_task->thread_count >= task_threadmax &&
parent_task != kernel_task) ) {
/* Protected by the tasks_threads_lock */
new_thread->thread_id = ++thread_unique_id;
task_unlock(parent_task);
lck_mtx_unlock(&tasks_threads_lock);
task_reference_internal(parent_task);
// #ifdef MACH_BSD
// {
// void *ut = new_thread->uthread;
//
// new_thread->uthread = NULL;
// uthread_cleanup(parent_task, ut, parent_task->bsd_info);
// /* cred free may not be necessary */
// uthread_cred_free(ut);
// uthread_zone_free(ut);
// }
// #endif /* MACH_BSD */
ipc_thread_disable(new_thread);
ipc_thread_terminate(new_thread);
lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
// machine_thread_destroy(new_thread);
zfree(thread_zone, new_thread);
return (KERN_FAILURE);
}
#if defined(CONFIG_SCHED_MULTIQ)
/* Cache the task's sched_group */
new_thread->sched_group = parent_task->sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */
// /* New threads inherit any default state on the task */
// machine_thread_inherit_taskwide(new_thread, parent_task);
/* Cache the task's map */
new_thread->map = parent_task->map;
task_reference_internal (parent_task);
//
#if defined (__DARLING__)
#else
if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
/*
* This task has a per-thread CPU limit; make sure this new thread
* gets its limit set too, before it gets out of the kernel.
*/
set_astledger(new_thread);
}
timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
/* Set the thread's scheduling parameters */
new_thread->max_priority = parent_task->max_priority;
new_thread->task_priority = parent_task->priority;
int new_priority = (priority < 0) ? parent_task->priority: priority;
new_priority = (priority < 0)? parent_task->priority: priority;
if (new_priority > new_thread->max_priority) {
new_priority = new_thread->max_priority;
}
#if CONFIG_EMBEDDED
if (new_priority < MAXPRI_THROTTLE) {
new_priority = MAXPRI_THROTTLE;
}
#endif /* CONFIG_EMBEDDED */
new_thread->importance = new_priority - new_thread->task_priority;
new_thread->t_threadledger = LEDGER_NULL; /* per thread ledger is not inherited */
new_thread->t_ledger = new_thread->task->ledger;
/* Chain the thread onto the task's list */
queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
parent_task->thread_count++;
if (new_thread->t_ledger)
ledger_reference(new_thread->t_ledger);
#endif
/* So terminating threads don't need to take the task lock to decrement */
os_atomic_inc(&parent_task->active_thread_count, relaxed);
/* Cache the task's map */
new_thread->map = parent_task->map;
new_thread->active = TRUE;
// we might need to do this later if things break
//new_thread->turnstile = turnstile_alloc();
//
// /* Chain the thread onto the task's list */
queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
parent_task->thread_count++;
//
// /* So terminating threads don't need to take the task lock to decrement */
hw_atomic_add(&parent_task->active_thread_count, 1);
//
// /* Protected by the tasks_threads_lock */
// new_thread->thread_id = ++thread_unique_id;
//
// queue_enter(&threads, new_thread, thread_t, threads);
// threads_count++;
//
// #if defined (__DARLING__)
// #else
timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
// timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);
// #endif
// Darling additions
get_task_struct(linux_current);
new_thread->linux_task = linux_current;
new_thread->in_sigprocess = FALSE;
// #if CONFIG_COUNTERS
// /*
// * If parent task has any reservations, they need to be propagated to this
// * thread.
// */
// new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
// THREAD_PMC_FLAG : 0U;
// #endif
*out_thread = new_thread;
// /* Set the thread's scheduling parameters */
// new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
// new_thread->sched_flags = 0;
// new_thread->max_priority = parent_task->max_priority;
// new_thread->task_priority = parent_task->priority;
// new_thread->priority = (priority < 0)? parent_task->priority: priority;
// if (new_thread->priority > new_thread->max_priority)
// new_thread->priority = new_thread->max_priority;
// #if CONFIG_EMBEDDED
// if (new_thread->priority < MAXPRI_THROTTLE) {
// new_thread->priority = MAXPRI_THROTTLE;
// }
// #endif /* CONFIG_EMBEDDED */
// new_thread->importance =
// new_thread->priority - new_thread->task_priority;
// #if CONFIG_EMBEDDED
// new_thread->saved_importance = new_thread->importance;
// /* apple ios daemon starts all threads in darwin background */
// if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
// /* Cannot use generic routines here so apply darwin bacground directly */
// new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
// /* set thread self backgrounding */
// new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg;
// /* priority will get recomputed suitably bit later */
// new_thread->importance = INT_MIN;
// /* to avoid changes to many pri compute routines, set the effect of those here */
// new_thread->priority = MAXPRI_THROTTLE;
// }
// #endif /* CONFIG_EMBEDDED */
// #if defined(CONFIG_SCHED_TRADITIONAL)
// new_thread->sched_stamp = sched_tick;
// new_thread->pri_shift = sched_pri_shift;
// #endif
// SCHED(compute_priority)(new_thread, FALSE);
// #endif
new_thread->active = TRUE;
get_task_struct(linux_current);
new_thread->linux_task = linux_current;
new_thread->in_sigprocess = FALSE;
*out_thread = new_thread;
// {
// long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;
//
// kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);
//
// KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
// TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
// (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);
//
// kdbg_trace_string(parent_task->bsd_info,
// &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);
//
// KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
// TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
// dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
// }
//
// DTRACE_PROC1(lwp__create, thread_t, *out_thread);
task_unlock(parent_task);
return (KERN_SUCCESS);
return KERN_SUCCESS;
}
static kern_return_t duct_thread_create_internal2 (task_t task, thread_t * new_thread, boolean_t from_user)
static kern_return_t duct_thread_create_internal2 (task_t task, thread_t * new_thread, boolean_t from_user, thread_continue_t continuation)
{
kern_return_t result;
thread_t thread;
@ -505,7 +390,7 @@ static kern_return_t duct_thread_create_internal2 (task_t task, thread_t * new_t
return (KERN_INVALID_ARGUMENT);
result =
duct_thread_create_internal (task, -1, (thread_continue_t) thread_bootstrap_return, TH_OPTION_NONE, &thread);
duct_thread_create_internal (task, -1, continuation, NULL, TH_OPTION_NONE, &thread);
if (result != KERN_SUCCESS)
return (result);
@ -526,10 +411,18 @@ static kern_return_t duct_thread_create_internal2 (task_t task, thread_t * new_t
return (KERN_SUCCESS);
}
kern_return_t duct_thread_create(task_t task, thread_t* new_thread) {
return duct_thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return);
}
kern_return_t duct_thread_create_from_user(task_t task, thread_t* new_thread) {
return duct_thread_create_internal2(task, new_thread, TRUE, (thread_continue_t)thread_bootstrap_return);
}
kern_return_t duct_thread_create_with_continuation(task_t task, thread_t* new_thread, thread_continue_t continuation) {
return duct_thread_create_internal2(task, new_thread, FALSE, continuation);
}
#ifdef current_thread
#undef current_thread
#endif
thread_t current_thread (void)
{
// kprintf ("calling current thread on linux task: 0x%x\n", (unsigned int) linux_current);
@ -543,72 +436,62 @@ void duct_thread_destroy(thread_t thread)
thread->linux_task = NULL;
thread->active = FALSE;
hw_atomic_add(&task->active_thread_count, -1);
os_atomic_dec(&task->active_thread_count, relaxed);
duct_thread_deallocate(thread);
}
void duct_thread_deallocate (thread_t thread)
static bool thread_ref_release(thread_t thread)
{
task_t task;
if (thread == THREAD_NULL) {
return false;
}
if (thread == THREAD_NULL) {
return;
}
return os_ref_release(&thread->ref_count) == 0;
}
timer_call_cancel(&thread->wait_timer);
void duct_thread_deallocate(thread_t thread)
{
if (thread_ref_release(thread)) {
thread_deallocate_complete(thread);
}
}
task = thread->task;
static void thread_deallocate_complete(thread_t thread)
{
task_t task;
if (hw_atomic_sub(&(thread)->ref_count, 1) > 0) {
return;
}
ipc_thread_terminate(thread);
ipc_thread_terminate (thread);
task = thread->task;
// #ifdef MACH_BSD
// {
// void *ut = thread->uthread;
//
// thread->uthread = NULL;
// uthread_zone_free(ut);
// }
// #endif /* MACH_BSD */
// NOTE(@facekapow): we might need to uncomment this later if it turns out we really need turnstiles
/*
if (thread->turnstile) {
turnstile_deallocate(thread->turnstile);
}
*/
#if 0
void * uthread = thread->compat_uthread;
thread->compat_uthread = NULL;
// WC - todo check below: should use zone free (), not uthread_free
compat_uthread_zone_free (uthread);
// compat_uthread_free (thread->compat_uthread);
#endif
if (IPC_VOUCHER_NULL != thread->ith_voucher) {
ipc_voucher_release(thread->ith_voucher);
}
// if (thread->t_ledger)
// ledger_dereference(thread->t_ledger);
// if (thread->t_threadledger)
// ledger_dereference(thread->t_threadledger);
if (thread->thread_io_stats) {
duct_kfree(thread->thread_io_stats, sizeof(struct io_stat_info));
}
// if (thread->kernel_stack != 0)
// stack_free (thread);
task_lock(task);
// WC - todo check below
// lck_mtx_destroy (&thread->mutex, &thread_lck_grp);
// machine_thread_destroy (thread);
queue_remove(&task->threads, thread, thread_t, task_threads);
task->thread_count--;
// Remove itself from thread list
task_lock(task);
task_deallocate(task);
queue_remove(&task->threads, thread, thread_t, task_threads);
task->thread_count--;
if (thread->linux_task != NULL)
put_task_struct(thread->linux_task);
task_unlock(task);
task_deallocate (task);
if (thread->linux_task != NULL)
put_task_struct(thread->linux_task);
debug_msg("Deallocating thread %p\n", thread);
duct_zfree (thread_zone, thread);
debug_msg("Deallocating thread %p\n", thread);
duct_zfree(thread_zone, thread);
}
struct task_struct* thread_get_linux_task(thread_t thread)

View File

@ -171,7 +171,7 @@ kern_return_t duct_waitq_init (waitq_t wq, int policy)
kern_return_t duct_waitq_set_init (waitq_set_t wqset, int policy, uint64_t *reserved_link,
void *prepost_hook)
waitq_set_prepost_hook_t *prepost_hook)
{
kern_return_t ret;

View File

@ -52,7 +52,7 @@ extern void duct_waitq_bootstrap (void);
extern kern_return_t duct_waitq_init (waitq_t wq, int policy);
extern kern_return_t duct_waitq_set_init (waitq_set_t wqset, int policy, uint64_t *reserved_link,
void *prepost_hook);
waitq_set_prepost_hook_t *prepost_hook);
extern waitq_link_t duct_waitq_link_allocate (void);
extern kern_return_t duct_waitq_link_free (waitq_link_t wql);

View File

@ -70,3 +70,13 @@ void duct_zfree (zone_t zone, void * elem)
{
kmem_cache_free ((struct kmem_cache *) zone, elem);
}
boolean_t kdp_is_in_zone(void* addr, const char* zone_name) {
// this isn't too important; it's only used in debug/development mode inside asserts
// TODO: how are we actually supposed to check this? as far as i can tell,
// Linux doesn't have a way to lookup a slab via an address
// well, since we would only lookup caches that we ourselves allocate, we could keep a list of
// the ones we have, but that's a lot of unnecessary work for something that is only
// ever really used in development
return 1;
};

View File

@ -44,23 +44,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// #define XNU_PROBE_IN_MACHTRAP_WRAPPERS_TIMEKEEPING
// should be in machine/conf.c
extern struct clock_ops sysclk_ops, calend_ops;
/*
* List of clock devices.
*/
struct clock clock_list[] = {
{ &sysclk_ops, 0, 0 },
{ &calend_ops, 0, 0 }
};
int clock_count = sizeof(clock_list) / sizeof(clock_list[0]);
int rtclock_init (void)
{
// pe_arm_init_timebase(NULL);

View File

@ -112,11 +112,11 @@ extern void duct_panic(const char* reason, ...);
#undef hw_lock_to
#undef hw_lock_unlock
#define hw_lock_init(lck) spin_lock_init ((spinlock_t *) lck)
#define hw_lock_try(lck) spin_trylock ((spinlock_t *) lck)
#define hw_lock_held(lck) spin_is_locked ((spinlock_t *) lck)
#define hw_lock_to(lock, timeout) spin_lock ((spinlock_t *) lck)
#define hw_lock_unlock(lock) spin_unlock ((spinlock_t *) lck)
#define hw_lock_init(lck) spin_lock_init ((spinlock_t *) lck)
#define hw_lock_try(lck, grp) spin_trylock ((spinlock_t *) lck)
#define hw_lock_held(lck) spin_is_locked ((spinlock_t *) lck)
#define hw_lock_to(lock, timeout, grp) spin_lock ((spinlock_t *) lck)
#define hw_lock_unlock(lock) spin_unlock ((spinlock_t *) lck)
/* wait queue locks */

View File

@ -116,17 +116,18 @@ kmem_free(
vfree((void*) addr);
}
kern_return_t kmem_alloc_external(vm_map_t map, vm_offset_t* addrp, vm_size_t size) {
return kmem_alloc(map, addrp, size, 0);
}
kern_return_t
kmem_alloc(
vm_map_t map,
vm_offset_t *addrp,
vm_size_t size)
vm_size_t size,
vm_tag_t tag)
{
*addrp = (vm_offset_t) vmalloc(size);
if (*addrp)
return KERN_SUCCESS;
else
return KERN_NO_SPACE;
return kernel_memory_allocate(map, addrp, size, 0, 0, tag);
}
kern_return_t kernel_memory_allocate(

View File

@ -137,6 +137,8 @@ void duct_vm_map_deallocate(vm_map_t map)
}
}
os_refgrp_decl(static, map_refgrp, "vm_map", NULL);
// Calling with a NULL task makes other funcs consider the map a kernel map
vm_map_t duct_vm_map_create (struct task_struct* linux_task)
{
@ -195,7 +197,7 @@ vm_map_t duct_vm_map_create (struct task_struct* linux_task)
get_task_struct(linux_task);
result->linux_task = linux_task;
result->max_offset = darling_is_task_64bit() ? 0x7fffffffffffull : VM_MAX_ADDRESS;
result->ref_count = 1;
os_ref_init_count(&result->map_refcnt, &map_refgrp, 1);
result->hdr.page_shift = PAGE_SHIFT;
#endif

View File

@ -477,11 +477,15 @@ get_interrupt_level(void)
{
CPU_DATA_GET(cpu_interrupt_level, int)
}
#ifdef __DARLING__
int get_cpu_number(void);
#else
static inline int
get_cpu_number(void)
{
CPU_DATA_GET(cpu_number, int)
}
#endif
static inline int
get_cpu_phys_number(void)
{
@ -718,6 +722,9 @@ disable_preemption_internal(void)
static inline void
enable_preemption_internal(void)
{
#ifdef __DARLING__
printf("STUB: enable_preemption_internal\n");
#else
assert(get_preemption_level() > 0);
pltrace(TRUE);
os_compiler_barrier(release);
@ -736,6 +743,7 @@ enable_preemption_internal(void)
: "eax", "ecx", "edx", "cc", "memory");
#endif
os_compiler_barrier(acquire);
#endif
}
static inline void

View File

@ -66,8 +66,10 @@
#ifdef KERNEL_PRIVATE
#ifndef __DARLING__
/* Use a function to do this less directly. */
extern int cpu_number(void);
#endif
#ifdef MACH_KERNEL_PRIVATE
#include <i386/cpu_data.h>

View File

@ -50,26 +50,31 @@ extern unsigned int LckDisablePreemptCheck;
#endif /* MACH_KERNEL_PRIVATE */
#if defined(MACH_KERNEL_PRIVATE)
#ifndef __DARLING__
typedef struct {
volatile uintptr_t interlock;
#if MACH_LDEBUG
unsigned long lck_spin_pad[9]; /* XXX - usimple_lock_data_t */
#endif
} lck_spin_t;
#endif
#define LCK_SPIN_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */
#else /* MACH_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#ifndef __DARLING__
typedef struct {
unsigned long opaque[10];
} lck_spin_t;
#endif
#else /* KERNEL_PRIVATE */
typedef struct __lck_spin_t__ lck_spin_t;
#endif
#endif
#ifdef MACH_KERNEL_PRIVATE
#ifndef __DARLING__
/* The definition of this structure, including the layout of the
* state bitfield, is tailored to the asm implementation in i386_lock.s
*/
@ -101,6 +106,7 @@ typedef struct _lck_mtx_ {
};
};
} lck_mtx_t;
#endif
#define LCK_MTX_WAITERS_MSK 0x0000ffff
#define LCK_MTX_WAITER 0x00000001

View File

@ -238,6 +238,7 @@ set_ds(uint16_t ds)
__asm__ volatile ("mov %0, %%ds" : : "r" (ds));
}
#ifndef __DARLING__
static inline uint16_t
get_fs(void)
{
@ -251,6 +252,7 @@ set_fs(uint16_t fs)
{
__asm__ volatile ("mov %0, %%fs" : : "r" (fs));
}
#endif
static inline uint16_t
get_gs(void)
@ -416,11 +418,13 @@ extern int rdmsr64_carefully(uint32_t msr, uint64_t *val);
extern int wrmsr64_carefully(uint32_t msr, uint64_t val);
#endif /* MACH_KERNEL_PRIVATE */
#ifndef __DARLING__
static inline void
wbinvd(void)
{
__asm__ volatile ("wbinvd");
}
#endif
static inline void
invlpg(uintptr_t addr)
@ -428,6 +432,7 @@ invlpg(uintptr_t addr)
__asm__ volatile ("invlpg (%0)" :: "r" (addr) : "memory");
}
#ifndef __DARLING__
static inline void
clac(void)
{
@ -439,6 +444,7 @@ stac(void)
{
__asm__ volatile ("stac");
}
#endif
/*
* Access to machine-specific registers (available on 586 and better only)
@ -446,6 +452,7 @@ stac(void)
* pointer indirection), this allows gcc to optimize better
*/
#ifndef __DARLING__
#define rdmsr(msr, lo, hi) \
__asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr))
@ -510,6 +517,7 @@ rdtscp64(uint32_t *aux)
return ((hi) << 32) | (lo);
}
#endif /* __LP64__ */
#endif // __DARLING__
/*
* rdmsr_carefully() returns 0 when the MSR has been read successfully,
@ -521,6 +529,9 @@ __END_DECLS
#endif /* ASSEMBLER */
// has the same values as the Linux definitions, but the redefinitions
// produce lots of warnings, so let's shut those up
#ifndef __DARLING__
#define MSR_IA32_P5_MC_ADDR 0
#define MSR_IA32_P5_MC_TYPE 1
#define MSR_IA32_PLATFORM_ID 0x17
@ -679,6 +690,7 @@ __END_DECLS
#define MSR_IA32_GT_PERF_LIMIT_REASONS 0x6B0
#define MSR_IA32_TSC_DEADLINE 0x6e0
#endif
#define MSR_IA32_EFER 0xC0000080
#define MSR_IA32_EFER_SCE 0x00000001

View File

@ -265,6 +265,7 @@ __END_DECLS
#define NULL_SEG 0
#ifndef __DARLING__
/*
* Kernel descriptors for MACH - 64-bit flat address space.
@ -285,6 +286,7 @@ __END_DECLS
/* 12: other 8 bytes of USER_LDT */
#define KERNEL_DS 0x68 /* 13: 32-bit kernel data */
#endif
#define SYSENTER_TF_CS (USER_CS|0x10000)
#define SYSENTER_DS KERNEL64_SS /* sysenter kernel data segment */

View File

@ -94,6 +94,10 @@ typedef struct uslock_debug {
unsigned char unlock_cpu;
} uslock_debug;
#ifdef __DARLING__
typedef struct spinlock usimple_lock_data_t;
typedef struct spinlock* usimple_lock_t;
#else
typedef struct slock {
hw_lock_data_t interlock; /* must be first... see lock.c */
#if USLOCK_DEBUG
@ -102,6 +106,7 @@ typedef struct slock {
uslock_debug debug;
#endif
} usimple_lock_data_t, *usimple_lock_t;
#endif
extern void i386_lock_unlock_with_flush(
hw_lock_t);

View File

@ -198,6 +198,9 @@ extern void act_thread_cfree(void *ctx);
static inline vm_offset_t
current_stack_depth(void)
{
#ifdef __DARLING__
return 0;
#else
vm_offset_t stack_ptr;
assert(get_preemption_level() > 0 || !ml_get_interrupts_enabled());
@ -210,6 +213,7 @@ current_stack_depth(void)
return current_cpu_datap()->cpu_kernel_stack
+ sizeof(struct thread_kernel_state)
- stack_ptr;
#endif
}
/*

View File

@ -63,6 +63,11 @@
* Primitive functions to manipulate translation entries.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach_debug.h>
#include <mach/kern_return.h>
@ -81,6 +86,10 @@
#include <string.h>
#include <sys/kdebug.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: ipc_entry_lookup
* Purpose:
@ -559,6 +568,11 @@ ipc_entry_grow_table(
uint64_t rescan_count = 0;
#endif
assert(is_active(space));
#ifdef __DARLING__
lck_spin_unlock(&space->is_lock_data);
mutex_lock(&space->is_mutex_lock);
lck_spin_lock(&space->is_lock_data);
#endif
if (is_growing(space)) {
/*
@ -636,7 +650,11 @@ ipc_entry_grow_table(
is_write_lock(space);
is_done_growing(space);
is_write_unlock(space);
#ifdef __DARLING__
mutex_unlock(&space->is_mutex_lock);
#else
thread_wakeup((event_t) space);
#endif
return KERN_RESOURCE_SHORTAGE;
}
@ -722,7 +740,11 @@ rescan:
is_done_growing(space);
is_write_unlock(space);
#ifdef __DARLING__
mutex_unlock(&space->is_mutex_lock);
#else
thread_wakeup((event_t) space);
#endif
it_entries_free(its, table);
is_write_lock(space);
return KERN_SUCCESS;
@ -769,7 +791,11 @@ rescan:
is_done_growing(space);
is_write_unlock(space);
#ifdef __DARLING__
mutex_unlock(&space->is_mutex_lock);
#else
thread_wakeup((event_t) space);
#endif
/*
* Now we need to free the old table.

View File

@ -63,6 +63,11 @@
* Entry hash table operations.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/boolean.h>
#include <mach/port.h>
#include <kern/kalloc.h>
@ -83,6 +88,10 @@
#include <vm/vm_kern.h>
#endif /* MACH_IPC_DEBUG */
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Forward declarations
*/

View File

@ -26,6 +26,11 @@
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/notify.h>
#include <ipc/ipc_types.h>
@ -47,6 +52,10 @@
#include <mach/mach_voucher_attr_control.h>
#include <mach/machine/sdt.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
extern int proc_pid(void *);
extern int proc_selfpid(void);
extern uint64_t proc_uniqueid(void *p);

View File

@ -70,6 +70,11 @@
* Functions to initialize the IPC system.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach_debug.h>
#include <mach/port.h>
@ -107,6 +112,10 @@
#include <mach/machine/ndr_def.h> /* NDR_record */
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
vm_map_t ipc_kernel_map;
vm_size_t ipc_kernel_map_size = 1024 * 1024;
@ -251,6 +260,12 @@ vm_size_t msg_ool_size_small;
void
ipc_init(void)
{
#ifdef __DARLING__
extern vm_map_t duct_vm_map_create(struct task_struct* t);
msg_ool_size_small = MSG_OOL_SIZE_SMALL_MAX;
ipc_kernel_map = duct_vm_map_create(NULL);
#else
kern_return_t retval;
vm_offset_t min;
@ -292,6 +307,7 @@ ipc_init(void)
}
/* account for overhead to avoid spilling over a page */
msg_ool_size_small -= cpy_kdata_hdr_sz;
#endif
ipc_host_init();
ux_handler_init();

View File

@ -70,6 +70,10 @@
* Operations on kernel messages.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/boolean.h>
@ -139,6 +143,11 @@
#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#include <darling/debug_print.h>
#endif
#pragma pack(4)
typedef struct{
@ -217,6 +226,7 @@ mm_copy_options_string64(
void db_print_msg_uid64(mach_msg_header_t *);
#ifndef __DARLING__
static void
ipc_msg_body_print64(void *body, int size)
{
@ -237,7 +247,7 @@ ipc_msg_body_print64(void *body, int size)
kprintf("\n %p: ", word);
}
}
#endif
const char *
ipc_type_name64(
@ -384,11 +394,13 @@ ipc_msg_print64(
msgh->msgh_id,
msgh->msgh_size);
#ifndef __DARLING__
if (mbits & MACH_MSGH_BITS_COMPLEX) {
ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1));
}
ipc_msg_body_print64((void *)(msgh + 1), msgh->msgh_size);
#endif
}
@ -1655,6 +1667,16 @@ ipc_kmsg_get(
kmsg->ikm_header->msgh_voucher_port = legacy_base.header.msgh_voucher_port;
kmsg->ikm_header->msgh_id = legacy_base.header.msgh_id;
#ifdef __DARLING__
debug_msg("- ikm_header->msgh_size: %d, bits: 0x%x rport: 0x%x, lport: 0x%x, reserved: 0x%x, id: %d\n",
kmsg->ikm_header->msgh_size,
kmsg->ikm_header->msgh_bits,
kmsg->ikm_header->msgh_remote_port,
kmsg->ikm_header->msgh_local_port,
kmsg->ikm_header->msgh_reserved,
kmsg->ikm_header->msgh_id);
#endif
DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
" size: 0x%.8x\n"
" bits: 0x%.8x\n"
@ -3683,6 +3705,16 @@ ipc_kmsg_copyin(
return mr;
}
#ifdef __DARLING__
debug_msg("- copyin_header->msgh_size: %d, bits: 0x%x rport: 0x%p, lport: 0x%p, reserved: 0x%x, id: %d\n",
kmsg->ikm_header->msgh_size,
kmsg->ikm_header->msgh_bits,
kmsg->ikm_header->msgh_remote_port,
kmsg->ikm_header->msgh_local_port,
kmsg->ikm_header->msgh_reserved,
kmsg->ikm_header->msgh_id);
#endif
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE,
VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
(uintptr_t)kmsg->ikm_header->msgh_bits,

View File

@ -69,6 +69,10 @@
* Version 2.0.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/port.h>
#include <mach/message.h>
@ -101,6 +105,11 @@
#include <sys/event.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#include <darling/debug_print.h>
#endif
extern char *proc_name_address(void *p);
int ipc_mqueue_full; /* address is event for queue space */
@ -1100,6 +1109,11 @@ ipc_mqueue_receive_on_thread(
int interruptible,
thread_t thread)
{
#if defined (__DARLING__)
debug_msg("- ipc_mqueue_receive_on_thread (mqueue: 0x%p, option: 0x%x, rcv_timeout: %d) called\n",
mqueue, option, (int)rcv_timeout);
#endif
wait_result_t wresult;
uint64_t deadline;
struct turnstile *rcv_turnstile = TURNSTILE_NULL;

View File

@ -63,6 +63,11 @@
* Notification-sending functions.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/port.h>
#include <mach/message.h>
#include <mach/mach_notify.h>
@ -70,6 +75,10 @@
#include <ipc/ipc_notify.h>
#include <ipc/ipc_port.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: ipc_notify_port_deleted
* Purpose:

View File

@ -70,6 +70,11 @@
* Functions to manipulate IPC objects.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
@ -94,6 +99,10 @@
#include <security/mac_mach_internal.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
zone_t ipc_object_zones[IOT_NUMBER];
/*

View File

@ -69,6 +69,11 @@
* Functions to manipulate IPC ports.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <zone_debug.h>
#include <mach_assert.h>
@ -98,6 +103,12 @@
#include <string.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
boolean_t kdp_is_in_zone(void* addr, const char* zone_name);
#endif
decl_lck_spin_data(, ipc_port_multiple_lock_data);
ipc_port_timestamp_t ipc_port_timestamp_data;
int ipc_portbt;
@ -2986,6 +2997,7 @@ ipc_port_init_debug(
port->ip_spares[i] = 0;
}
#ifndef __DARLING__
#ifdef MACH_BSD
task_t task = current_task();
if (task != TASK_NULL) {
@ -2995,6 +3007,7 @@ ipc_port_init_debug(
}
}
#endif /* MACH_BSD */
#endif
#if 0
lck_spin_lock(&port_alloc_queue_lock);

View File

@ -63,6 +63,11 @@
* Functions to manipulate IPC port sets.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/message.h>
@ -78,6 +83,10 @@
#include <vm/vm_map.h>
#include <libkern/section_keywords.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: ipc_pset_alloc
* Purpose:
@ -380,6 +389,8 @@ ipc_pset_destroy(
ips_release(pset); /* consume the ref our caller gave us */
}
#ifndef __DARLING__
/*
* Kqueue EVFILT_MACHPORT support
*
@ -1244,3 +1255,4 @@ SECURITY_READ_ONLY_EARLY(struct filterops) machport_filtops = {
.f_process = filt_machportprocess,
.f_peek = filt_machportpeek,
};
#endif

View File

@ -70,6 +70,11 @@
* Functions to manipulate IPC capabilities.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/port.h>
@ -90,6 +95,10 @@
#include <ipc/ipc_importance.h>
#include <security/mac_mach_internal.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: ipc_right_lookup_write
* Purpose:

View File

@ -69,6 +69,11 @@
* Functions to manipulate IPC capability spaces.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/port.h>
@ -86,6 +91,10 @@
#include <prng/random.h>
#include <string.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/* Remove this in the future so port names are less predictable. */
#define CONFIG_SEMI_RANDOM_ENTRIES
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
@ -264,6 +273,10 @@ ipc_space_create(
ipc_space_rand_freelist(space, table, 0, new_size);
is_lock_init(space);
#ifdef __DARLING__
mutex_init(&space->is_mutex_lock);
#endif
space->is_bits = 2; /* 2 refs, active, not growing */
space->is_table_hashed = 0;
space->is_table_size = new_size;
@ -306,6 +319,9 @@ ipc_space_create_special(
}
is_lock_init(space);
#ifdef __DARLING__
mutex_init(&space->is_mutex_lock);
#endif
space->is_bits = IS_INACTIVE | 1; /* 1 ref, not active, not growing */
space->is_table = IE_NULL;

View File

@ -113,6 +113,9 @@ typedef natural_t ipc_space_refs_t;
#define IS_ENTROPY_CNT 1 /* per-space entropy pool size */
struct ipc_space {
#ifdef __DARLING__
struct mutex is_mutex_lock;
#endif
lck_spin_t is_lock_data;
ipc_space_refs_t is_bits; /* holds refs, active, growing */
ipc_entry_num_t is_table_size; /* current size of table */
@ -178,19 +181,27 @@ extern lck_attr_t ipc_lck_attr;
#define is_read_lock(is) lck_spin_lock_grp(&(is)->is_lock_data, &ipc_lck_grp)
#define is_read_unlock(is) lck_spin_unlock(&(is)->is_lock_data)
#ifdef __DARLING__
#define is_read_sleep(is) printk(KERN_NOTICE "- BUG: is_read_sleep () called\n")
#else
#define is_read_sleep(is) lck_spin_sleep_grp(&(is)->is_lock_data, \
LCK_SLEEP_DEFAULT, \
(event_t)(is), \
THREAD_UNINT, \
&ipc_lck_grp)
#endif
#define is_write_lock(is) lck_spin_lock_grp(&(is)->is_lock_data, &ipc_lck_grp)
#define is_write_unlock(is) lck_spin_unlock(&(is)->is_lock_data)
#ifdef __DARLING__
#define is_write_sleep(is) printk(KERN_NOTICE "- BUG: is_write_sleep () called\n")
#else
#define is_write_sleep(is) lck_spin_sleep_grp(&(is)->is_lock_data, \
LCK_SLEEP_DEFAULT, \
(event_t)(is), \
THREAD_UNINT, \
&ipc_lck_grp)
#endif
#define is_refs(is) ((is)->is_bits & IS_REFS_MAX)
@ -211,6 +222,9 @@ is_release(ipc_space_t is)
if (1 == (OSDecrementAtomic(&(is->is_bits)) & IS_REFS_MAX)) {
assert(!is_active(is));
is_lock_destroy(is);
#ifdef __DARLING__
mutex_destroy(&is->is_mutex_lock);
#endif
is_free(is);
}
}

View File

@ -63,6 +63,11 @@
* Functions to manipulate tables of IPC capabilities.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <ipc/ipc_table.h>
@ -71,6 +76,10 @@
#include <kern/kalloc.h>
#include <vm/vm_kern.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
ipc_table_size_t ipc_table_entries = NULL;
unsigned int ipc_table_entries_size = CONFIG_IPC_TABLE_ENTRIES_STEPS;

View File

@ -26,6 +26,11 @@
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/notify.h>
@ -45,6 +50,10 @@
#include <mach/mach_host_server.h>
#include <voucher/ipc_pthread_priority_types.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Sysctl variable; enable and disable tracing of voucher contents
*/

View File

@ -62,6 +62,12 @@
*
* Exported IPC debug calls.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach_ipc_debug.h>
#include <mach/vm_param.h>
@ -88,6 +94,10 @@
#include <security/mac_mach_internal.h>
#endif
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: mach_port_get_srights [kernel call]
* Purpose:

View File

@ -26,6 +26,11 @@
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/mach_vm_server.h>
@ -38,6 +43,10 @@
#include <kern/kalloc.h>
#include <vm/vm_protos.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
int
_kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args *args)
{

View File

@ -70,6 +70,11 @@
* Exported message traps. See mach/message.h.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/port.h>
@ -113,6 +118,10 @@
#include <sys/kdebug.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
#ifndef offsetof
#define offsetof(type, member) ((size_t)(&((type *)0)->member))
#endif /* offsetof */

View File

@ -70,6 +70,11 @@
* Exported kernel calls. See mach/mach_port.defs.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach_debug.h>
#include <mach/port.h>
@ -103,6 +108,9 @@
#include <ipc/ipc_importance.h>
#endif
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/* Zeroed template of qos flags */
@ -1408,10 +1416,12 @@ mach_port_get_set_status(
(vm_map_size_t)size_used, TRUE, &memory);
assert(kr == KERN_SUCCESS);
#ifndef __DARLING__ // Our kmem_free cannot free parts of a block
if (vm_size_used != size) {
kmem_free(ipc_kernel_map,
addr + vm_size_used, size - vm_size_used);
}
#endif
}
*members = (mach_port_name_t *) memory;

View File

@ -61,6 +61,11 @@
* $EndLog$
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#ifdef MACH_KERNEL
#include <mig_debug.h>
#endif
@ -68,6 +73,10 @@
#include <mach/message.h>
#include <mach/mig_log.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
int mig_tracing, mig_errors, mig_full_tracing;
/*

View File

@ -38,7 +38,11 @@
typedef unsigned int uint;
// unnecessary, since the Linux def. is equivalent,
// but shuts up a compiler warning
#ifndef __DARLING__
#define BIT(b) (1ULL << (b))
#endif
#define mask(width) (width >= 64 ? -1 : (BIT(width) - 1))
#define extract(x, shift, width) ((((uint64_t)(x)) >> (shift)) & mask(width))
@ -196,6 +200,14 @@ atomic_bit_clear(_Atomic bitmap_t *map, int n, int mem_order)
#define bitmap_bit(n) bits(n, 5, 0)
#define bitmap_index(n) bits(n, 63, 6)
#ifdef __DARLING__
#define bitmap_zero xnu_bitmap_zero
#define bitmap_full xnu_bitmap_full
#define bitmap_set xnu_bitmap_set
#define bitmap_free xnu_bitmap_free
#define bitmap_clear xnu_bitmap_clear
#endif
inline static bitmap_t *
bitmap_zero(bitmap_t *map, uint nbits)
{

View File

@ -32,6 +32,10 @@
#include <kern/queue.h>
#include <kern/assert.h>
#ifdef __DARLING__
#define __builtin_assume(x)
#endif
__BEGIN_DECLS
/*

View File

@ -37,6 +37,11 @@
* machine-independent clock service layer.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <kern/host.h>
@ -61,6 +66,10 @@
#include <mach/host_priv_server.h>
#include <libkern/section_keywords.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Actual clock alarm structure. Used for user clock_sleep() and
* clock_alarm() calls. Alarms are allocated from the alarm free

View File

@ -390,8 +390,13 @@ struct efi_aurr_extended_panic_log {
__BEGIN_DECLS
#ifdef __DARLING__
__printflike(1,2)
extern void duct_panic(const char* string, ...);
#else
__abortlike __printflike(1, 2)
extern void panic(const char *string, ...);
#endif
__END_DECLS
@ -507,6 +512,20 @@ __BEGIN_DECLS
#define LINE_NUMBER(x) __STRINGIFY(x)
#define PANIC_LOCATION __FILE__ ":" LINE_NUMBER(__LINE__)
// we only have to redefine these for Darling so that they use `duct_panic` instead of `panic`
#ifdef __DARLING__
#if CONFIG_EMBEDDED
#define panic(ex, ...) ({ \
__asm__("" ::: "memory"); \
(duct_panic)(# ex, ## __VA_ARGS__); \
})
#else
#define panic(ex, ...) ({ \
__asm__("" ::: "memory"); \
(duct_panic)(# ex "@" PANIC_LOCATION, ## __VA_ARGS__); \
})
#endif
#else
#if CONFIG_EMBEDDED
#define panic(ex, ...) ({ \
__asm__("" ::: "memory"); \
@ -518,6 +537,7 @@ __BEGIN_DECLS
(panic)(# ex "@" PANIC_LOCATION, ## __VA_ARGS__); \
})
#endif
#endif
__abortlike __printflike(4, 5)
void panic_with_options(unsigned int reason, void *ctx,

View File

@ -56,6 +56,11 @@
/*
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
@ -92,6 +97,10 @@
#include <pexpert/pexpert.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
bool panic_on_exception_triage = false;
unsigned long c_thr_exc_raise = 0;
@ -498,10 +507,12 @@ exception_triage_thread(
}
out:
#ifndef __DARLING__
if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) &&
(exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) {
thread_exception_return();
}
#endif
return kr;
}

View File

@ -62,6 +62,11 @@
* Non-ipc host functions.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_info.h>
@ -109,6 +114,20 @@
#include <pexpert/pexpert.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#include <darling/debug_print.h>
#include <linux/version.h>
#include <linux/kernel_stat.h>
#undef avenrun
#include <linux/sched/loadavg.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
#define global_zone_page_state global_page_state
#endif
#endif
host_data_t realhost;
vm_extmod_statistics_data_t host_extmod_statistics;
@ -162,6 +181,10 @@ host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_t
kern_return_t
host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count)
{
#ifdef __DARLING__
extern kern_return_t darling_host_info(host_flavor_t flavor, host_info_t info, mach_msg_type_number_t* count);
return darling_host_info(flavor, info, count);
#else
if (host == HOST_NULL) {
return KERN_INVALID_ARGUMENT;
}
@ -366,6 +389,7 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num
default: return KERN_INVALID_ARGUMENT;
}
#endif
}
kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count);
@ -389,13 +413,29 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
load_info = (host_load_info_t)info;
#ifdef __DARLING__
#ifndef LOAD_INT // until Linux 4.20
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
#endif // !LOAD_INT
for (int i = 0; i < 3; i++)
{
unsigned long v = avenrun[i] + FIXED_1/200; // This is what /proc/loadavg does
load_info->xnu_avenrun[i] = LOAD_INT(v) * 1000 + LOAD_FRAC(v) * 10;
}
memset(load_info->mach_factor, 0, sizeof(load_info->mach_factor));
#else
bcopy((char *)avenrun, (char *)load_info->avenrun, sizeof avenrun);
bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor);
#endif
*count = HOST_LOAD_INFO_COUNT;
return KERN_SUCCESS;
}
#ifndef __DARLING__
case HOST_VM_INFO: {
processor_t processor;
vm_statistics64_t stat;
@ -483,6 +523,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
return KERN_SUCCESS;
}
#endif
case HOST_CPU_LOAD_INFO: {
processor_t processor;
@ -492,6 +533,26 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
return KERN_FAILURE;
}
#ifdef __DARLING__
int i;
cpu_load_info = (host_cpu_load_info_t)info;
for_each_possible_cpu(i) {
struct kernel_cpustat kcpustat;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0)
kcpustat_cpu_fetch(&kcpustat, i);
#else
kcpustat = kcpustat_cpu(i);
#endif
cpu_load_info->cpu_ticks[CPU_STATE_USER] = kcpustat.cpustat[CPUTIME_USER];
cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = kcpustat.cpustat[CPUTIME_SYSTEM] + \
kcpustat.cpustat[CPUTIME_SOFTIRQ] + kcpustat.cpustat[CPUTIME_IRQ];
cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = kcpustat.cpustat[CPUTIME_IDLE];
cpu_load_info->cpu_ticks[CPU_STATE_NICE] = kcpustat.cpustat[CPUTIME_NICE];
}
#else
#define GET_TICKS_VALUE(state, ticks) \
MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \
MACRO_END
@ -544,12 +605,15 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
}
}
simple_unlock(&processor_list_lock);
#endif
*count = HOST_CPU_LOAD_INFO_COUNT;
return KERN_SUCCESS;
}
#ifndef __DARLING__
case HOST_EXPIRED_TASK_INFO: {
if (*count < TASK_POWER_INFO_COUNT) {
return KERN_FAILURE;
@ -581,7 +645,15 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
return KERN_SUCCESS;
}
#endif
#ifdef __DARLING__
default:
printf("Unimplemented host_statistics: flavor %d\n", flavor);
return KERN_INVALID_ARGUMENT;
#else
default: return KERN_INVALID_ARGUMENT;
#endif
}
}
@ -809,6 +881,34 @@ host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_ms
return KERN_INVALID_HOST;
}
#ifdef __DARLING__
switch (flavor) {
case HOST_VM_INFO64:
{
vm_statistics64_t stat = (vm_statistics64_t) info;
if (*count < HOST_VM_INFO64_COUNT)
return (KERN_FAILURE);
memset(stat, 0, sizeof(*stat));
stat->free_count = global_zone_page_state(NR_FREE_PAGES) /*+ nr_blockdev_pages()*/;
stat->active_count = totalram_pages - stat->free_count;
stat->zero_fill_count = global_zone_page_state(NR_ZONE_INACTIVE_ANON);
stat->wire_count = global_zone_page_state(NR_ZONE_UNEVICTABLE);
stat->internal_page_count = global_zone_page_state(NR_ZONE_ACTIVE_ANON);
stat->external_page_count = global_zone_page_state(NR_ZONE_ACTIVE_FILE);
// stat->speculative_count = nr_blockdev_pages(); // not exported
*count = HOST_VM_INFO64_COUNT;
return KERN_SUCCESS;
}
default:
debug_msg("host_statistics64: Unsupported flavor %d\n", flavor);
return KERN_INVALID_ARGUMENT;
}
#else
switch (flavor) {
case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */
{
@ -937,6 +1037,7 @@ host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_ms
default: /* If we didn't recognize the flavor, send to host_statistics */
return host_statistics(host, flavor, (host_info_t)info, count);
}
#endif
}
kern_return_t
@ -1034,6 +1135,10 @@ get_pages_grabbed_count(void)
kern_return_t
get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
{
#if defined (__DARLING__)
kprintf("not implemented: get_sched_statistics()\n");
return KERN_FAILURE;
#else
processor_t processor;
if (!sched_stats_active) {
@ -1080,6 +1185,7 @@ get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count)
*count += (uint32_t)sizeof(struct _processor_statistics_np);
return KERN_SUCCESS;
#endif
}
kern_return_t
@ -1100,6 +1206,12 @@ host_page_size(host_t host, vm_size_t * out_page_size)
*/
extern char version[];
#ifdef __DARLING__
#include <generated/utsrelease.h>
#include <darling/api.h>
static const char KERNEL_VERSION[] = "Darling Mach (API level " DARLING_MACH_API_VERSION_STR ") on Linux " UTS_RELEASE;
#endif
kern_return_t
host_kernel_version(host_t host, kernel_version_t out_version)
{
@ -1107,7 +1219,11 @@ host_kernel_version(host_t host, kernel_version_t out_version)
return KERN_INVALID_ARGUMENT;
}
#ifdef __DARLING__
(void)strncpy(out_version, KERNEL_VERSION, sizeof(KERNEL_VERSION));
#else
(void)strncpy(out_version, version, sizeof(kernel_version_t));
#endif
return KERN_SUCCESS;
}

View File

@ -34,6 +34,11 @@
* alarm clock facility.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/message.h>
#include <kern/host.h>
#include <kern/processor.h>
@ -46,6 +51,10 @@
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: ipc_clock_init
* Purpose:

View File

@ -61,6 +61,12 @@
*
* Routines to implement host ports.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/message.h>
#include <mach/mach_traps.h>
#include <mach/mach_host_server.h>
@ -80,6 +86,10 @@
#include <security/mac_mach_internal.h>
#endif
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Forward declarations
*/
@ -142,8 +152,10 @@ ipc_host_init(void)
/*
* And for master processor
*/
#ifndef __DARLING__
ipc_processor_init(master_processor);
ipc_processor_enable(master_processor);
#endif
}
/*

View File

@ -70,6 +70,11 @@
* Functions for letting a port represent a kernel object.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach_debug.h>
#include <mach_ipc_test.h>
#include <mach/mig.h>
@ -149,6 +154,10 @@
#include <security/mac_mach_internal.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
extern char *proc_name_address(void *p);
extern int proc_pid(void *p);
@ -198,8 +207,10 @@ const struct mig_subsystem *mig_e[] = {
(const struct mig_subsystem *)&lock_set_subsystem,
(const struct mig_subsystem *)&task_subsystem,
(const struct mig_subsystem *)&thread_act_subsystem,
#ifndef __DARLING__
#ifdef VM32_SUPPORT
(const struct mig_subsystem *)&vm32_map_subsystem,
#endif
#endif
(const struct mig_subsystem *)&UNDReply_subsystem,
(const struct mig_subsystem *)&mach_voucher_subsystem,
@ -378,6 +389,10 @@ ipc_kobject_server(
ipc_kmsg_trace_send(request, option);
{
if (ptr) {
#ifdef __DARLING__
printf( "- kobject routine: %pF\n", ptr->routine);
#endif
/*
* Check if the port is a task port, if its a task port then
* snapshot the task exec token before the mig routine call.

View File

@ -56,6 +56,11 @@
/*
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/boolean.h>
#include <mach/port.h>
#include <mach/mig.h>
@ -84,6 +89,10 @@
#include <libkern/OSAtomic.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
void
mach_msg_receive_results_complete(ipc_object_t object);

View File

@ -25,6 +25,12 @@
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/notify.h>
#include <ipc/ipc_port.h>
@ -36,6 +42,10 @@
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
extern void fileport_releasefg(struct fileglob *);
/*

View File

@ -30,6 +30,11 @@
*
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <kern/sync_sema.h>
#include <kern/sync_lock.h>
#include <kern/ipc_kobject.h>
@ -43,6 +48,9 @@
#include <mach/mach_port_server.h>
#include <mach/port.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
/*
* Routine: port_name_to_semaphore

View File

@ -68,6 +68,11 @@
* Task and thread related IPC functions.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
@ -102,6 +107,11 @@
#include <sys/csr.h>
#endif
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#include <darling/debug_print.h>
#endif
#if CONFIG_EMBEDDED && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif
@ -1155,6 +1165,11 @@ task_get_special_port(
}
itk_unlock(task);
#ifdef __DARLING__
debug_msg("- task_get_special_port(%s) (task: 0x%p, ->itk_bootstrap: 0x%p, which: %d) to return port: 0x%p\n",
linux_current->comm, task, task->itk_bootstrap, which, port);
#endif
*portp = port;
return KERN_SUCCESS;
}
@ -1181,6 +1196,11 @@ task_set_special_port(
int which,
ipc_port_t port)
{
#if defined (__DARLING__)
debug_msg("- task_set_special_port(%s) (task: 0x%p, which: %d, port: 0x%p) called\n",
linux_current->comm, task, which, port);
#endif
if (task == TASK_NULL) {
return KERN_INVALID_ARGUMENT;
}
@ -1858,7 +1878,9 @@ convert_port_to_map(
}
map = task->map;
#ifndef __DARLING__
vm_map_reference_swap(map);
#endif
task_unlock(task);
return map;
}

View File

@ -86,10 +86,14 @@ extern vm_size_t
kalloc_bucket_size(
vm_size_t size);
#ifdef __DARLING__
extern void *kalloc(vm_size_t size) __attribute__((alloc_size(1)));
#else
#define kalloc(size) \
({ VM_ALLOC_SITE_STATIC(0, 0); \
vm_size_t tsize = (size); \
kalloc_canblock(&tsize, TRUE, &site); })
#endif
#define kalloc_tag(size, itag) \
({ VM_ALLOC_SITE_STATIC(0, (itag)); \
@ -101,10 +105,14 @@ kalloc_bucket_size(
vm_size_t tsize = (size); \
kalloc_canblock(&tsize, TRUE, &site); })
#ifdef __DARLING__
extern void *kalloc_noblock(vm_size_t size) __attribute__((alloc_size(1)));
#else
#define kalloc_noblock(size) \
({ VM_ALLOC_SITE_STATIC(0, 0); \
vm_size_t tsize = (size); \
kalloc_canblock(&tsize, FALSE, &site); })
#endif
#define kalloc_noblock_tag(size, itag) \
({ VM_ALLOC_SITE_STATIC(0, (itag)); \
@ -144,6 +152,7 @@ kalloc_bucket_size(
extern void kfree(void *data,
vm_size_t size);
#ifndef __DARLING__
#define kfree(data, size) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wshadow\"") \
@ -155,6 +164,7 @@ _Pragma("clang diagnostic ignored \"-Wshadow\"") \
(kfree)(__tmp_addr, __tmp_size); \
} while (0) \
_Pragma("clang diagnostic pop")
#endif
#define kfree_addr(addr) \
_Pragma("clang diagnostic push") \

View File

@ -53,9 +53,11 @@ struct wait_queue { unsigned char opaque[32]; };
typedef struct zone *zone_t;
#define ZONE_NULL ((zone_t) 0)
#ifndef __DARLING__
typedef struct wait_queue *wait_queue_t;
#define WAIT_QUEUE_NULL ((wait_queue_t) 0)
#define SIZEOF_WAITQUEUE sizeof(struct wait_queue)
#endif
typedef vm_offset_t ipc_kobject_t;
#define IKO_NULL ((ipc_kobject_t) 0)
@ -206,6 +208,9 @@ typedef int wait_timeout_urgency_t;
#ifdef MACH_KERNEL_PRIVATE
#include <kern/misc_protos.h>
#ifdef __DARLING__
#define clock_t xnu_clock_t
#endif
typedef struct clock *clock_t;
typedef struct mig_object *mig_object_t;

View File

@ -56,6 +56,14 @@
#define LOCK_PRIVATE 1
#ifdef __DARLING__
#include <duct/duct__pre_linux_types.h>
#include <linux/delay.h> // for usleep_range
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach_ldebug.h>
#include <debug.h>
@ -78,6 +86,10 @@
#include <sys/kdebug.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
#define LCK_MTX_SLEEP_CODE 0
#define LCK_MTX_SLEEP_DEADLINE_CODE 1
#define LCK_MTX_LCK_WAIT_CODE 2
@ -494,6 +506,14 @@ lck_attr_free(
kfree(attr, sizeof(lck_attr_t));
}
#ifdef __DARLING__
#undef hw_lock_init
#undef hw_lock_try
#undef hw_lock_held
#undef hw_lock_to
#undef hw_lock_unlock
#endif
/*
* Routine: hw_lock_init
*
@ -1226,6 +1246,7 @@ lck_mtx_sleep_deadline(
* steal the lock without having to wait for the last waiter to make forward progress.
*/
#ifndef __DARLING__
/*
* Routine: lck_mtx_lock_wait
*
@ -1407,6 +1428,7 @@ lck_mtx_unlock_wakeup(
return mutex->lck_mtx_waiters > 0;
}
#endif
/*
* Routine: mutex_pause
@ -1439,17 +1461,22 @@ mutex_pause(uint32_t collisions)
}
back_off = collision_backoffs[collisions];
#ifdef __DARLING__
usleep_range(back_off, back_off);
#else
wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
assert(wait_result == THREAD_WAITING);
wait_result = thread_block(THREAD_CONTINUE_NULL);
assert(wait_result == THREAD_TIMED_OUT);
#endif
}
unsigned int mutex_yield_wait = 0;
unsigned int mutex_yield_no_wait = 0;
#ifndef __DARLING__
void
lck_mtx_yield(
lck_mtx_t *lck)
@ -1475,6 +1502,7 @@ lck_mtx_yield(
lck_mtx_lock(lck);
}
}
#endif
/*

View File

@ -25,6 +25,12 @@
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <kern/cpu_data.h>
#include <kern/kern_types.h>
#include <kern/locks.h>
@ -34,6 +40,9 @@
#include <pexpert/pexpert.h>
#include <vm/vm_kern.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
#define ROUNDDOWN(x, y) (((x)/(y))*(y))

View File

@ -79,11 +79,13 @@ extern void cpu_exit_wait(
extern boolean_t cpu_can_exit(
int slot_num);
#ifndef __DARLING__
extern kern_return_t cpu_info(
processor_flavor_t flavor,
int slot_num,
processor_info_t info,
unsigned int *count);
#endif
extern kern_return_t cpu_info_count(
processor_flavor_t flavor,

View File

@ -55,6 +55,7 @@
#define MAX(a, b) (((a)>(b))?(a):(b))
#endif /* MAX */
#ifndef __DARLING__
/* Set a bit in a bit array */
extern void setbit(
int which,
@ -251,6 +252,7 @@ extern void bootstrap_create(void);
extern boolean_t no_bootstrap_task(void);
extern ipc_port_t get_root_master_device_port(void);
#endif /* DIPC */
#endif // !__DARLING__
extern kern_return_t kernel_set_special_port(
host_priv_t host_priv,
@ -262,11 +264,13 @@ extern kern_return_t kernel_get_special_port(
int which,
ipc_port_t *portp);
#ifndef __DARLING__
user_addr_t get_useraddr(void);
/* symbol lookup */
struct kmod_info_t;
extern uint64_t early_random(void);
#endif
#endif /* _MISC_PROTOS_H_ */

View File

@ -34,6 +34,11 @@
* Created.
*/
#ifdef __DARLING__
#include <duct/duct.h>
#include <duct/duct_pre_xnu.h>
#endif
#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/mach_port_server.h>
@ -46,6 +51,10 @@
#include <kern/mk_timer.h>
#include <kern/thread_call.h>
#ifdef __DARLING__
#include <duct/duct_post_xnu.h>
#endif
static zone_t mk_timer_zone;
static mach_port_qos_t mk_timer_qos = {

Some files were not shown because too many files have changed in this diff Show More