Added preliminary Box32 support (#1760)

* Improve the ReserveHighMemory helper function

* [BOX32] Added some wrapping infrastructure

* [BOX32] More wrapped 32bits lib infrastructure

* [BOX32] Added callback and tls 32bits handling

* [BOX32] Added more 32bits, around wrappers and elfs

* [BOX32] Added the 32bits version of myalign

* [BOX32] More wrapped libs and 32bits fixes and improvements

* [BOX32] Added some 32bits tests

* [BOX32] Try to enable some Box32 build and test on the CI

* [BOX32] Disable Box32 testing on CI platforms that use qemu

* [BOX32] Another attempt to disable Box32 testing on CI platforms that use qemu

* [BOX32] Small fix for another attempt to disable Box32 testing on CI platforms that use qemu

* [BOX32] Yet another fix for another attempt to disable Box32 testing on CI platforms that use qemu

* [BOX32] Fixed a typo in CI script

* [BOX32] Better scratch alignment and enabled more tests

* [BOX32] Added (partial) wrapped 32bits librt

* [BOX32] Added mention of Box32 in README

* [BOX32] Added pthread handling, and numerous fixes to 32bits handling. [ARM64_DYNAREC] Fixed access to segment with negative offset

* [BOX32] Added system libs and cpp testing, plus some more fixes

* [BOX32] Fix previous commit

* [BOX32] Better stack adjustment for 32bits processes

* [BOX32] Added getenv wrapped 32bits function and friends

* [BOX32] Don't look for box86 for a Box32 build

* [BOX32] Don't do 32bits cppThreads test for now on CI

* [BOX32] Enabled a few more 32bits tests

* [BOX32] Fix ld_lib_path for both CppThreads tests

* [BOX32] [ANDROID] Some Fixes for Android Build

* [BOX32] Still need to disable cppThread_32bits test on CI for some reason

* [BOX32] [ANDROID] Don't show PreInit Array Warning (#1751)

* [BOX32] [ANDROID] One More Fix for Android Build That I forgot to … (#1752)

* [BOX32] [ANDROID] One More Fix for Android Build That I forgot to push before

* [BOX32] [ANDROID] Try to Create __libc_init

* [BOX32] [ANDROID] Try to disable NEEDED_LIBS for now (libdl is not wrapped)

* [BOX32] Updated generated files

* [BOX32] Added 32bits context functions

* [BOX32] Added 32bits signal handling

* [BOX32] Added some missing 32bits elfloader functions

* [BOX32] Fix build on x86_64 machine

* [BOX32] Better fix for x86_64 build

* [BOX32] Actually added missing libs, and re-enabled cppThreads_32bits test

* [BOX32] Added wrapped 32bits libdl

* [BOX32] Try to re-enable Box32 test on CI for ARM64 builds

* [BOX32] fine-tuning Box32 test on CI for ARM64 builds

* [BOX32] More fine-tuning to Box32 test on CI for ARM64 builds

* [BOX32] Enabled Box32 test on CI for LA64 and RV64 builds too

* [BOX32] Re-disabled Box32 test on CI for LA64 and RV64 builds; not working for now

* [BOX32] Temporarily disabled cppThreads_32bits test on CI

---------

Co-authored-by: KreitinnSoftware <pablopro5051@gmail.com>
Co-authored-by: KreitinnSoftware <80591934+KreitinnSoftware@users.noreply.github.com>
Committed by ptitSeb on 2024-08-26 17:45:13 +02:00 (via GitHub)
parent 9beb745765
commit b5105a1e57
914 changed files with 25929 additions and 1024 deletions


@ -28,7 +28,7 @@ jobs:
fail-fast: false
matrix:
platform: [X64, OTHER_ARM, RISCV, RPI4ARM64, RPI5ARM64, RK3326, RK3399, RK3588, PHYTIUM, SD845, SD888, ADLINK, ARM64, ANDROID, TERMUX, LARCH64]
type: [Release, Trace, StaticBuild]
type: [Release, Trace, StaticBuild, Box32]
os: [ubuntu-latest]
include:
- platform: TEGRAX1
@ -46,6 +46,10 @@ jobs:
type: StaticBuild
- platform: X64
type: StaticBuild
- platform: ANDROID
type: Box32
- platform: TERMUX
type: Box32
runs-on: ${{ matrix.os }}
steps:
@ -119,14 +123,22 @@ jobs:
echo BOX64_BUILD_TYPE=Release >> $GITHUB_ENV
echo BOX64_HAVE_TRACE=0 >> $GITHUB_ENV
echo BOX64_STATICBUILD=0 >> $GITHUB_ENV
echo BOX64_BOX32=0 >> $GITHUB_ENV
elif [[ ${{ matrix.type }} == 'StaticBuild' ]]; then
echo BOX64_BUILD_TYPE=Release >> $GITHUB_ENV
echo BOX64_HAVE_TRACE=0 >> $GITHUB_ENV
echo BOX64_STATICBUILD=1 >> $GITHUB_ENV
echo BOX64_BOX32=0 >> $GITHUB_ENV
elif [[ ${{ matrix.type }} == 'Box32' ]]; then
echo BOX64_BUILD_TYPE=Release >> $GITHUB_ENV
echo BOX64_HAVE_TRACE=0 >> $GITHUB_ENV
echo BOX64_STATICBUILD=0 >> $GITHUB_ENV
echo BOX64_BOX32=1 >> $GITHUB_ENV
else
echo BOX64_BUILD_TYPE=RelWithDebInfo >> $GITHUB_ENV
echo BOX64_HAVE_TRACE=1 >> $GITHUB_ENV
echo BOX64_STATICBUILD=0 >> $GITHUB_ENV
echo BOX64_BOX32=0 >> $GITHUB_ENV
fi
- name: "Display Build info"
@ -136,6 +148,7 @@ jobs:
echo "Build type: ${{ env.BOX64_BUILD_TYPE }}"
echo "Trace Enabled: ${{ env.BOX64_HAVE_TRACE }}"
echo "StaticBuild Enabled: ${{ env.BOX64_STATICBUILD }}"
echo "Box32 Enabled: ${{ env.BOX64_BOX32 }}"
- name: "Build Box64"
run: |
@ -147,11 +160,13 @@ jobs:
-DCMAKE_BUILD_TYPE=${{ env.BOX64_BUILD_TYPE }}\
-DHAVE_TRACE=${{ env.BOX64_HAVE_TRACE }}\
-DSTATICBUILD=${{ env.BOX64_STATICBUILD }}\
-DBOX32=${{ env.BOX64_BOX32 }}\
-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON\
-DCI=${{ matrix.platform != 'ANDROID' }}
make -j$(nproc) VERBOSE=1
- name: "Test Box64"
## Qemu doesn't like the Box32 tests, so disable them on non-native platforms
if: ${{ matrix.platform != 'TEGRAX1' }}
run: |
if [[ ${{ matrix.platform }} != 'X64' ]]; then
@ -162,6 +177,7 @@ jobs:
cd build
if [[ ${{ matrix.platform }} == 'RISCV' ]]; then
if [[ ${{ env.BOX64_BOX32 }} != 1 ]]; then
INTERPRETER=qemu-riscv64-static QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/ ctest -j$(nproc) --output-on-failure
INTERPRETER=qemu-riscv64-static QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/ BOX64_DYNAREC_TEST=2 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-riscv64-static QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/ QEMU_CPU=rv64,v=false BOX64_DYNAREC=0 ctest -j$(nproc) --output-on-failure
@ -173,20 +189,29 @@ jobs:
INTERPRETER=qemu-riscv64-static QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/ BOX64_DYNAREC_TEST=2 QEMU_CPU=rv64,v=true,vlen=256,vext_spec=v1.0 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-riscv64-static QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/ QEMU_CPU=rv64,v=false,xtheadba=true,xtheadba=true,xtheadbb=true,xtheadbs=true,xtheadcondmov=true,xtheadmemidx=true,xtheadmempair=true,xtheadfmemidx=true,xtheadmac=true,xtheadfmv=true ctest -j$(nproc) --output-on-failure
INTERPRETER=qemu-riscv64-static QEMU_LD_PREFIX=/usr/riscv64-linux-gnu/ BOX64_DYNAREC_TEST=2 QEMU_CPU=rv64,v=false,xtheadba=true,xtheadba=true,xtheadbb=true,xtheadbs=true,xtheadcondmov=true,xtheadmemidx=true,xtheadmempair=true,xtheadfmemidx=true,xtheadmac=true,xtheadfmv=true ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
fi
elif [[ ${{ matrix.platform }} == 'LARCH64' ]]; then
if [[ ${{ env.BOX64_BOX32 }} != 1 ]]; then
INTERPRETER=qemu-loongarch64-static QEMU_LD_PREFIX=/usr/loongarch64-linux-gnu/ BOX64_DYNAREC_LA64NOEXT=1 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-loongarch64-static QEMU_LD_PREFIX=/usr/loongarch64-linux-gnu/ BOX64_DYNAREC_TEST=2 BOX64_DYNAREC_LA64NOEXT=1 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-loongarch64-static QEMU_LD_PREFIX=/usr/loongarch64-linux-gnu/ BOX64_DYNAREC=0 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
fi
elif [[ ${{ matrix.platform }} == 'ANDROID' ]]; then
if [[ ${{ env.BOX64_BOX32 }} != 1 ]]; then
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/system/lib64 BOX64_DYNAREC=0 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/system/lib64 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
fi
elif [[ ${{ matrix.platform }} == 'TERMUX' ]]; then
if [[ ${{ env.BOX64_BOX32 }} != 1 ]]; then
INTERPRETER=qemu-aarch64-static QEMU_SET_ENV=LD_LIBRARY_PATH=/data/data/com.termux/files/usr/lib QEMU_LD_PREFIX=/system/lib64:/data/data/com.termux/files/usr/lib BOX64_DYNAREC=0 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-aarch64-static QEMU_SET_ENV=LD_LIBRARY_PATH=/data/data/com.termux/files/usr/lib QEMU_LD_PREFIX=/system/lib64:/data/data/com.termux/files/usr/lib ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
fi
elif [[ ${{ matrix.platform }} != 'X64' ]]; then # AArch64
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/ BOX64_DYNAREC=0 ctest -j$(nproc) --output-on-failure
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/ ctest -j$(nproc) --output-on-failure
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/ BOX64_DYNAREC=0 ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/ ctest -j$(nproc) --repeat until-pass:20 --output-on-failure
if [[ ${{ env.BOX64_BOX32 }} != 1 ]]; then
INTERPRETER=qemu-aarch64-static QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/ BOX64_DYNAREC_TEST=2 ctest -j$(nproc) --output-on-failure
fi
else
ctest -j$(nproc) --output-on-failure
fi


@ -37,7 +37,7 @@ option(BAD_SIGNAL "Set to ON to activate the workaround for incoherent si_info o
option(SW64 "Set ON if targeting an SW64 based device" ${SW64})
option(CI "Set to ON if running in CI" ${CI})
option(WITH_MOLD "Set to ON to use with mold" ${WITH_MOLD})
option(BOX32 "Set to ON to add Linux 32bits support (experimental, do not use)" ${BOX32})
option(BOX32 "Set to ON to add Linux 32bits support (experimental, do not use)" ${BO64})
if(TERMUX)
set(TERMUX_PATH "/data/data/com.termux/files")
@ -302,6 +302,11 @@ include_directories(
"${BOX64_ROOT}/src"
"${BOX64_ROOT}/src/wrapped/generated"
)
if(BOX32)
include_directories(
"${BOX64_ROOT}/src/wrapped32/generated"
)
endif()
# git_head.h is a generated file
set_source_files_properties(
@ -330,7 +335,6 @@ set(ELFLOADER_SRC
"${BOX64_ROOT}/src/emu/x64run_private.c"
"${BOX64_ROOT}/src/emu/x64shaext.c"
"${BOX64_ROOT}/src/emu/x64syscall.c"
"${BOX64_ROOT}/src/emu/x86syscall.c"
"${BOX64_ROOT}/src/emu/x64tls.c"
"${BOX64_ROOT}/src/emu/x64trace.c"
"${BOX64_ROOT}/src/librarian/librarian.c"
@ -365,10 +369,24 @@ if(NOT STATICBUILD)
endif()
if(BOX32)
list(APPEND ELFLOADER_SRC
"${BOX64_ROOT}/src/box32.c"
"${BOX64_ROOT}/src/elfs/elfhash32.c"
"${BOX64_ROOT}/src/elfs/elfloader32.c"
"${BOX64_ROOT}/src/elfs/elfparser32.c"
"${BOX64_ROOT}/src/elfs/elfload_dump32.c"
"${BOX64_ROOT}/src/tools/box32stack.c"
"${BOX64_ROOT}/src/emu/x86int3.c"
"${BOX64_ROOT}/src/libtools/myalign32.c"
"${BOX64_ROOT}/src/libtools/myalign64_32.c"
"${BOX64_ROOT}/src/libtools/signal32.c"
"${BOX64_ROOT}/src/libtools/threads32.c"
"${BOX64_ROOT}/src/emu/x86syscall_32.c"
"${BOX64_ROOT}/src/wrapped32/generated/wrapper32.c"
"${BOX64_ROOT}/src/wrapped32/generated/converter32.c"
)
else()
list(APPEND ELFLOADER_SRC
"${BOX64_ROOT}/src/emu/x86syscall.c"
)
endif()
if(NOT ANDROID)
@ -725,6 +743,57 @@ endif ()
add_custom_target(WRAPPERS DEPENDS "${BOX64_ROOT}/src/wrapped/generated/functions_list.txt")
#add_custom_target(PRINTER DEPENDS "${BOX64_ROOT}/src/dynarec/last_run.txt")
if(BOX32)
if(STATICBUILD)
set(WRAPPEDS32
"${BOX64_ROOT}/src/wrapped32/wrappedldlinux.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibc.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibdl.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibm.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibpthread.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibrt.c"
)
else()
set(WRAPPEDS32
"${BOX64_ROOT}/src/wrapped32/wrappedldlinux.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibc.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibdl.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibm.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibpthread.c"
"${BOX64_ROOT}/src/wrapped32/wrappedlibrt.c"
"${BOX64_ROOT}/src/wrapped32/wrappedcrashhandler.c"
)
endif()
string(REPLACE ".c" "_private.h" MODROOT ${BOX64_ROOT})
#set(WRAPPEDS32_HEAD "${BOX64_ROOT}/src/wrapped/wrappedd3dadapter9_genvate.h")
foreach(A ${WRAPPEDS32})
string(REPLACE ".c" "_private.h" C ${A})
string(REPLACE "${MODROOT}" "${BOX64_ROOT}" B ${C})
set(WRAPPEDS32_HEAD ${WRAPPEDS32_HEAD} ${B})
set_source_files_properties(${A} PROPERTIES OBJECT_DEPENDS ${B})
endforeach()
set(WRAPPER32 "${BOX64_ROOT}/src/wrapped32/generated/wrapper32.c" "${BOX64_ROOT}/src/wrapped32/generated/wrapper32.h")
if(NOT CI)
add_custom_command(
OUTPUT "${BOX64_ROOT}/src/wrapped32/generated/functions_list.txt"
COMMAND "${PYTHON_EXECUTABLE}" "${BOX64_ROOT}/rebuild_wrappers_32.py"
"${BOX64_ROOT}"
"PANDORA" "HAVE_LD80BITS" "NOALIGN" "HAVE_TRACE" "ANDROID" "TERMUX" "STATICBUILD" "--"
${WRAPPEDS32_HEAD}
MAIN_DEPENDENCY "${BOX64_ROOT}/rebuild_wrappers_32.py"
DEPENDS ${WRAPPEDS32} ${WRAPPEDS32_HEAD}
BYPRODUCTS ${WRAPPER32}
)
endif()
add_custom_target(WRAPPERS32 DEPENDS "${BOX64_ROOT}/src/wrapped32/generated/functions_list.txt")
else()
set(WRAPPEDS32)
endif()
if(DYNAREC)
set(DYNAREC_SRC
"${BOX64_ROOT}/src/dynarec/dynablock.c"
@ -885,6 +954,11 @@ if(DYNAREC)
${DYNAREC_PASS}
"${BOX64_ROOT}/src/dynarec/dynarec_native_pass.c"
)
if(BOX32)
list(APPEND DYNAREC_PASS
"${BOX64_ROOT}/src/wrapped32/generated/wrapper32.h"
)
endif()
add_library(dynarec_native OBJECT ${DYNAREC_SRC})
@ -902,6 +976,12 @@ if(DYNAREC)
add_dependencies(native_pass1 WRAPPERS)
add_dependencies(native_pass2 WRAPPERS)
add_dependencies(native_pass3 WRAPPERS)
if(BOX32)
add_dependencies(native_pass0 WRAPPERS32)
add_dependencies(native_pass1 WRAPPERS32)
add_dependencies(native_pass2 WRAPPERS32)
add_dependencies(native_pass3 WRAPPERS32)
endif()
add_library(dynarec STATIC
$<TARGET_OBJECTS:dynarec_native>
@ -918,21 +998,24 @@ if(DYNAREC)
add_custom_command(
OUTPUT "${BOX64_ROOT}/src/git_head.h"
COMMAND sh -c "echo \\\#define GITREV \\\"$(git rev-parse --short HEAD)\\\">\"${BOX64_ROOT}/src/git_head.h\""
DEPENDS dynarec ${ELFLOADER_SRC} ${INTERPRETER} ${WRAPPEDS}
DEPENDS dynarec ${ELFLOADER_SRC} ${INTERPRETER} ${WRAPPEDS} ${WRAPPEDS32}
VERBATIM)
else()
add_custom_command(
OUTPUT "${BOX64_ROOT}/src/git_head.h"
COMMAND sh -c "echo \\\#define GITREV \\\"$(git rev-parse --short HEAD)\\\">\"${BOX64_ROOT}/src/git_head.h\""
DEPENDS ${ELFLOADER_SRC} ${INTERPRETER} ${WRAPPEDS}
DEPENDS ${ELFLOADER_SRC} ${INTERPRETER} ${WRAPPEDS} ${WRAPPEDS32}
VERBATIM)
endif()
add_library(interpreter OBJECT ${INTERPRETER})
add_executable(${BOX64} ${ELFLOADER_SRC} ${WRAPPEDS} "${BOX64_ROOT}/src/git_head.h")
add_executable(${BOX64} ${ELFLOADER_SRC} ${WRAPPEDS} ${WRAPPEDS32} "${BOX64_ROOT}/src/git_head.h")
set_target_properties(${BOX64} PROPERTIES ENABLE_EXPORTS ON)
add_dependencies(${BOX64} WRAPPERS)
if(BOX32)
add_dependencies(${BOX64} WRAPPERS32)
endif()
#add_dependencies(${BOX64} PRINTER)
#target_link_libraries(${BOX64} c m dl rt pthread resolv)
if(STATICBUILD)
@ -1037,6 +1120,24 @@ if(NOT _x86 AND NOT _x86_64)
install(FILES ${CMAKE_SOURCE_DIR}/x64lib/libmbedcrypto.so.3 DESTINATION ${TERMUX_PATH}${INSTALL_PATH})
endif()
endif()
if(BOX32)
set(INSTALL_PATH "/usr/lib/i386-linux-gnu/")
if(NOT NO_LIB_INSTALL)
if(NOT TERMUX)
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libstdc++.so.5 DESTINATION ${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libstdc++.so.6 DESTINATION ${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libgcc_s.so.1 DESTINATION ${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libpng12.so.0 DESTINATION ${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libunwind.so.8 DESTINATION ${INSTALL_PATH})
else()
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libstdc++.so.5 DESTINATION ${TERMUX_PATH}${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libstdc++.so.6 DESTINATION ${TERMUX_PATH}${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libgcc_s.so.1 DESTINATION ${TERMUX_PATH}${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libpng12.so.0 DESTINATION ${TERMUX_PATH}${INSTALL_PATH})
install(FILES ${CMAKE_SOURCE_DIR}/x86lib/libunwind.so.8 DESTINATION ${TERMUX_PATH}${INSTALL_PATH})
endif()
endif()
endif()
endif()
if(NOT TARGET uninstall)
@ -1149,6 +1250,8 @@ add_test(cppThreads ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests/ref10.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
set_tests_properties(cppThreads PROPERTIES ENVIRONMENT "BOX64_LD_LIBRARY_PATH=${CMAKE_SOURCE_DIR}/x64lib")
add_test(tlsData ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests/test11 -D TEST_OUTPUT=tmpfile11.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests/ref11.txt
@ -1425,3 +1528,139 @@ foreach(file ${extension_tests})
-P ${CMAKE_SOURCE_DIR}/runTest.cmake)
endforeach()
endif()
if(BOX32)
add_test(NAME bootSyscall_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test01 -D TEST_OUTPUT=tmpfile01.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref01.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME bootSyscallC_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test02 -D TEST_OUTPUT=tmpfile02.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref02.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME printf_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test03 -D TEST_OUTPUT=tmpfile03.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref03.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME args_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test04 -D TEST_ARGS2=yeah -D TEST_OUTPUT=tmpfile04.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref04.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME maths1_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test05 -D TEST_ARGS2=7 -D TEST_OUTPUT=tmpfile05.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref05.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME threadsStart_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test06 -D TEST_OUTPUT=tmpfile06.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref06.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME trig_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test07 -D TEST_OUTPUT=tmpfile07.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref07.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME pi_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test08 -D TEST_OUTPUT=tmpfile08.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref08.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME fork_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test09 -D TEST_OUTPUT=tmpfile09.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref09.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
if(NOT CI)
add_test(NAME cppThreads_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test10 -D TEST_OUTPUT=tmpfile10.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref10.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
set_tests_properties(cppThreads_32bits PROPERTIES ENVIRONMENT "BOX64_LD_LIBRARY_PATH=${CMAKE_SOURCE_DIR}/x86lib")
endif()
add_test(NAME tlsData_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test11 -D TEST_OUTPUT=tmpfile11.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref11.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME fpu_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test12 -D TEST_OUTPUT=tmpfile12.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref12.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME contexts_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test13 -D TEST_OUTPUT=tmpfile13.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref13.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
if(NOT LD80BITS)
add_test(NAME conditionalThreads_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test14 -D TEST_OUTPUT=tmpfile14.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref14.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
endif()
add_test(NAME linkingIndirectNoversion_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test15 -D TEST_OUTPUT=tmpfile15.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref15.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME linkingIndirectVersion_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test16 -D TEST_OUTPUT=tmpfile16.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref16.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME sse_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test17 -D TEST_OUTPUT=tmpfile17.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref17.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
set_tests_properties(sse_32bits PROPERTIES ENVIRONMENT "BOX64_DYNAREC_FASTNAN=0;BOX64_DYNAREC_FASTROUND=0")
add_test(NAME longjumpInSignals_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test18 -D TEST_OUTPUT=tmpfile18.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref18.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME x87_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test19 -D TEST_OUTPUT=tmpfile19.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref19.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME idiv_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test20 -D TEST_OUTPUT=tmpfile20.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref20.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
add_test(NAME multiple_dlopen_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test21 -D TEST_OUTPUT=tmpfile21.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref21.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
file(GLOB extension_tests "${CMAKE_SOURCE_DIR}/tests32/extensions/*.c")
foreach(file ${extension_tests})
get_filename_component(testname "${file}" NAME_WE)
add_test(NAME "${testname}_32bits" COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/extensions/${testname} -D TEST_OUTPUT=tmpfile-${testname}.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/extensions/${testname}.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake)
endforeach()
#add_test(NAME sse_optimized_32bits COMMAND ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
# -D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test17_o2 -D TEST_OUTPUT=tmpfile17_o2.txt
# -D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref17_o2.txt
# -P ${CMAKE_SOURCE_DIR}/runTest.cmake )
#set_tests_properties(sse_optimized_32bits PROPERTIES ENVIRONMENT "BOX64_DYNAREC_FASTNAN=0;BOX64_DYNAREC_FASTROUND=0")
add_test(bswap_32bits ${CMAKE_COMMAND} -D TEST_PROGRAM=${CMAKE_BINARY_DIR}/${BOX64}
-D TEST_ARGS=${CMAKE_SOURCE_DIR}/tests32/test23 -D TEST_OUTPUT=tmpfile23.txt
-D TEST_REFERENCE=${CMAKE_SOURCE_DIR}/tests32/ref23.txt
-P ${CMAKE_SOURCE_DIR}/runTest.cmake )
endif()


@ -18,6 +18,8 @@ Box64 integrates with DynaRec (dynamic recompiler) for the ARM64 and RV64 platfo
Some x64 internal opcodes use parts of "Realmode X86 Emulator Library", see [x64primop.c](src/emu/x64primop.c) for copyright details
Box64 now has an optional Box32 component that allows running 32-bit x86 programs on a 64-bit system. Note that this option is experimental and a work in progress. Basically, nothing works with it yet, so unless you plan on helping with the development, don't bother enabling it (and don't create issue tickets about Box32 not being able to run X or Y program). The main goal with Box32 is to be able to run SteamCMD first; Linux Steam and Wine/Proton might come later.
<img src="docs/img/Box64Icon.png" width="96" height="96">
Logo and Icon made by @grayduck, thanks!


@ -1469,7 +1469,7 @@ def main(root: str, files: Iterable[Filename], ver: str):
inttext = ""
file.write("\n")
for k1 in simple_idxs:
file.write("#{inttext}if defined({k1})\nint isSimpleWrapper(wrapper_t fun) {{\n".format(inttext=inttext, k1=k1))
file.write("#{inttext}if defined({k1})\nint isSimpleWrapper(wrapper_t fun) {{\n\tif (box64_is32bits) return 0;\n".format(inttext=inttext, k1=k1))
inttext = "el"
for k in simple_idxs[k1]:
if k != str(Clauses()):
@ -1479,9 +1479,10 @@ def main(root: str, files: Iterable[Filename], ver: str):
if k != str(Clauses()):
file.write("#endif\n")
file.write("\treturn 0;\n}\n")
file.write("\n#else\nint isSimpleWrapper(wrapper_t fun) {\n\treturn 0;\n}\n#endif\n")
file.write("#else\nint isSimpleWrapper(wrapper_t fun) {\n\treturn 0;\n}\n#endif\n")
# Write the isRetX87Wrapper function
file.write("\nint isRetX87Wrapper32(wrapper_t fun)\n#ifndef BOX32\n{ return 0; }\n#else\n ;\n#endif\n")
file.write("\nint isRetX87Wrapper(wrapper_t fun) {\n")
for k in retx87_idxs:
if k != str(Clauses()):
@ -1565,6 +1566,6 @@ if __name__ == '__main__':
if v == "--":
limit.append(i)
Define.defines = list(map(DefineType, sys.argv[2:limit[0]]))
if main(sys.argv[1], sys.argv[limit[0]+1:], "2.4.0.21") != 0:
if main(sys.argv[1], sys.argv[limit[0]+1:], "2.4.0.23") != 0:
exit(2)
exit(0)
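For context, here is a rough sketch of the isSimpleWrapper code the generator now emits with the Box32 guard, reconstructed from the format strings above. The architecture define and the elided wrapper comparisons are placeholders, not verbatim output, and wrapper_t/box64_is32bits come from the surrounding generated headers.

/* Illustrative shape of the generated wrapper.c output (not verbatim). */
#if defined(ARM64)                        /* placeholder for the per-arch define */
int isSimpleWrapper(wrapper_t fun) {
	if (box64_is32bits) return 0;         /* new: the simple-wrapper fast path stays 64-bit only */
	/* ...generated comparisons against the known "simple" wrappers... */
	return 0;
}
#else
int isSimpleWrapper(wrapper_t fun) {
	return 0;
}
#endif

/* isRetX87Wrapper32 only gets a real body when Box32 is compiled in */
int isRetX87Wrapper32(wrapper_t fun)
#ifndef BOX32
{ return 0; }
#else
 ;
#endif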

rebuild_wrappers_32.py (new executable file, 1616 lines)

File diff suppressed because it is too large.

src/box32.c (new file, 283 lines)

@ -0,0 +1,283 @@
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include "debug.h"
#include "box32.h"
#include "custommem.h"
#include "converter32.h"
#include "khash.h"
KHASH_MAP_INIT_INT64(to, ulong_t);
KHASH_MAP_INIT_INT(from, uintptr_t);
static kh_from_t* hash_from;
static kh_to_t* hash_to;
#define CNT_INIT 0x80000001
#define HASH_MASK 0x7fffffff
static uint32_t hash_cnt = CNT_INIT;
static pthread_rwlock_t hash_lock = {0};
static int hash_running = 0;
// locale
static kh_from_t* locale_from;
static kh_to_t* locale_to;
void init_hash_helper() {
hash_from = kh_init(from);
hash_to = kh_init(to);
locale_from = kh_init(from);
locale_to = kh_init(to);
pthread_rwlock_init(&hash_lock, NULL);
hash_running = 1;
}
void fini_hash_helper() {
hash_running = 0;
kh_destroy(from, hash_from);
hash_from = NULL;
kh_destroy(to, hash_to);
hash_to = NULL;
hash_cnt = CNT_INIT;
kh_destroy(from, locale_from);
locale_from = NULL;
kh_destroy(to, locale_to);
locale_to = NULL;
pthread_rwlock_destroy(&hash_lock);
}
// Convert from hash key to original 64bits value
uintptr_t from_hash(ulong_t l) {
// easy case first
if((l&HASH_MASK)==l) {
return (uintptr_t)l;
}
if(l==0xffffffff) {
return 0xffffffffffffffffll;
}
// get value from hash table second
uintptr_t ret = 0;
if(!hash_running) {
//printf_log(LOG_INFO, "Warning, from_hash used but hash not running\n");
return ret;
}
pthread_rwlock_rdlock(&hash_lock);
khint_t k = kh_get(from, hash_from, l);
if (k==kh_end(hash_from)) {
ret = (uintptr_t)l;
} else {
ret = kh_value(hash_from, k);
}
pthread_rwlock_unlock(&hash_lock);
return ret;
}
// same as from_hash
uintptr_t from_hash_d(ulong_t l) {
return from_hash(l);
}
// Convert from 64bits to hash key, creating it if needed
ulong_t to_hash(uintptr_t p) {
if((p&(uintptr_t)HASH_MASK)==p) {
return (ulong_t)p;
}
if(p==0xffffffffffffffffll) {
return 0xffffffff;
}
ulong_t ret = 0;
if(!hash_running) {
//printf_log(LOG_INFO, "Warning, to_hash used but hash not running\n");
return ret;
}
khint_t k;
pthread_rwlock_rdlock(&hash_lock);
k = kh_get(to, hash_to, p);
if(k==kh_end(hash_to)) {
// create a new key, but need write lock!
pthread_rwlock_unlock(&hash_lock);
pthread_rwlock_wrlock(&hash_lock);
ret = hash_cnt++;
if(hash_cnt==0xffffffff)
hash_cnt = CNT_INIT;
int r;
k = kh_put(to, hash_to, p, &r);
kh_value(hash_to, k) = ret;
k = kh_put(from, hash_from, ret, &r);
kh_value(hash_from, k) = p;
} else {
ret = kh_value(hash_to, k);
}
pthread_rwlock_unlock(&hash_lock);
return ret;
}
// Convert from 64bits to hash key and delete the entry from both hash table
ulong_t to_hash_d(uintptr_t p) {
if((p&(uintptr_t)HASH_MASK)==p)
return (ulong_t)p;
if(p==0xffffffffffffffffll)
return 0xffffffff;
ulong_t ret = 0;
if(!hash_running) {
//printf_log(LOG_INFO, "Warning, to_hash_d used but hash not running\n");
return ret;
}
khint_t k;
pthread_rwlock_wrlock(&hash_lock);
k = kh_get(to, hash_to, p);
if(k==kh_end(hash_to)) {
/// should this be an assert?
} else {
ret = kh_value(hash_to, k);
// delete both entries
k = kh_get(to, hash_to, p);
kh_del(to, hash_to, k);
k = kh_get(from, hash_from, ret);
kh_del(from, hash_from, k);
}
pthread_rwlock_unlock(&hash_lock);
return ret;
}
typedef struct struct_locale_s {
void* p0[13];
void* p1;
void* p2;
void* p3;
void* p4[13];
} struct_locale_t;
void from_struct_locale(struct_locale_t *dest, ptr_t s) {
uint8_t* src = (uint8_t*)from_ptrv(s);
for(int i=0; i<13; ++i) {
dest->p0[i] = (void*)from_hash(*(ptr_t*)src); src += 4;
}
dest->p1 = from_ptrv(*(ptr_t*)src); src += 4;
dest->p2 = from_ptrv(*(ptr_t*)src); src += 4;
dest->p3 = from_ptrv(*(ptr_t*)src); src += 4;
for(int i=0; i<13; ++i) {
dest->p4[i] = (void*)from_hash(*(ptr_t*)src); src += 4;
}
}
void to_struct_locale(ptr_t d, const struct_locale_t *src) {
if (!src) return;
uint8_t* dest = (uint8_t*)from_ptrv(d);
for(int i=0; i<13; ++i) {
*(ptr_t*)dest = to_hashv(src->p0[i]); dest += 4;
}
// copy the 3 ctype int (1st is short int, but int will do)
*(unsigned short int*)(d+(13+3+13)*sizeof(ptr_t)) = *(unsigned short int*)src->p1;
*(ptr_t*)dest = d+(13+3+13)*sizeof(ptr_t); dest += 4;
*(int*)(d+(13+3+13+1)*sizeof(ptr_t)) = *(int*)src->p2;
*(ptr_t*)dest = d+(13+3+13+1)*sizeof(ptr_t); dest += 4;
*(int*)(d+(13+3+13+3)*sizeof(ptr_t)) = *(int*)src->p3;
*(ptr_t*)dest = d+(13+3+13+2)*sizeof(ptr_t); dest += 4;
for(int i=0; i<13; ++i) {
*(ptr_t*)dest = to_hashv(src->p4[i]); dest += 4;
}
}
void free_struct_locale(const struct_locale_t *src) {
for(int i=0; i<13; ++i) {
to_hash_d((uintptr_t)src->p0[i]);
}
for(int i=0; i<13; ++i) {
to_hash_d((uintptr_t)src->p4[i]);
}
}
// Convert from locale key to original 64bits value
void* from_locale(ptr_t l) {
// easy case first
if(l < 0x100) {
return from_ptrv(l);
}
if(l == 0xffffffff) {
return (void*)-1;
}
// get value from hash table second
void* ret = 0;
if(!hash_running) {
//printf_log(LOG_INFO, "Warning, from_locale used but hash not running\n");
return ret;
}
pthread_rwlock_rdlock(&hash_lock);
khint_t k = kh_get(from, locale_from, l);
if (k==kh_end(locale_from)) {
ret = from_ptrv(l);
} else {
ret = (void*)kh_value(locale_from, k);
}
pthread_rwlock_unlock(&hash_lock);
//from_struct_locale((struct_locale_t*)ret, l);
return ret;
}
// same as from_locale
void* from_locale_d(ptr_t l) {
return from_locale(l);
}
// Convert from 64bits to locale key, creating it if needed
ptr_t to_locale(void* p) {
if((uintptr_t)p < 0x100) {
return to_ptrv(p);
}
if(p == (void*)-1) {
return 0xffffffff;
}
ptr_t ret = 0;
if(!hash_running) {
//printf_log(LOG_INFO, "Warning, to_locale used but hash not running\n");
return ret;
}
khint_t k;
pthread_rwlock_rdlock(&hash_lock);
k = kh_get(to, locale_to, (uintptr_t)p);
int conv = 0;
if(k==kh_end(locale_to)) {
// create a new key, but need write lock!
pthread_rwlock_unlock(&hash_lock);
pthread_rwlock_wrlock(&hash_lock);
// a locale_t is 5 pointer!
void* m = calloc(13+3+13+3, sizeof(ptr_t)); // the 3 ctype value are also inside the locale struct
ret = to_ptrv(m);
// add to hash maps
int r;
k = kh_put(to, locale_to, (uintptr_t)p, &r);
kh_value(locale_to, k) = ret;
k = kh_put(from, locale_from, ret, &r);
kh_value(locale_from, k) = (uintptr_t)p;
conv = 1;
} else {
ret = kh_value(locale_to, k);
}
pthread_rwlock_unlock(&hash_lock);
if(conv)
to_struct_locale(ret, (struct_locale_t*)p);
return ret;
}
// Convert from 64bits to hash key and delete the entry from both hash table
ptr_t to_locale_d(void* p) {
if((uintptr_t)p < 0x100)
return to_ptrv(p);
ptr_t ret = 0;
if(!hash_running)
return ret;
khint_t k;
pthread_rwlock_wrlock(&hash_lock);
k = kh_get(to, locale_to, (uintptr_t)p);
if(k==kh_end(locale_to)) {
/// should this be an assert?
} else {
ret = kh_value(locale_to, k);
// free the memory
free_struct_locale(p);
free(from_ptrv(ret));
// delete both entries
k = kh_get(to, locale_to, (uintptr_t)p);
kh_del(to, locale_to, k);
k = kh_get(from, locale_from, ret);
kh_del(from, locale_from, k);
}
pthread_rwlock_unlock(&hash_lock);
return ret;
}
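A minimal usage sketch of these helpers, covering both the generic hash and the locale variant, assuming only the declarations from box32.h used above (ulong_t/ptr_t are the guest-sized integer types; the function names below are illustrative, not part of the API): wrapped 32-bit libraries hand the guest a to_hash()/to_locale() handle that fits in a guest pointer, and turn it back into the real 64-bit object with from_hash()/from_locale() when the guest passes it in again.

// Sketch only: shuttling a 64-bit host object through the 32-bit guest world.
static ulong_t publish_to_guest(void* host_object)
{
    // 64-bit host pointer -> 32-bit guest handle (allocated on first use)
    return to_hash((uintptr_t)host_object);
}

static void* resolve_from_guest(ulong_t handle)
{
    // 32-bit guest handle -> original 64-bit host pointer
    return (void*)from_hash(handle);
}

static void forget_host_object(void* host_object)
{
    // drop both hash entries once the host object is destroyed
    to_hash_d((uintptr_t)host_object);
}

// Same idea for locale_t, except to_locale() also builds a 32-bit-addressable
// copy of the locale struct so guest code can dereference the handle directly;
// to_locale_d() frees that copy again.
static ptr_t publish_locale_to_guest(void* host_locale)
{
    return to_locale(host_locale);
}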


@ -23,6 +23,9 @@
#include "gltools.h"
#include "rbtree.h"
#include "dynarec.h"
#ifdef BOX32
#include "box32.h"
#endif
EXPORTDYN
void initAllHelpers(box64context_t* context)
@ -31,6 +34,9 @@ void initAllHelpers(box64context_t* context)
if(inited)
return;
my_context = context;
#ifdef BOX32
init_hash_helper();
#endif
init_pthread_helper();
init_bridge_helper();
init_signal_helper(context);
@ -47,6 +53,9 @@ void finiAllHelpers(box64context_t* context)
fini_pthread_helper(context);
fini_signal_helper();
fini_bridge_helper();
#ifdef BOX32
fini_hash_helper();
#endif
fini_custommem_helper(context);
finied = 1;
}
@ -75,6 +84,7 @@ void free_tlsdatasize(void* p)
}
void x64Syscall(x64emu_t *emu);
void x86Syscall(x64emu_t *emu);
int unlockMutex()
{
@ -223,15 +233,21 @@ box64context_t *NewBox64Context(int argc)
context->system = NewBridge();
// Cannot use Bridge name as the map is not initialized yet
// create vsyscall
context->vsyscall = AddBridge(context->system, vFEv, x64Syscall, 0, NULL);
context->vsyscall = AddBridge(context->system, vFEv, box64_is32bits?x86Syscall:x64Syscall, 0, NULL);
// create the vsyscalls
context->vsyscalls[0] = AddVSyscall(context->system, 96);
context->vsyscalls[1] = AddVSyscall(context->system, 201);
context->vsyscalls[2] = AddVSyscall(context->system, 309);
// create the alternate to map at address
addAlternate((void*)0xffffffffff600000, (void*)context->vsyscalls[0]);
addAlternate((void*)0xffffffffff600400, (void*)context->vsyscalls[1]);
addAlternate((void*)0xffffffffff600800, (void*)context->vsyscalls[2]);
if(box64_is32bits) {
#ifdef BOX32
addAlternate((void*)0xffffe400, from_ptrv(context->vsyscall));
#endif
} else {
context->vsyscalls[0] = AddVSyscall(context->system, 96);
context->vsyscalls[1] = AddVSyscall(context->system, 201);
context->vsyscalls[2] = AddVSyscall(context->system, 309);
// create the alternate to map at address
addAlternate((void*)0xffffffffff600000, (void*)context->vsyscalls[0]);
addAlternate((void*)0xffffffffff600400, (void*)context->vsyscalls[1]);
addAlternate((void*)0xffffffffff600800, (void*)context->vsyscalls[2]);
}
// create exit bridge
context->exit_bridge = AddBridge(context->system, NULL, NULL, 0, NULL);
// get handle to box64 itself


@ -189,6 +189,7 @@ char* box64_custom_gstreamer = NULL;
uintptr_t fmod_smc_start = 0;
uintptr_t fmod_smc_end = 0;
uint32_t default_gs = 0x53;
uint32_t default_fs = 0x53;
int jit_gdb = 0;
int box64_tcmalloc_minimal = 0;
@ -1269,7 +1270,11 @@ int GatherEnv(char*** dest, char** env, char* prog)
(*dest)[idx++] = box_strdup("BOX64_PATH=.:bin");
}
if(!ld_path) {
#ifdef BOX32
(*dest)[idx++] = box_strdup("BOX64_LD_LIBRARY_PATH=.:lib:lib64:x86_64:bin64:libs64:i386:libs:bin");
#else
(*dest)[idx++] = box_strdup("BOX64_LD_LIBRARY_PATH=.:lib:lib64:x86_64:bin64:libs64");
#endif
}
// add "_=prog" at the end...
if(prog) {
@ -1382,24 +1387,7 @@ void LoadEnvVars(box64context_t *context)
}
} while(p);
}
// check BOX64_LD_LIBRARY_PATH and load it
LoadEnvPath(&context->box64_ld_lib, ".:lib:lib64:x86_64:bin64:libs64", "BOX64_LD_LIBRARY_PATH");
#ifndef TERMUX
if(FileExist("/lib/x86_64-linux-gnu", 0))
AddPath("/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
if(FileExist("/usr/lib/x86_64-linux-gnu", 0))
AddPath("/usr/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
if(FileExist("/usr/x86_64-linux-gnu/lib", 0))
AddPath("/usr/x86_64-linux-gnu/lib", &context->box64_ld_lib, 1);
if(FileExist("/data/data/com.termux/files/usr/glibc/lib/x86_64-linux-gnu", 0))
AddPath("/data/data/com.termux/files/usr/glibc/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
#else
//TODO: Add Termux Library Path - Lily
if(FileExist("/data/data/com.termux/files/usr/lib/x86_64-linux-gnu", 0))
AddPath("/data/data/com.termux/files/usr/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
#endif
if(getenv("LD_LIBRARY_PATH"))
PrependList(&context->box64_ld_lib, getenv("LD_LIBRARY_PATH"), 1); // in case some of the path are for x86 world
if(getenv("BOX64_EMULATED_LIBS")) {
char* p = getenv("BOX64_EMULATED_LIBS");
ParseList(p, &context->box64_emulated_libs, 0);
@ -1497,6 +1485,54 @@ void LoadEnvVars(box64context_t *context)
#endif
}
EXPORTDYN
void LoadLDPath(box64context_t *context)
{
// check BOX64_LD_LIBRARY_PATH and load it
#ifdef BOX32
if(box64_is32bits)
LoadEnvPath(&context->box64_ld_lib, ".:lib:i386:bin:libs", "BOX64_LD_LIBRARY_PATH");
else
#endif
LoadEnvPath(&context->box64_ld_lib, ".:lib:lib64:x86_64:bin64:libs64", "BOX64_LD_LIBRARY_PATH");
#ifndef TERMUX
if(box64_is32bits) {
#ifdef BOX32
if(FileExist("/lib/i386-linux-gnu", 0))
AddPath("/lib/i386-linux-gnu", &context->box64_ld_lib, 1);
if(FileExist("/usr/lib/i386-linux-gnu", 0))
AddPath("/usr/lib/i386-linux-gnu", &context->box64_ld_lib, 1);
if(FileExist("/usr/i386-linux-gnu/lib", 0))
AddPath("/usr/i386-linux-gnu/lib", &context->box64_ld_lib, 1);
if(FileExist("/data/data/com.termux/files/usr/glibc/lib/i386-linux-gnu", 0))
AddPath("/data/data/com.termux/files/usr/glibc/lib/i386-linux-gnu", &context->box64_ld_lib, 1);
#endif
} else {
if(FileExist("/lib/x86_64-linux-gnu", 0))
AddPath("/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
if(FileExist("/usr/lib/x86_64-linux-gnu", 0))
AddPath("/usr/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
if(FileExist("/usr/x86_64-linux-gnu/lib", 0))
AddPath("/usr/x86_64-linux-gnu/lib", &context->box64_ld_lib, 1);
if(FileExist("/data/data/com.termux/files/usr/glibc/lib/x86_64-linux-gnu", 0))
AddPath("/data/data/com.termux/files/usr/glibc/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
}
#else
//TODO: Add Termux Library Path - Lily
if(box64_is32bits) {
#ifdef BOX32
if(FileExist("/data/data/com.termux/files/usr/lib/i386-linux-gnu", 0))
AddPath("/data/data/com.termux/files/usr/lib/i386-linux-gnu", &context->box64_ld_lib, 1);
#endif
} else {
if(FileExist("/data/data/com.termux/files/usr/lib/x86_64-linux-gnu", 0))
AddPath("/data/data/com.termux/files/usr/lib/x86_64-linux-gnu", &context->box64_ld_lib, 1);
}
#endif
if(getenv("LD_LIBRARY_PATH"))
PrependList(&context->box64_ld_lib, getenv("LD_LIBRARY_PATH"), 1); // in case some of the path are for x86 world
}
EXPORTDYN
void setupTraceInit()
{
@ -1957,12 +1993,14 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf
// check if box86 is present
{
my_context->box86path = box_strdup(my_context->box64path);
#ifndef BOX32
char* p = strrchr(my_context->box86path, '6'); // get the 6 of box64
p[0] = '8'; p[1] = '6'; // change 64 to 86
if(!FileExist(my_context->box86path, IS_FILE)) {
box_free(my_context->box86path);
my_context->box86path = NULL;
}
#endif
}
const char* prgname = strrchr(prog, '/');
if(!prgname)
@ -2089,9 +2127,12 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf
box64_is32bits = FileIsX86ELF(my_context->fullpath);
if(box64_is32bits) {
printf_log(LOG_INFO, "BOX64: Using Box32 to load 32bits elf\n");
loadProtectionFromMap();
reserveHighMem();
init_pthread_helper_32();
}
#endif
LoadLDPath(my_context);
elfheader_t *elf_header = LoadAndCheckElfHeader(f, my_context->fullpath, 1);
if(!elf_header) {
int x86 = my_context->box86path?FileIsX86ELF(my_context->fullpath):0;
@ -2252,10 +2293,15 @@ int initialize(int argc, const char **argv, char** env, x64emu_t** emulator, elf
// stack setup is much more complicated then just that!
SetupInitialStack(emu); // starting here, the argv[] don't need free anymore
SetupX64Emu(emu, NULL);
SetRSI(emu, my_context->argc);
SetRDX(emu, (uint64_t)my_context->argv);
SetRCX(emu, (uint64_t)my_context->envv);
SetRBP(emu, 0); // Frame pointer so to "No more frame pointer"
if(box64_is32bits) {
SetEAX(emu, my_context->argc);
SetEBX(emu, my_context->argv32);
} else {
SetRSI(emu, my_context->argc);
SetRDX(emu, (uint64_t)my_context->argv);
SetRCX(emu, (uint64_t)my_context->envv);
SetRBP(emu, 0); // Frame pointer so to "No more frame pointer"
}
// child fork to handle traces
pthread_atfork(NULL, NULL, my_child_fork);
@ -2339,10 +2385,19 @@ int emulate(x64emu_t* emu, elfheader_t* elf_header)
// emulate!
printf_log(LOG_DEBUG, "Start x64emu on Main\n");
// Stack is ready, with stacked: NULL env NULL argv argc
SetRIP(emu, my_context->ep);
ResetFlags(emu);
Push64(emu, my_context->exit_bridge); // push to pop it just after
SetRDX(emu, Pop64(emu)); // RDX is exit function
#ifdef BOX32
if(box64_is32bits) {
SetEIP(emu, my_context->ep);
Push32(emu, my_context->exit_bridge); // push to pop it just after
SetEDX(emu, Pop32(emu)); // EDX is exit function
} else
#endif
{
SetRIP(emu, my_context->ep);
Push64(emu, my_context->exit_bridge); // push to pop it just after
SetRDX(emu, Pop64(emu)); // RDX is exit function
}
Run(emu, 0);
// Get EAX
int ret = GetEAX(emu);


@ -1521,21 +1521,68 @@ static void atfork_child_custommem(void)
// (re)init mutex if it was lock before the fork
init_mutexes();
}
#ifdef BOX32
void reverveHigMem32(void)
{
loadProtectionFromMap();
uintptr_t cur_size = 1024LL*1024*1024*1024; // start with 1TB check
void* cur;
while(cur_size>=65536) {
cur = internal_mmap(NULL, cur_size, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
if((cur==MAP_FAILED) || (cur<(void*)0x100000000LL)) {
if(cur!=MAP_FAILED) {
//printf_log(LOG_DEBUG, " Failed to reserve high %p (%zx)\n", cur, cur_size);
internal_munmap(cur, cur_size);
} //else
// printf_log(LOG_DEBUG, " Failed to reserve %zx sized block\n", cur_size);
cur_size>>=1;
} else {
rb_set(mapallmem, (uintptr_t)cur, (uintptr_t)cur+cur_size, 1);
//printf_log(LOG_DEBUG, "Reserved high %p (%zx)\n", cur, cur_size);
}
}
printf_log(LOG_INFO, "Memory higher than 32bits reserved\n");
if(box64_log>=LOG_DEBUG) {
uintptr_t start=0x100000000LL;
int prot;
uintptr_t bend = 0;
while (bend!=0xffffffffffffffffLL) {
if(rb_get_end(mapallmem, start, &prot, &bend)) {
printf_log(LOG_DEBUG, " Reserved: %p - %p (%d)\n", (void*)start, (void*)bend, prot);
}
start = bend;
}
}
}
#endif
void my_reserveHighMem()
{
static int reserved = 0;
if(reserved || (!have48bits && !box64_is32bits))
return;
reserved = 1;
#ifdef BOX32
if(box64_is32bits) {
reverveHigMem32();
return;
}
#endif
uintptr_t cur = box64_is32bits?(1ULL<<32):(1ULL<<47);
uintptr_t bend = 0;
uint32_t prot;
while (bend!=0xffffffffffffffffLL) {
if(!rb_get_end(mapallmem, cur, &prot, &bend)) {
void* ret = internal_mmap((void*)cur, bend-cur, 0, MAP_ANONYMOUS|MAP_FIXED|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
printf_log(LOG_DEBUG, "Reserve %p-%p => %p (%s)\n", (void*)cur, bend, ret, strerror(errno));
printf_log(LOG_DEBUG, "mmap %p-%p\n", cur, bend);
// create a border at 39bits...
if(cur<(1ULL<<39) && bend>(1ULL<<39))
bend = 1ULL<<39;
// create a border at 47bits
if(cur<(1ULL<<47) && bend>(1ULL<<47))
bend = 1ULL<<47;
// create a border at 48bits
if(cur<(1ULL<<48) && bend>(1ULL<<48))
bend = 1ULL<<48;
void* ret = internal_mmap((void*)cur, bend-cur, 0, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
printf_log(LOG_DEBUG, "Reserve %p-%p => %p (%s)\n", (void*)cur, bend, ret, (ret==MAP_FAILED)?strerror(errno):"ok");
if(ret!=(void*)-1) {
rb_set(mapallmem, cur, bend, 1);
}


@ -283,13 +283,19 @@ int convert_bitmask(uint64_t bitmask);
#define LDRx_REG(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b11, Rm, 0b011, 0, Rn, Rt))
#define LDRx_REG_LSL3(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b11, Rm, 0b011, 1, Rn, Rt))
#define LDRx_REG_UXTW3(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b11, Rm, 0b010, 1, Rn, Rt))
#define LDRx_REG_SXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b11, Rm, 0b110, 0, Rn, Rt))
#define LDRw_REG(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b10, Rm, 0b011, 0, Rn, Rt))
#define LDRw_REG_LSL2(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b10, Rm, 0b011, 1, Rn, Rt))
#define LDRw_REG_SXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b10, Rm, 0b110, 0, Rn, Rt))
#define LDRxw_REG(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b10+rex.w, Rm, 0b011, 0, Rn, Rt))
#define LDRz_REG(Rt, Rn, Rm) EMIT(LDR_REG_gen(rex.is32bits?0b10:0b11, Rm, 0b011, 0, Rn, Rt))
#define LDRxw_REG_SXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b10+rex.w, Rm, 0b110, 0, Rn, Rt))
#define LDRz_REG_SXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(rex.is32bits?0b10:0b11, Rm, 0b110, 0, Rn, Rt))
#define LDRB_REG(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b00, Rm, 0b011, 0, Rn, Rt))
#define LDRB_REG_UXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b00, Rm, 0b010, 0, Rn, Rt))
#define LDRB_REG_SXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b00, Rm, 0b110, 0, Rn, Rt))
#define LDRH_REG(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b01, Rm, 0b011, 0, Rn, Rt))
#define LDRH_REG_SXTW(Rt, Rn, Rm) EMIT(LDR_REG_gen(0b01, Rm, 0b110, 0, Rn, Rt))
#define LDRS_U12_gen(size, op1, opc, imm12, Rn, Rt) ((size)<<30 | 0b111<<27 | (op1)<<24 | (opc)<<22 | (imm12)<<10 | (Rn)<<5 | (Rt))
#define LDRSHx_U12(Rt, Rn, imm12) EMIT(LDRS_U12_gen(0b01, 0b01, 0b10, ((uint32_t)(imm12>>1))&0xfff, Rn, Rt))
@ -301,6 +307,7 @@ int convert_bitmask(uint64_t bitmask);
#define LDRS_REG_gen(size, Rm, option, S, Rn, Rt) ((size)<<30 | 0b111<<27 | 0b10<<22 | 1<<21 | (Rm)<<16 | (option)<<13 | (S)<<12 | (0b10)<<10 | (Rn)<<5 | (Rt))
#define LDRSW_REG(Rt, Rn, Rm) EMIT(LDRS_REG_gen(0b10, Rm, 0b011, 0, Rn, Rt))
#define LDRSW_REG_SXTW(Rt, Rn, Rm) EMIT(LDRS_REG_gen(0b10, Rm, 0b110, 0, Rn, Rt))
#define LDR_PC_gen(opc, imm19, Rt) ((opc)<<30 | 0b011<<27 | (imm19)<<5 | (Rt))
#define LDRx_literal(Rt, imm19) EMIT(LDR_PC_gen(0b01, ((imm19)>>2)&0x7FFFF, Rt))
@ -371,12 +378,18 @@ int convert_bitmask(uint64_t bitmask);
#define STRx_REG(Rt, Rn, Rm) EMIT(STR_REG_gen(0b11, Rm, 0b011, 0, Rn, Rt))
#define STRx_REG_LSL3(Rt, Rn, Rm) EMIT(STR_REG_gen(0b11, Rm, 0b011, 1, Rn, Rt))
#define STRx_REG_UXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(0b11, Rm, 0b010, 0, Rn, Rt))
#define STRx_REG_SXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(0b11, Rm, 0b110, 0, Rn, Rt))
#define STRw_REG(Rt, Rn, Rm) EMIT(STR_REG_gen(0b10, Rm, 0b011, 0, Rn, Rt))
#define STRw_REG_LSL2(Rt, Rn, Rm) EMIT(STR_REG_gen(0b10, Rm, 0b011, 1, Rn, Rt))
#define STRw_REG_SXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(0b10, Rm, 0b110, 0, Rn, Rt))
#define STRB_REG(Rt, Rn, Rm) EMIT(STR_REG_gen(0b00, Rm, 0b011, 0, Rn, Rt))
#define STRB_REG_SXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(0b00, Rm, 0b110, 0, Rn, Rt))
#define STRH_REG(Rt, Rn, Rm) EMIT(STR_REG_gen(0b01, Rm, 0b011, 0, Rn, Rt))
#define STRH_REG_SXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(0b01, Rm, 0b110, 0, Rn, Rt))
#define STRxw_REG(Rt, Rn, Rm) EMIT(STR_REG_gen(rex.w?0b11:0b10, Rm, 0b011, 0, Rn, Rt))
#define STRxw_REG_SXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(rex.w?0b11:0b10, Rm, 0b110, 0, Rn, Rt))
#define STRz_REG(Rt, Rn, Rm) EMIT(STR_REG_gen(rex.is32bits?0b10:0b11, Rm, 0b011, 0, Rn, Rt))
#define STRz_REG_SXTW(Rt, Rn, Rm) EMIT(STR_REG_gen(rex.is32bits?0b10:0b11, Rm, 0b110, 0, Rn, Rt))
// LOAD/STORE PAIR
#define MEMPAIR_gen(size, L, op2, imm7, Rt2, Rn, Rt) ((size)<<31 | 0b101<<27 | (op2)<<23 | (L)<<22 | (imm7)<<15 | (Rt2)<<10 | (Rn)<<5 | (Rt))
@ -896,18 +909,24 @@ int convert_bitmask(uint64_t bitmask);
#define VMEM_REG_gen(size, opc, Rm, option, S, Rn, Rt) ((size)<<30 | 0b111<<27 | 1<<26 | (opc)<<22 | 1<<21 | (Rm)<<16 | (option)<<13 | (S)<<12 | 0b10<<10 | (Rn)<<5 | (Rt))
#define VLDR32_REG(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b10, 0b01, Rm, 0b011, 0, Rn, Dt))
#define VLDR32_REG_SXTW(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b10, 0b01, Rm, 0b110, 0, Rn, Dt))
#define VLDR32_REG_LSL2(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b10, 0b01, Rm, 0b011, 1, Rn, Dt))
#define VLDR64_REG(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b11, 0b01, Rm, 0b011, 0, Rn, Dt))
#define VLDR64_REG_SXTW(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b11, 0b01, Rm, 0b110, 0, Rn, Dt))
#define VLDR64_REG_LSL3(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b11, 0b01, Rm, 0b011, 1, Rn, Dt))
#define VLDR128_REG(Qt, Rn, Rm) EMIT(VMEM_REG_gen(0b00, 0b11, Rm, 0b011, 0, Rn, Qt))
#define VLDR128_REG_LSL4(Qt, Rn, Rm) EMIT(VMEM_REG_gen(0b00, 0b11, Rm, 0b011, 1, Rn, Qt))
#define VLDR128_REG_SXTW(Qt, Rn, Rm) EMIT(VMEM_REG_gen(0b00, 0b11, Rm, 0b110, 0, Rn, Qt))
#define VSTR32_REG(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b10, 0b00, Rm, 0b011, 0, Rn, Dt))
#define VSTR32_REG_LSL2(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b10, 0b00, Rm, 0b011, 1, Rn, Dt))
#define VSTR32_REG_SXTW(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b10, 0b00, Rm, 0b110, 0, Rn, Dt))
#define VSTR64_REG(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b11, 0b00, Rm, 0b011, 0, Rn, Dt))
#define VSTR64_REG_LSL3(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b11, 0b00, Rm, 0b011, 1, Rn, Dt))
#define VSTR64_REG_SXTW(Dt, Rn, Rm) EMIT(VMEM_REG_gen(0b11, 0b00, Rm, 0b110, 0, Rn, Dt))
#define VSTR128_REG(Qt, Rn, Rm) EMIT(VMEM_REG_gen(0b00, 0b10, Rm, 0b011, 0, Rn, Qt))
#define VSTR128_REG_LSL4(Qt, Rn, Rm) EMIT(VMEM_REG_gen(0b00, 0b10, Rm, 0b011, 1, Rn, Qt))
#define VSTR128_REG_SXTW(Qt, Rn, Rm) EMIT(VMEM_REG_gen(0b00, 0b10, Rm, 0b110, 0, Rn, Qt))
#define VLDR_PC_gen(opc, imm19, Rt) ((opc)<<30 | 0b011<<27 | 1<<26 | (imm19)<<5 | (Rt))
#define VLDR32_literal(Vt, imm19) EMIT(VLDR_PC_gen(0b00, ((imm19)>>2)&0x7FFFF, Vt))
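The new *_SXTW variants exist because Box32 keeps guest addresses in 32-bit registers: option 0b011 in these encodings uses the index register as a full 64-bit Xm (LSL #0), while 0b110 sign-extends the 32-bit Wm before adding it to Rn, which is what the rex.is32bits paths in the dynarec changes below rely on. A small standalone sketch of the difference, reusing the LDRS_REG_gen macro from this header (register numbers are arbitrary; the u suffix on the size argument only avoids signed-shift overflow in the demo, and binary literals need GCC/Clang or C23):

#include <stdio.h>
#include <stdint.h>

/* Copied from arm64_emitter.h: builds an LDRS* register-offset encoding. */
#define LDRS_REG_gen(size, Rm, option, S, Rn, Rt) ((size)<<30 | 0b111<<27 | 0b10<<22 | 1<<21 | (Rm)<<16 | (option)<<13 | (S)<<12 | (0b10)<<10 | (Rn)<<5 | (Rt))

int main(void)
{
    unsigned Rt = 1, Rn = 4, Rm = 2;                             /* arbitrary demo registers */
    /* option 0b011: index is the 64-bit Xm -- the existing 64-bit path */
    uint32_t ldrsw      = LDRS_REG_gen(0b10u, Rm, 0b011, 0, Rn, Rt);
    /* option 0b110: index is the 32-bit Wm, sign-extended (SXTW) -- the Box32 path */
    uint32_t ldrsw_sxtw = LDRS_REG_gen(0b10u, Rm, 0b110, 0, Rn, Rt);
    printf("LDRSW_REG      encoding: 0x%08x\n", (unsigned)ldrsw);
    printf("LDRSW_REG_SXTW encoding: 0x%08x\n", (unsigned)ldrsw_sxtw);
    /* the two differ only inside the 3-bit option field (bits 15:13) */
    printf("differing bits: 0x%08x\n", (unsigned)(ldrsw ^ ldrsw_sxtw));
    return 0;
}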


@ -108,7 +108,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
v0 = sse_get_reg_empty(dyn, ninst, x1, gd);
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0);
ADDx_REG(x4, x4, ed);
ADDz_REG(x4, x4, ed);
VLD64(v0, x4, fixedaddress); // upper part reseted
}
break;
@ -126,7 +126,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
v0 = sse_get_reg_empty(dyn, ninst, x1, gd);
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0);
ADDx_REG(x4, x4, ed);
ADDz_REG(x4, x4, ed);
VLD32(v0, x4, fixedaddress);
}
break;
@ -148,7 +148,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
} else {
grab_segdata(dyn, addr, ninst, x4, seg);
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0xfff<<4, 15, rex, NULL, 0, 0);
ADDx_REG(x4, x4, ed);
ADDz_REG(x4, x4, ed);
VST128(v0, x4, fixedaddress);
SMWRITE2();
}
@ -165,7 +165,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
} else {
grab_segdata(dyn, addr, ninst, x4, seg);
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0xfff<<3, 7, rex, NULL, 0, 0);
ADDx_REG(x4, x4, ed);
ADDz_REG(x4, x4, ed);
VST64(v0, x4, fixedaddress);
SMWRITE2();
}
@ -182,7 +182,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
} else {
grab_segdata(dyn, addr, ninst, x4, seg);
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0xfff<<2, 3, rex, NULL, 0, 0);
ADDx_REG(x4, x4, ed);
ADDz_REG(x4, x4, ed);
VST32(v0, x4, fixedaddress);
SMWRITE2();
}
@ -206,7 +206,7 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
grab_segdata(dyn, addr, ninst, x4, seg);
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0xfff<<4, 15, rex, NULL, 0, 0);
ADDx_REG(x4, x4, ed);
ADDz_REG(x4, x4, ed);
VLD128(v0, ed, fixedaddress);
}
break;
@ -275,7 +275,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
} else {
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
LDRB_REG(gd, ed, x4);
if(rex.is32bits)
LDRB_REG_SXTW(gd, x4, ed);
else
LDRB_REG(gd, ed, x4);
}
break;
default:
@ -397,7 +400,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
grab_segdata(dyn, addr, ninst, x4, seg);
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
LDRSW_REG(gd, ed, x4);
if(rex.is32bits)
LDRSW_REG_SXTW(gd, x4, ed);
else
LDRSW_REG(gd, ed, x4);
}
} else {
if(MODREG) { // reg <= reg
@ -406,7 +412,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
grab_segdata(dyn, addr, ninst, x4, seg);
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
LDRw_REG(gd, ed, x4);
if(rex.is32bits)
LDRw_REG_SXTW(gd, x4, ed);
else
LDRw_REG(gd, ed, x4);
}
}
}
@ -639,7 +648,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
grab_segdata(dyn, addr, ninst, x4, seg);
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
LDRB_REG(x4, wback, x4);
if(rex.is32bits)
LDRB_REG_SXTW(x4, x4, wback);
else
LDRB_REG(x4, wback, x4);
ed = x4;
}
BFIx(gb1, ed, gb2, 8);
@ -653,7 +665,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
MOVxw_REG(xRAX+(nextop&7)+(rex.b<<3), gd);
} else { // mem <= reg
addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
STRxw_REG(gd, ed, x4);
if(rex.is32bits)
STRxw_REG_SXTW(gd, x4, ed);
else
STRxw_REG(gd, ed, x4);
SMWRITE2();
}
break;
@ -668,7 +683,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
} else { // mem <= reg
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
LDRxw_REG(gd, ed, x4);
if(rex.is32bits)
LDRxw_REG_SXTW(gd, x4, ed);
else
LDRxw_REG(gd, ed, x4);
}
break;
@ -698,7 +716,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
} else {
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
LDRH_REG(x1, wback, x4);
if(rex.is32bits)
LDRH_REG_SXTW(x1, x4, wback);
else
LDRH_REG(x1, wback, x4);
ed = x1;
}
STRH_U12(ed, xEmu, offsetof(x64emu_t, segs[u8]));
@ -714,11 +735,17 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
POP1z(x2); // so this can handle POP [ESP] and maybe some variant too
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, &unscaled, 0, 0, rex, NULL, 0, 0);
if(ed==xRSP) {
STRz_REG(x2, ed, x4);
if(rex.is32bits)
STRz_REG_SXTW(x2, x4, ed);
else
STRz_REG(x2, ed, x4);
} else {
// complicated to just allow a segfault that can be recovered correctly
SUBz_U12(xRSP, xRSP, rex.is32bits?4:8);
STRz_REG(x2, ed, x4);
if(rex.is32bits)
STRz_REG_SXTW(x2, x4, ed);
else
STRz_REG(x2, ed, x4);
ADDz_U12(xRSP, xRSP, rex.is32bits?4:8);
}
}
@ -735,9 +762,25 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
else
u64 = F64;
MOV64z(x1, u64);
LDRxw_REG(xRAX, x1, x4);
if(rex.is32bits)
LDRxw_REG_SXTW(xRAX, x4, x1);
else
LDRxw_REG(xRAX, x4, x1);
break;
case 0xA2:
INST_NAME("MOV FS:Od,AL");
grab_segdata(dyn, addr, ninst, x4, seg);
if(rex.is32bits)
u64 = F32;
else
u64 = F64;
MOV64z(x1, u64);
if(rex.is32bits)
STRB_REG_SXTW(xRAX, x4, x1);
else
STRB_REG(xRAX, x4, x1);
SMWRITE2();
break;
case 0xA3:
INST_NAME("MOV FS:Od,EAX");
grab_segdata(dyn, addr, ninst, x4, seg);
@ -746,7 +789,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
else
u64 = F64;
MOV64z(x1, u64);
STRxw_REG(xRAX, x1, x4);
if(rex.is32bits)
STRxw_REG_SXTW(xRAX, x4, x1);
else
STRxw_REG(xRAX, x4, x1);
SMWRITE2();
break;
@ -770,7 +816,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 1);
u8 = F8;
MOV32w(x3, u8);
STRB_REG(x3, ed, x4);
if(rex.is32bits)
STRB_REG_SXTW(x3, x4, ed);
else
STRB_REG(x3, ed, x4);
SMWRITE2();
}
break;
@ -786,7 +835,10 @@ uintptr_t dynarec64_64(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int nin
addr = geted(dyn, addr, ninst, nextop, &ed, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 4);
i64 = F32S;
MOV64xw(x3, i64);
STRxw_REG(x3, ed, x4);
if(rex.is32bits)
STRxw_REG_SXTW(x3, x4, ed);
else
STRxw_REG(x3, ed, x4);
SMWRITE2();
}
break;


@ -62,7 +62,10 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
SMREAD();
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
v1 = fpu_get_scratch(dyn, ninst);
VLDR64_REG(v1, ed, x4);
if(rex.is32bits)
VLDR64_REG_SXTW(v1, x4, ed);
else
VLDR64_REG(v1, ed, x4);
}
FCMPD(v0, v1);
FCOMI(x1, x2);
@ -80,7 +83,10 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
grab_segdata(dyn, addr, ninst, x4, seg);
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
SMREAD();
VLDR128_REG(v0, ed, x4);
if(rex.is32bits)
VLDR128_REG_SXTW(v0, x4, ed);
else
VLDR128_REG(v0, ed, x4);
}
break;
@ -94,7 +100,10 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
} else {
grab_segdata(dyn, addr, ninst, x4, seg);
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
VSTR128_REG(v0, ed, x4);
if(rex.is32bits)
VSTR128_REG_SXTW(v0, x4, ed);
else
VSTR128_REG(v0, ed, x4);
SMWRITE2();
}
break;
@ -110,7 +119,10 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
} else {
grab_segdata(dyn, addr, ninst, x4, seg);
addr = geted(dyn, addr, ninst, nextop, &ed, x1, &fixedaddress, NULL, 0, 0, rex, NULL, 0, 0);
VSTR64_REG(v0, ed, x4);
if(rex.is32bits)
VSTR64_REG_SXTW(v0, x4, ed);
else
VSTR64_REG(v0, ed, x4);
SMWRITE();
}
break;
@ -229,7 +241,10 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
if(rex.w) {
STRx_REG(gd, ed, x4);
} else {
STRH_REG(gd, ed, x4);
if(rex.is32bits)
STRH_REG_SXTW(gd, x4, ed);
else
STRH_REG(gd, ed, x4);
}
SMWRITE();
}
@ -255,7 +270,10 @@ uintptr_t dynarec64_6664(dynarec_arm_t* dyn, uintptr_t addr, uintptr_t ip, int n
if(rex.w) {
LDRx_REG(gd, ed, x4);
} else {
LDRH_REG(x1, ed, x4);
if(rex.is32bits)
LDRH_REG_SXTW(x1, x4, ed);
else
LDRH_REG(x1, ed, x4);
BFIx(gd, x1, 0, 16);
}
}

View File

@ -213,10 +213,13 @@
} else { \
SMREAD(); \
addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, D); \
LDRxw_REG(x1, wback, O); \
if(rex.is32bits) \
LDRxw_REG_SXTW(x1, O, wback); \
else \
LDRxw_REG(x1, wback, O); \
ed = x1; \
}
#define WBACKO(O) if(wback) {STRxw_REG(ed, wback, O); SMWRITE2();}
#define WBACKO(O) if(wback) {if(rex.is32bits) STRxw_REG_SXTW(ed, O, wback); else STRxw_REG(ed, wback, O); SMWRITE2();}
//GETEDOx can use r1 for ed, and r2 for wback. wback is 0 if ed is xEAX..xEDI
#define GETEDOx(O, D) if(MODREG) { \
ed = xRAX+(nextop&7)+(rex.b<<3); \
@ -224,7 +227,10 @@
} else { \
SMREAD(); \
addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, D); \
LDRx_REG(x1, wback, O); \
if(rex.is32bits) \
LDRx_REG_SXTW(x1, O, wback); \
else \
LDRx_REG(x1, wback, O); \
ed = x1; \
}
//GETEDOz can use r1 for ed, and r2 for wback. wback is 0 if ed is xEAX..xEDI
@ -234,7 +240,10 @@
} else { \
SMREAD(); \
addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, D); \
LDRz_REG(x1, wback, O); \
if(rex.is32bits) \
LDRz_REG_SXTW(x1, O, wback); \
else \
LDRz_REG(x1, wback, O); \
ed = x1; \
}
#define GETSEDOw(O, D) if((nextop&0xC0)==0xC0) { \
@ -245,7 +254,10 @@
} else { \
SMREAD(); \
addr = geted(dyn, addr, ninst, nextop, &wback, x2, &fixedaddress, NULL, 0, 0, rex, NULL, 0, D); \
LDRSW_REG(x1, wback, O); \
if(rex.is32bits) \
LDRSW_REG_SXTW(x1, O, wback); \
else \
LDRSW_REG(x1, wback, O); \
wb = ed = x1; \
}
//FAKEELike GETED, but doesn't get anything
@ -304,7 +316,7 @@
} else { \
SMREAD(); \
addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff<<1, (1<<1)-1, rex, NULL, 0, D); \
ADDx_REG(x3, wback, i); \
ADDz_REG(x3, wback, i); \
if(wback!=x3) wback = x3; \
LDH(i, wback, fixedaddress);\
wb1 = 1; \
@ -365,7 +377,7 @@
} else { \
SMREAD(); \
addr = geted(dyn, addr, ninst, nextop, &wback, x3, &fixedaddress, &unscaled, 0xfff, 0, rex, NULL, 0, D); \
ADDx_REG(x3, wback, i); \
ADDz_REG(x3, wback, i); \
if(wback!=x3) wback = x3; \
LDB(i, wback, fixedaddress);\
wb1 = 1; \

View File

@ -104,10 +104,15 @@ void DynaCall(x64emu_t* emu, uintptr_t addr)
multiuint_t old_res_sav= emu->res_sav;
deferred_flags_t old_df_sav= emu->df_sav;
// uc_link
x64_ucontext_t* old_uc_link = emu->uc_link;
void* old_uc_link = emu->uc_link;
emu->uc_link = NULL;
PushExit(emu);
#ifdef BOX32
if(box64_is32bits)
PushExit_32(emu);
else
#endif
PushExit(emu);
R_RIP = addr;
emu->df = d_none;
DynaRun(emu);
@ -137,6 +142,9 @@ void DynaCall(x64emu_t* emu, uintptr_t addr)
}
int my_setcontext(x64emu_t* emu, void* ucp);
#ifdef BOX32
int my32_setcontext(x64emu_t* emu, void* ucp);
#endif
void DynaRun(x64emu_t* emu)
{
// prepare setjump for signal handling
@ -206,7 +214,12 @@ void DynaRun(x64emu_t* emu)
}
if(emu->quit && emu->uc_link) {
emu->quit = 0;
my_setcontext(emu, emu->uc_link);
#ifdef BOX32
if(box64_is32bits)
my32_setcontext(emu, emu->uc_link);
else
#endif
my_setcontext(emu, emu->uc_link);
}
}
#endif

View File

@ -111,7 +111,7 @@ uintptr_t native_pass(dynarec_native_t* dyn, uintptr_t addr, int alternate, int
#endif
dyn->f.dfnone_here = 0;
NEW_INST;
MESSAGE(LOG_DUMP, "New Instruction x64:%p, native:%p\n", (void*)addr, (void*)dyn->block);
MESSAGE(LOG_DUMP, "New Instruction %s:%p, native:%p\n", is32bits?"x86":"x64",(void*)addr, (void*)dyn->block);
if(!ninst) {
GOTEST(x1, x2);
}

View File

@ -78,7 +78,12 @@ const char* GetParentSymbolVersion(elfheader_t* h, int version)
return box64_is32bits?GetParentSymbolVersion32(h, version):GetParentSymbolVersion64(h, version);
}
uint16_t GetParentSymbolVersionFlag32(elfheader_t* h, int index) { /* TODO */ return (uint16_t)-1; }
uint16_t GetParentSymbolVersionFlag32(elfheader_t* h, int index)
#ifndef BOX32
{ return (uint16_t)-1; }
#else
;
#endif
uint16_t GetParentSymbolVersionFlag64(elfheader_t* h, int index)
{
if(!h->VerDef._64 || (index<1))
@ -97,7 +102,12 @@ uint16_t GetParentSymbolVersionFlag(elfheader_t* h, int index)
return box64_is32bits?GetParentSymbolVersionFlag32(h, index):GetParentSymbolVersionFlag64(h, index);
}
uint16_t GetSymbolVersionFlag32(elfheader_t* h, int version) { /* TODO */ return (uint16_t)-1; }
uint16_t GetSymbolVersionFlag32(elfheader_t* h, int version)
#ifndef BOX32
{ return (uint16_t)-1; }
#else
;
#endif
uint16_t GetSymbolVersionFlag64(elfheader_t* h, int version)
{
if(version<2)

View File

@ -41,6 +41,40 @@ static int SymbolMatch(elfheader_t* h, uint32_t i, int ver, const char* vername,
return strcmp(vername, symvername)?0:1;
}
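// 32-bit counterpart of GetParentSymbolVersionFlag64: walk the Elf32_Verdef chain and return vd_flags for the matching version index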
uint16_t GetParentSymbolVersionFlag32(elfheader_t* h, int index)
{
if(!h->VerDef._32 || (index<1))
return (uint16_t)-1;
Elf32_Verdef *def = (Elf32_Verdef*)((uintptr_t)h->VerDef._32 + h->delta);
while(def) {
if(def->vd_ndx==index) {
return def->vd_flags;
}
def = def->vd_next?((Elf32_Verdef*)((uintptr_t)def + def->vd_next)):NULL;
}
return (uint16_t)-1;
}
uint16_t GetSymbolVersionFlag32(elfheader_t* h, int version)
{
if(version<2)
return (uint16_t)-1;
if(h->VerNeed._32) {
Elf32_Verneed *ver = (Elf32_Verneed*)((uintptr_t)h->VerNeed._32 + h->delta);
while(ver) {
Elf32_Vernaux *aux = (Elf32_Vernaux*)((uintptr_t)ver + ver->vn_aux);
for(int j=0; j<ver->vn_cnt; ++j) {
if(aux->vna_other==version)
return aux->vna_flags;
aux = (Elf32_Vernaux*)((uintptr_t)aux + aux->vna_next);
}
ver = ver->vn_next?((Elf32_Verneed*)((uintptr_t)ver + ver->vn_next)):NULL;
}
}
return GetParentSymbolVersionFlag32(h, version); // if symbol is "internal", use Def table instead
}
static Elf32_Sym* old_elf_lookup(elfheader_t* h, const char* symname, int ver, const char* vername, int local, int veropt)
{
// Prepare hash table
@ -90,16 +124,16 @@ static Elf32_Sym* new_elf_lookup(elfheader_t* h, const char* symname, int ver, c
const uint32_t symoffset = hashtab[1];
const uint32_t bloom_size = hashtab[2];
const uint32_t bloom_shift = hashtab[3];
const uint64_t *blooms = (uint64_t*)&hashtab[4];
const uint32_t *blooms = (uint32_t*)&hashtab[4];
const uint32_t *buckets = (uint32_t*)&blooms[bloom_size];
const uint32_t *chains = &buckets[nbuckets];
// get hash from symname to lookup
const uint32_t hash = new_elf_hash(symname);
// early check with bloom: if at least one bit is not set, a symbol is surely missing.
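// note: ELF32 GNU_HASH stores bloom words as 32-bit entries, so word selection and bit positions are computed modulo 32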
uint64_t word = blooms[(hash/64)%bloom_size];
uint64_t mask = 0
| 1LL << (hash%64)
| 1LL << ((hash>>bloom_shift)%64);
uint32_t word = blooms[(hash/32)%bloom_size];
uint32_t mask = 0
| 1LL << (hash%32)
| 1LL << ((hash>>bloom_shift)%32);
if ((word & mask) != mask) {
return NULL;
}
@ -127,7 +161,7 @@ static void new_elf_hash_dump(elfheader_t* h)
const uint32_t symoffset = hashtab[1];
const uint32_t bloom_size = hashtab[2];
const uint32_t bloom_shift = hashtab[3];
const uint64_t *blooms = (uint64_t*)&hashtab[4];
const uint32_t *blooms = (uint32_t*)&hashtab[4];
const uint32_t *buckets = (uint32_t*)&blooms[bloom_size];
const uint32_t *chains = &buckets[nbuckets];
printf_log(LOG_NONE, "===============Dump GNU_HASH from %s\n", h->name);

View File

@ -9,10 +9,6 @@
#include "elfload_dump.h"
#include "elfloader_private.h"
#ifndef SHT_CHECKSUM
#define SHT_CHECKSUM 0x6ffffff8
#endif
static const char* DumpSection(Elf64_Shdr *s, char* SST) {
static char buff[400];
switch (s->sh_type) {

View File

@ -875,7 +875,12 @@ int RelocateElf(lib_t *maplib, lib_t *local_maplib, int bindnow, int deepbind, e
return box64_is32bits?RelocateElf32(maplib, local_maplib, bindnow, deepbind, head):RelocateElf64(maplib, local_maplib, bindnow, deepbind, head);
}
int RelocateElfPlt32(lib_t *maplib, lib_t *local_maplib, int bindnow, int deepbind, elfheader_t* head) { /* TODO */ return -1; }
int RelocateElfPlt32(lib_t *maplib, lib_t *local_maplib, int bindnow, int deepbind, elfheader_t* head)
#ifndef BOX32
{ return -1; }
#else
;
#endif
int RelocateElfPlt64(lib_t *maplib, lib_t *local_maplib, int bindnow, int deepbind, elfheader_t* head)
{
int need_resolver = 0;
@ -953,7 +958,12 @@ uintptr_t GetLastByte(elfheader_t* h)
#endif
void checkHookedSymbols(elfheader_t* h); // in mallochook.c
void AddSymbols32(lib_t *maplib, elfheader_t* h) { /* TODO */ }
void AddSymbols32(lib_t *maplib, elfheader_t* h)
#ifndef BOX32
{ }
#else
;
#endif
void AddSymbols(lib_t *maplib, elfheader_t* h)
{
if(box64_is32bits) {
@ -1121,15 +1131,10 @@ void startMallocHook();
#else
void startMallocHook() {}
#endif
void RunElfInit32(elfheader_t* h, x64emu_t *emu) { /* TODO*/ }
void RunElfInit(elfheader_t* h, x64emu_t *emu)
{
if(!h || h->init_done)
return;
if(box64_is32bits) {
RunElfInit32(h, emu);
return;
}
// reset Segs Cache
memset(emu->segs_serial, 0, sizeof(emu->segs_serial));
uintptr_t p = h->initentry + h->delta;
@ -1157,11 +1162,24 @@ void RunElfInit(elfheader_t* h, x64emu_t *emu)
RunFunctionWithEmu(emu, 0, p, 3, my_context->argc, my_context->argv, my_context->envv);
printf_dump(LOG_DEBUG, "Done Init for %s\n", ElfName(h));
// and check init array now
Elf64_Addr *addr = (Elf64_Addr*)(h->initarray + h->delta);
for (size_t i=0; i<h->initarray_sz; ++i) {
if(addr[i]) {
printf_dump(LOG_DEBUG, "Calling Init[%zu] for %s @%p\n", i, ElfName(h), (void*)addr[i]);
RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 3, my_context->argc, my_context->argv, my_context->envv);
#ifdef BOX32
if(box64_is32bits) {
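// 32-bit binaries store init-array entries as Elf32_Addr, so read each entry as a 32-bit pointer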
Elf32_Addr *addr = (Elf32_Addr*)(h->initarray + h->delta);
for (size_t i=0; i<h->initarray_sz; ++i) {
if(addr[i]) {
printf_dump(LOG_DEBUG, "Calling Init[%zu] for %s @%p\n", i, ElfName(h), from_ptrv(addr[i]));
RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 3, my_context->argc, my_context->argv, my_context->envv);
}
}
} else
#endif
{
Elf64_Addr *addr = (Elf64_Addr*)(h->initarray + h->delta);
for (size_t i=0; i<h->initarray_sz; ++i) {
if(addr[i]) {
printf_dump(LOG_DEBUG, "Calling Init[%zu] for %s @%p\n", i, ElfName(h), (void*)addr[i]);
RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 3, my_context->argc, my_context->argv, my_context->envv);
}
}
}
@ -1191,15 +1209,10 @@ void RunDeferredElfInit(x64emu_t *emu)
box_free(List);
}
void RunElfFini32(elfheader_t* h, x64emu_t *emu) { /* TODO */ }
void RunElfFini(elfheader_t* h, x64emu_t *emu)
{
if(!h || h->fini_done || !h->init_done)
return;
if(box64_is32bits) {
RunElfFini32(h, emu);
return;
}
h->fini_done = 1;
// Call the registered cxa_atexit functions
CallCleanup(emu, h);
@ -1208,10 +1221,21 @@ void RunElfFini(elfheader_t* h, x64emu_t *emu)
printf_log(LOG_DEBUG, "Android does not support Fini for %s\n", ElfName(h));
#else
// first check fini array
Elf64_Addr *addr = (Elf64_Addr*)(h->finiarray + h->delta);
for (int i=h->finiarray_sz-1; i>=0; --i) {
printf_dump(LOG_DEBUG, "Calling Fini[%d] for %s @%p\n", i, ElfName(h), (void*)addr[i]);
RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 0);
#ifdef BOX32
if(box64_is32bits) {
Elf32_Addr *addr = (Elf32_Addr*)(h->finiarray + h->delta);
for (int i=h->finiarray_sz-1; i>=0; --i) {
printf_dump(LOG_DEBUG, "Calling Fini[%d] for %s @%p\n", i, ElfName(h), from_ptrv(addr[i]));
RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 0);
}
} else
#endif
{
Elf64_Addr *addr = (Elf64_Addr*)(h->finiarray + h->delta);
for (int i=h->finiarray_sz-1; i>=0; --i) {
printf_dump(LOG_DEBUG, "Calling Fini[%d] for %s @%p\n", i, ElfName(h), (void*)addr[i]);
RunFunctionWithEmu(emu, 0, (uintptr_t)addr[i], 0);
}
}
// then the "old-style" fini
if(h->finientry) {
@ -1499,7 +1523,12 @@ EXPORT int my_dl_iterate_phdr(x64emu_t *emu, void* F, void *data) {
return ret;
}
void ResetSpecialCaseMainElf32(elfheader_t* h) { /* TODO */ }
void ResetSpecialCaseMainElf32(elfheader_t* h)
#ifndef BOX32
{ }
#else
;
#endif
void ResetSpecialCaseMainElf(elfheader_t* h)
{
if(box64_is32bits) {
@ -1620,7 +1649,12 @@ static Elf64_Sym* ElfLocateSymbol(elfheader_t* head, uintptr_t *offs, uintptr_t
return sym;
}
void* ElfGetLocalSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt) { /* TOODO */ return NULL; }
void* ElfGetLocalSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
#ifndef BOX32
{ return NULL; }
#else
;
#endif
void* ElfGetLocalSymbolStartEnd64(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf64_Sym* sym = ElfLocateSymbol(head, offs, end, symname, ver, vername, local, veropt);
@ -1636,7 +1670,12 @@ void* ElfGetLocalSymbolStartEnd(elfheader_t* head, uintptr_t *offs, uintptr_t *e
return box64_is32bits?ElfGetLocalSymbolStartEnd32(head, offs, end, symname, ver, vername, local, veropt):ElfGetLocalSymbolStartEnd64(head, offs, end, symname, ver, vername, local, veropt);
}
void* ElfGetGlobalSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt) { /*T ODO */ return NULL; }
void* ElfGetGlobalSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
#ifndef BOX32
{ return NULL; }
#else
;
#endif
void* ElfGetGlobalSymbolStartEnd64(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf64_Sym* sym = ElfLocateSymbol(head, offs, end, symname, ver, vername, local, veropt);
@ -1652,7 +1691,12 @@ void* ElfGetGlobalSymbolStartEnd(elfheader_t* head, uintptr_t *offs, uintptr_t *
return box64_is32bits?ElfGetGlobalSymbolStartEnd32(head, offs, end, symname, ver, vername, local, veropt):ElfGetGlobalSymbolStartEnd64(head, offs, end, symname, ver, vername, local, veropt);
}
void* ElfGetWeakSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt) { /* TODO */ return NULL; }
void* ElfGetWeakSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
#ifndef BOX32
{ return NULL; }
#else
;
#endif
void* ElfGetWeakSymbolStartEnd64(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf64_Sym* sym = ElfLocateSymbol(head, offs, end, symname, ver, vername, local, veropt);
@ -1668,7 +1712,12 @@ void* ElfGetWeakSymbolStartEnd(elfheader_t* head, uintptr_t *offs, uintptr_t *en
return box64_is32bits?ElfGetWeakSymbolStartEnd32(head, offs, end, symname, ver, vername, local, veropt):ElfGetWeakSymbolStartEnd64(head, offs, end, symname, ver, vername, local, veropt);
}
int ElfGetSymTabStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname) { /* TODO */ return 0; }
int ElfGetSymTabStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname)
#ifndef BOX32
{ return 0; }
#else
;
#endif
int ElfGetSymTabStartEnd64(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname)
{
Elf64_Sym* sym = ElfSymTabLookup64(head, symname);

View File

@ -42,6 +42,12 @@
#include "x64tls.h"
#include "box32.h"
ptr_t pltResolver32 = ~(ptr_t)0;
extern void* my__IO_2_1_stderr_;
extern void* my__IO_2_1_stdin_ ;
extern void* my__IO_2_1_stdout_;
// return the index of header (-1 if it doesn't exist)
static int getElfIndex(box64context_t* ctx, elfheader_t* head) {
for (int i=0; i<ctx->elfsize; ++i)
@ -60,6 +66,68 @@ static elfheader_t* checkElfLib(elfheader_t* h, library_t* lib)
return h;
}
static Elf32_Sym* ElfLocateSymbol(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf32_Sym* sym = ElfLookup32(head, symname, *ver, *vername, local, *veropt);
if(!sym) return NULL;
if(head->VerSym && !*veropt) {
int idx = ((uintptr_t)sym - (uintptr_t)head->DynSym._32)/sizeof(Elf32_Sym);
int version = ((Elf32_Half*)((uintptr_t)head->VerSym+head->delta))[idx];
if(version!=-1) version &=0x7fff;
const char* symvername = GetSymbolVersion(head, version);
Elf32_Half flags = GetSymbolVersionFlag(head, version);
if(version>1 && *ver<2 && (flags==0)) {
*ver = version;
*vername = symvername;
*veropt = 1;
} else if(flags==0 && !*veropt && version>1 && *ver>1 && !strcmp(symvername, *vername)) {
*veropt = 1;
}
}
if(!sym->st_shndx) return NULL;
int vis = ELF32_ST_VISIBILITY(sym->st_other);
if(vis==STV_HIDDEN && !local)
return NULL;
return sym;
}
static void GrabX32CopyMainElfReloc(elfheader_t* head)
{
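// collect the main binary's R_386_COPY relocations in advance and register the copied symbols in globdata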
if(head->rela) {
int cnt = head->relasz / head->relaent;
Elf32_Rela* rela = (Elf32_Rela *)(head->rela + head->delta);
printf_dump(LOG_DEBUG, "Grabbing R_386_COPY Relocation(s) in advance for %s\n", head->name);
for (int i=0; i<cnt; ++i) {
int t = ELF32_R_TYPE(rela[i].r_info);
if(t == R_386_COPY) {
Elf32_Sym *sym = &head->DynSym._32[ELF32_R_SYM(rela[i].r_info)];
const char* symname = SymName32(head, sym);
int version = head->VerSym?((Elf32_Half*)((uintptr_t)head->VerSym+head->delta))[ELF32_R_SYM(rela[i].r_info)]:-1;
if(version!=-1) version &=0x7fff;
const char* vername = GetSymbolVersion(head, version);
Elf32_Half flags = GetSymbolVersionFlag(head, version);
int veropt = flags?0:1;
uintptr_t offs = sym->st_value + head->delta;
AddUniqueSymbol(my_context->globdata, symname, offs, sym->st_size, version, vername, veropt);
}
}
}
}
void checkHookedSymbols(elfheader_t* h);
void AddSymbols32(lib_t *maplib, elfheader_t* h)
{
//if(box64_dump && h->hash) old_elf_hash_dump(h);
//if(box64_dump && h->gnu_hash) new_elf_hash_dump(h);
if(box64_dump && h->DynSym._32) DumpDynSym32(h);
if(h==my_context->elfs[0])
GrabX32CopyMainElfReloc(h);
#ifndef STATICBUILD
checkHookedSymbols(h);
#endif
}
int AllocLoadElfMemory32(box64context_t* context, elfheader_t* head, int mainbin)
{
ptr_t offs = 0;
@ -624,3 +692,199 @@ int RelocateElf32(lib_t *maplib, lib_t *local_maplib, int bindnow, int deepbind,
}
return 0;
}
int RelocateElfPlt32(lib_t *maplib, lib_t *local_maplib, int bindnow, int deepbind, elfheader_t* head)
{
int need_resolver = 0;
if(0 && (head->flags&DF_BIND_NOW) && !bindnow) { // disable for now, needs more symbol in a fow libs like gtk and nss3
bindnow = 1;
printf_log(LOG_DEBUG, "Forcing %s to Bind Now\n", head->name);
}
if(head->pltrel) {
int cnt = head->pltsz / head->pltent;
if(head->pltrel==DT_REL) {
DumpRelTable32(head, cnt, (Elf32_Rel *)(head->jmprel + head->delta), "PLT");
printf_log(LOG_DEBUG, "Applying %d PLT Relocation(s) for %s\n", cnt, head->name);
if(RelocateElfREL(maplib, local_maplib, bindnow, deepbind, head, cnt, (Elf32_Rel *)(head->jmprel + head->delta), &need_resolver))
return -1;
} else if(head->pltrel==DT_RELA) {
DumpRelATable32(head, cnt, (Elf32_Rela *)(head->jmprel + head->delta), "PLT");
printf_log(LOG_DEBUG, "Applying %d PLT Relocation(s) with Addend for %s\n", cnt, head->name);
if(RelocateElfRELA(maplib, local_maplib, bindnow, deepbind, head, cnt, (Elf32_Rela *)(head->jmprel + head->delta), &need_resolver))
return -1;
}
if(need_resolver) {
if(pltResolver32==~(ptr_t)0) {
pltResolver32 = AddBridge(my_context->system, vFEv, PltResolver32, 0, "(PltResolver)");
}
if(head->pltgot) {
*(ptr_t*)from_ptrv(head->pltgot+head->delta+8) = pltResolver32;
*(ptr_t*)from_ptrv(head->pltgot+head->delta+4) = to_ptrv(head);
printf_log(LOG_DEBUG, "PLT Resolver injected in plt.got at %p\n", from_ptrv(head->pltgot+head->delta+8));
} else if(head->got) {
*(ptr_t*)from_ptrv(head->got+head->delta+8) = pltResolver32;
*(ptr_t*)from_ptrv(head->got+head->delta+4) = to_ptrv(head);
printf_log(LOG_DEBUG, "PLT Resolver injected in got at %p\n", from_ptrv(head->got+head->delta+8));
}
}
}
return 0;
}
void ResetSpecialCaseMainElf32(elfheader_t* h)
{
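// copy the host stdio FILE structures over the guest's _IO_2_1_* / _IO_* symbols and record their 32-bit addresses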
Elf32_Sym *sym = NULL;
for (uint32_t i=0; i<h->numDynSym; ++i) {
if(h->DynSym._32[i].st_info == 17) {
sym = h->DynSym._32+i;
const char * symname = h->DynStr+sym->st_name;
if(strcmp(symname, "_IO_2_1_stderr_")==0 && (from_ptrv(sym->st_value+h->delta))) {
memcpy(from_ptrv(sym->st_value+h->delta), stderr, sym->st_size);
my__IO_2_1_stderr_ = from_ptrv(sym->st_value+h->delta);
printf_log(LOG_DEBUG, "BOX32: Set @_IO_2_1_stderr_ to %p\n", my__IO_2_1_stderr_);
} else
if(strcmp(symname, "_IO_2_1_stdin_")==0 && (from_ptrv(sym->st_value+h->delta))) {
memcpy(from_ptrv(sym->st_value+h->delta), stdin, sym->st_size);
my__IO_2_1_stdin_ = from_ptrv(sym->st_value+h->delta);
printf_log(LOG_DEBUG, "BOX32: Set @_IO_2_1_stdin_ to %p\n", my__IO_2_1_stdin_);
} else
if(strcmp(symname, "_IO_2_1_stdout_")==0 && (from_ptrv(sym->st_value+h->delta))) {
memcpy(from_ptrv(sym->st_value+h->delta), stdout, sym->st_size);
my__IO_2_1_stdout_ = from_ptrv(sym->st_value+h->delta);
printf_log(LOG_DEBUG, "BOX32: Set @_IO_2_1_stdout_ to %p\n", my__IO_2_1_stdout_);
} else
if(strcmp(symname, "_IO_stderr_")==0 && (from_ptrv(sym->st_value+h->delta))) {
memcpy(from_ptrv(sym->st_value+h->delta), stderr, sym->st_size);
my__IO_2_1_stderr_ = from_ptrv(sym->st_value+h->delta);
printf_log(LOG_DEBUG, "BOX32: Set @_IO_stderr_ to %p\n", my__IO_2_1_stderr_);
} else
if(strcmp(symname, "_IO_stdin_")==0 && (from_ptrv(sym->st_value+h->delta))) {
memcpy(from_ptrv(sym->st_value+h->delta), stdin, sym->st_size);
my__IO_2_1_stdin_ = from_ptrv(sym->st_value+h->delta);
printf_log(LOG_DEBUG, "BOX32: Set @_IO_stdin_ to %p\n", my__IO_2_1_stdin_);
} else
if(strcmp(symname, "_IO_stdout_")==0 && (from_ptrv(sym->st_value+h->delta))) {
memcpy(from_ptrv(sym->st_value+h->delta), stdout, sym->st_size);
my__IO_2_1_stdout_ = from_ptrv(sym->st_value+h->delta);
printf_log(LOG_DEBUG, "BOX32: Set @_IO_stdout_ to %p\n", my__IO_2_1_stdout_);
}
}
}
}
void* ElfGetLocalSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf32_Sym* sym = ElfLocateSymbol(head, offs, end, symname, ver, vername, local, veropt);
if(!sym) return NULL;
int bind = ELF32_ST_BIND(sym->st_info);
if(bind!=STB_LOCAL) return 0;
if(offs) *offs = sym->st_value + head->delta;
if(end) *end = sym->st_value + head->delta + sym->st_size;
return sym;
}
void* ElfGetGlobalSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf32_Sym* sym = ElfLocateSymbol(head, offs, end, symname, ver, vername, local, veropt);
if(!sym) return NULL;
int bind = ELF32_ST_BIND(sym->st_info);
if(bind!=STB_GLOBAL && bind!=STB_GNU_UNIQUE) return 0;
if(offs) *offs = sym->st_value + head->delta;
if(end) *end = sym->st_value + head->delta + sym->st_size;
return sym;
}
void* ElfGetWeakSymbolStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname, int* ver, const char** vername, int local, int* veropt)
{
Elf32_Sym* sym = ElfLocateSymbol(head, offs, end, symname, ver, vername, local, veropt);
if(!sym) return NULL;
int bind = ELF32_ST_BIND(sym->st_info);
if(bind!=STB_WEAK) return 0;
if(offs) *offs = sym->st_value + head->delta;
if(end) *end = sym->st_value + head->delta + sym->st_size;
return sym;
}
int ElfGetSymTabStartEnd32(elfheader_t* head, uintptr_t *offs, uintptr_t *end, const char* symname)
{
Elf32_Sym* sym = ElfSymTabLookup32(head, symname);
if(!sym) return 0;
if(!sym->st_shndx) return 0;
if(!sym->st_size) return 0; //needed?
if(offs) *offs = sym->st_value + head->delta;
if(end) *end = sym->st_value + head->delta + sym->st_size;
return 1;
}
EXPORT void PltResolver32(x64emu_t* emu)
{
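// lazy PLT binding for 32-bit ELF: the PLT stub pushed the elfheader address and the relocation slot offset before jumping here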
ptr_t addr = Pop32(emu);
int slot = (int)Pop32(emu);
elfheader_t *h = (elfheader_t*)from_ptrv(addr);
library_t* lib = h->lib;
lib_t* local_maplib = GetMaplib(lib);
int deepbind = GetDeepBind(lib);
printf_dump(LOG_DEBUG, "PltResolver32: Addr=%p, Slot=%d Return=%p: elf is %s (VerSym=%p)\n", from_ptrv(addr), slot, *(ptr_t*)from_ptrv(R_ESP), h->name, h->VerSym);
Elf32_Rel * rel = (Elf32_Rel *)(from_ptrv(h->jmprel + h->delta + slot));
Elf32_Sym *sym = &h->DynSym._32[ELF32_R_SYM(rel->r_info)];
int bind = ELF32_ST_BIND(sym->st_info);
const char* symname = SymName32(h, sym);
int version = h->VerSym?((Elf32_Half*)((uintptr_t)h->VerSym+h->delta))[ELF32_R_SYM(rel->r_info)]:-1;
if(version!=-1) version &= 0x7fff;
const char* vername = GetSymbolVersion(h, version);
Elf32_Half flags = GetSymbolVersionFlag(h, version);
int veropt = flags?0:1;
ptr_t *p = (uint32_t*)from_ptrv(rel->r_offset + h->delta);
uintptr_t offs = 0;
uintptr_t end = 0;
Elf32_Sym *elfsym = NULL;
if(bind==STB_LOCAL) {
elfsym = ElfDynSymLookup32(h, symname);
if(elfsym && elfsym->st_shndx) {
offs = elfsym->st_value + h->delta;
end = offs + elfsym->st_size;
}
if(!offs && !end && local_maplib && deepbind)
GetLocalSymbolStartEnd(local_maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
if(!offs && !end)
GetLocalSymbolStartEnd(my_context->maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
if(!offs && !end && local_maplib && !deepbind)
GetLocalSymbolStartEnd(local_maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
} else if(bind==STB_WEAK) {
if(local_maplib && deepbind)
GetGlobalWeakSymbolStartEnd(local_maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
else
GetGlobalWeakSymbolStartEnd(my_context->maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
if(!offs && !end && local_maplib && !deepbind)
GetGlobalWeakSymbolStartEnd(local_maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
} else {
if(!offs && !end && local_maplib && deepbind)
GetGlobalSymbolStartEnd(local_maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
if(!offs && !end)
GetGlobalSymbolStartEnd(my_context->maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
if(!offs && !end && local_maplib && !deepbind)
GetGlobalSymbolStartEnd(local_maplib, symname, &offs, &end, h, version, vername, veropt, (void**)&elfsym);
}
if (!offs) {
printf_log(LOG_NONE, "Error: PltResolver32: Symbol %s(ver %d: %s%s%s) not found, cannot apply R_386_JMP_SLOT %p (%p) in %s\n", symname, version, symname, vername?"@":"", vername?vername:"", p, from_ptrv(*p), h->name);
emu->quit = 1;
return;
} else {
elfheader_t* sym_elf = FindElfSymbol(my_context, elfsym);
offs = (uintptr_t)getAlternate(from_ptrv(offs));
if(p) {
printf_dump(LOG_DEBUG, " Apply %s R_386_JMP_SLOT %p with sym=%s(ver %d: %s%s%s) (%p -> %p / %s)\n", (bind==STB_LOCAL)?"Local":((bind==STB_WEAK)?"Weak":"Global"), p, symname, version, symname, vername?"@":"", vername?vername:"",from_ptrv(*p), from_ptrv(offs), ElfName(FindElfAddress(my_context, offs)));
*p = offs;
} else {
printf_log(LOG_NONE, "PltResolver32: Warning, Symbol %s(ver %d: %s%s%s) found, but Jump Slot Offset is NULL \n", symname, version, symname, vername?"@":"", vername?vername:"");
}
}
// jmp to function
R_EIP = offs;
}

View File

@ -194,8 +194,12 @@ typedef struct elfheader_s {
#define STB_GNU_UNIQUE 10
#endif
#ifndef ELF32_ST_VISIBILITY
#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
#endif
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
#endif
elfheader_t* ParseElfHeader32(FILE* f, const char* name, int exec);

View File

@ -252,8 +252,10 @@ elfheader_t* ParseElfHeader32(FILE* f, const char* name, int exec)
printf_log(LOG_DEBUG, "The DT_INIT_ARRAYSZ is %d\n", h->initarray_sz);
break;
case DT_PREINIT_ARRAYSZ:
if(val)
printf_log(LOG_NONE, "Warning, PreInit Array (size=%d) present and ignored!\n", val);
#ifndef ANDROID
if(val)
printf_log(LOG_NONE, "Warning, PreInit Array (size=%d) present and ignored!\n", val);
#endif
break;
case DT_FINI: // Exit hook
h->finientry = ptr;

View File

@ -64,11 +64,29 @@ static void internalX64Setup(x64emu_t* emu, box64context_t *context, uintptr_t s
// set default value
R_RIP = start;
R_RSP = (stack + stacksize) & ~7; // align stack start, always
#ifdef BOX32
if(box64_is32bits) {
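// a 32-bit process needs its stack below 4GB: abort if the mapping is too high, and clamp RSP when only the top crosses the boundary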
if(stack>=0x100000000LL) {
printf_log(LOG_NONE, "BOX32: Stack pointer too high (%p), aborting\n", (void*)stack);
abort();
}
if(R_RSP>=0x100000000LL) { // special case, stack is just a bit too high
R_RSP = 0x100000000LL - 16;
}
}
#endif
// fake init of segments...
emu->segs[_CS] = 0x33;
emu->segs[_DS] = emu->segs[_ES] = emu->segs[_SS] = 0x2b;
emu->segs[_FS] = 0x43;
emu->segs[_GS] = default_gs;
if(box64_is32bits) {
emu->segs[_CS] = 0x23;
emu->segs[_DS] = emu->segs[_ES] = emu->segs[_SS] = 0x2b;
emu->segs[_FS] = default_fs;
emu->segs[_GS] = 0x33;
} else {
emu->segs[_CS] = 0x33;
emu->segs[_DS] = emu->segs[_ES] = emu->segs[_SS] = 0x2b;
emu->segs[_FS] = 0x43;
emu->segs[_GS] = default_gs;
}
// setup fpu regs
reset_fpu(emu);
emu->mxcsr.x32 = 0x1f80;
@ -77,7 +95,7 @@ static void internalX64Setup(x64emu_t* emu, box64context_t *context, uintptr_t s
EXPORTDYN
x64emu_t *NewX64Emu(box64context_t *context, uintptr_t start, uintptr_t stack, int stacksize, int ownstack)
{
printf_log(LOG_DEBUG, "Allocate a new X86_64 Emu, with EIP=%p and Stack=%p/0x%X\n", (void*)start, (void*)stack, stacksize);
printf_log(LOG_DEBUG, "Allocate a new X86_64 Emu, with %cIP=%p and Stack=%p/0x%X\n", box64_is32bits?'E':'R', (void*)start, (void*)stack, stacksize);
x64emu_t *emu = (x64emu_t*)box_calloc(1, sizeof(x64emu_t));
@ -161,7 +179,7 @@ void CallCleanup(x64emu_t *emu, elfheader_t* h)
if(!h)
return;
for(int i=h->clean_sz-1; i>=0; --i) {
printf_log(LOG_DEBUG, "Call cleanup #%d\n", i);
printf_log(LOG_DEBUG, "Call cleanup #%d (args:%d, arg:%p)\n", i, h->cleanups[i].arg, h->cleanups[i].a);
RunFunctionWithEmu(emu, 0, (uintptr_t)(h->cleanups[i].f), h->cleanups[i].arg, h->cleanups[i].a );
// now remove the cleanup
if(i!=h->clean_sz-1)
@ -325,10 +343,14 @@ void SetEBP(x64emu_t *emu, uint32_t v)
{
R_EBP = v;
}
//void SetESP(x64emu_t *emu, uint32_t v)
//{
// R_ESP = v;
//}
void SetESP(x64emu_t *emu, uint32_t v)
{
R_ESP = v;
}
void SetEIP(x64emu_t *emu, uint32_t v)
{
R_EIP = v;
}
void SetRAX(x64emu_t *emu, uint64_t v)
{
R_RAX = v;
@ -373,7 +395,7 @@ uint64_t GetRBP(x64emu_t *emu)
{
return R_RBP;
}
/*void SetFS(x64emu_t *emu, uint16_t v)
void SetFS(x64emu_t *emu, uint16_t v)
{
emu->segs[_FS] = v;
emu->segs_serial[_FS] = 0;
@ -381,7 +403,7 @@ uint64_t GetRBP(x64emu_t *emu)
uint16_t GetFS(x64emu_t *emu)
{
return emu->segs[_FS];
}*/
}
void ResetFlags(x64emu_t *emu)
@ -572,9 +594,14 @@ void EmuCall(x64emu_t* emu, uintptr_t addr)
uint64_t old_rip = R_RIP;
//Push64(emu, GetRBP(emu)); // set frame pointer
//SetRBP(emu, GetRSP(emu)); // save RSP
R_RSP -= 200;
R_RSP &= ~63LL;
PushExit(emu);
//R_RSP -= 200;
//R_RSP &= ~63LL;
#ifdef BOX32
if(box64_is32bits)
PushExit_32(emu);
else
#endif
PushExit(emu);
R_RIP = addr;
emu->df = d_none;
Run(emu, 0);

View File

@ -5,6 +5,9 @@
typedef struct box64context_s box64context_t;
typedef struct x64_ucontext_s x64_ucontext_t;
#ifdef BOX32
typedef struct i386_ucontext_s i386_ucontext_t;
#endif
#define ERR_UNIMPL 1
#define ERR_DIVBY0 2
@ -116,6 +119,7 @@ typedef struct x64emu_s {
uintptr_t prev2_ip;
#endif
// scratch stack, used for alignment of double and 64bits ints on arm. 200 elements should be enough
__int128_t dummy_align; // here to have scratch 128bits aligned
uint64_t scratch[200];
// local stack, do be deleted when emu is freed
void* stack2free; // this is the stack to free (can be NULL)
@ -126,7 +130,7 @@ typedef struct x64emu_s {
uintptr_t old_savedsp;
#endif
x64_ucontext_t *uc_link; // to handle setcontext
void* uc_link; // to handle setcontext (can point to an x64_ucontext_t or an i386_ucontext_t)
int type; // EMUTYPE_xxx define
} x64emu_t;

View File

@ -88,6 +88,10 @@ static uint8_t Peek8(uintptr_t addr, uintptr_t offset)
void x64Int3(x64emu_t* emu, uintptr_t* addr)
{
if(box64_is32bits) {
x86Int3(emu,addr);
return;
}
onebridge_t* bridge = (onebridge_t*)(*addr-1);
if(Peek8(*addr, 0)=='S' && Peek8(*addr, 1)=='C') // Signature for "Out of x86 door"
{
@ -393,4 +397,12 @@ void print_cycle_log(int loglevel) {
}
}
}
}
}
#ifndef BOX32
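// fallback stub when Box32 support is not compiled in: reaching a 32-bit bridge is a fatal error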
void x86Int3(x64emu_t* emu, uintptr_t* addr)
{
printf_log(LOG_NONE, "Error: Calling 32bits wrapped function without box32 support built in\n");
abort();
}
#endif

View File

@ -27,6 +27,9 @@
#include "modrm.h"
int my_setcontext(x64emu_t* emu, void* ucp);
#ifdef BOX32
int my32_setcontext(x64emu_t* emu, void* ucp);
#endif
#ifdef TEST_INTERPRETER
int RunTest(x64test_t *test)
@ -2232,7 +2235,12 @@ if(emu->segs[_CS]!=0x33 && emu->segs[_CS]!=0x23) printf_log(LOG_NONE, "Warning,
// setcontext handling
else if(emu->quit && emu->uc_link) {
emu->quit = 0;
my_setcontext(emu, emu->uc_link);
#ifdef BOX32
if(box64_is32bits)
my32_setcontext(emu, emu->uc_link);
else
#endif
my_setcontext(emu, emu->uc_link);
addr = R_RIP;
goto x64emurun;
}

View File

@ -639,7 +639,15 @@ uintptr_t Run64(x64emu_t *emu, rex_t rex, int seg, uintptr_t addr)
R_RAX = *(uint32_t*)(tlsdata+tmp64u);
}
break;
case 0xA2: /* MOV Ob,AL */
if(rex.is32bits) {
tmp32s = F32S;
*(uint8_t*)(uintptr_t)(tlsdata+tmp32s) = R_AL;
} else {
tmp64u = F64;
*(uint8_t*)(tlsdata+tmp64u) = R_AL;
}
break;
case 0xA3: /* MOV FS:Od,EAX */
if(rex.is32bits) {
tmp32s = F32S;

View File

@ -24,6 +24,9 @@
#endif
#include "x64tls.h"
#include "bridge.h"
#ifdef BOX32
#include "box32.h"
#endif
#define PARITY(x) (((emu->x64emu_parity_tab[(x) / 32] >> ((x) % 32)) & 1) == 0)
#define XOR2(x) (((x) ^ ((x)>>1)) & 0x1)
@ -53,7 +56,7 @@ void EXPORT my___libc_init(x64emu_t* emu, void* raw_args , void (*onexit)(void)
emu->quit = 1; // finished!
}
#else
int32_t EXPORT my___libc_start_main(x64emu_t* emu, int (*main) (int, char * *, char * *), int argc, char * * ubp_av, void (*init) (void), void (*fini) (void), void (*rtld_fini) (void), void (* stack_end))
EXPORT int32_t my___libc_start_main(x64emu_t* emu, int (*main) (int, char * *, char * *), int argc, char * * ubp_av, void (*init) (void), void (*fini) (void), void (*rtld_fini) (void), void (* stack_end))
{
(void)argc; (void)ubp_av; (void)fini; (void)rtld_fini; (void)stack_end;
@ -106,6 +109,54 @@ int32_t EXPORT my___libc_start_main(x64emu_t* emu, int (*main) (int, char * *, c
}
return (int)GetEAX(emu);
}
#ifdef BOX32
#ifdef ANDROID
void EXPORT my32___libc_init(x64emu_t* emu, void* raw_args , void (*onexit)(void) , int (*main)(int, char**, char**), void const * const structors )
{
//TODO: register fini
// let's cheat and set all args...
Push_32(emu, (uint32_t)my_context->envv32);
Push_32(emu, (uint32_t)my_context->argv32);
Push_32(emu, (uint32_t)my_context->argc);
printf_log(LOG_DEBUG, "Transfert to main(%d, %p, %p)=>%p from __libc_init\n", my_context->argc, my_context->argv, my_context->envv, main);
// should call structors->preinit_array and structors->init_array!
// call main and finish
PushExit_32(emu);
R_EIP=to_ptrv(main);
DynaRun(emu);
emu->quit = 1; // finished!
}
#else
int32_t EXPORT my32___libc_start_main(x64emu_t* emu, int (*main) (int, char * *, char * *), int argc, char * * ubp_av, void (*init) (void), void (*fini) (void), void (*rtld_fini) (void), void (* stack_end))
{
// let's cheat and set all args...
Push_32(emu, my_context->envv32);
Push_32(emu, my_context->argv32);
Push_32(emu, my_context->argc);
if(init) {
PushExit_32(emu);
R_EIP=to_ptrv(*init);
printf_log(LOG_DEBUG, "Calling init(%p) from __libc_start_main\n", *init);
DynaRun(emu);
if(emu->error) // any error, don't bother with more
return 0;
emu->quit = 0;
}
printf_log(LOG_DEBUG, "Transfert to main(%d, %p, %p)=>%p from __libc_start_main\n", my_context->argc, my_context->argv, my_context->envv, main);
// call main and finish
PushExit_32(emu);
R_EIP=to_ptrv(main);
DynaRun(emu);
emu->quit = 1; // finished!
return 0;
}
#endif
#endif
#endif
const char* GetNativeName(void* p)

View File

@ -50,6 +50,12 @@ static inline void Push16(x64emu_t *emu, uint16_t v)
*((uint16_t*)R_RSP) = v;
}
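// Box32 stack helpers: push/pop 32-bit values through the emulated 32-bit stack pointer (R_ESP)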
static inline void Push_32(x64emu_t *emu, uint32_t v)
{
R_ESP -= 4;
*((uint32_t*)(uintptr_t)R_ESP) = v;
}
static inline void Push32(x64emu_t *emu, uint32_t v)
{
R_RSP -= 4;
@ -70,6 +76,13 @@ static inline uint16_t Pop16(x64emu_t *emu)
return *st;
}
static inline uint32_t Pop_32(x64emu_t *emu)
{
uint32_t* st = (uint32_t*)(uintptr_t)R_RSP;
R_ESP += 4;
return *st;
}
static inline uint32_t Pop32(x64emu_t *emu)
{
uint32_t* st = (uint32_t*)R_RSP;
@ -90,6 +103,13 @@ static inline void PushExit(x64emu_t* emu)
*((uint64_t*)R_RSP) = my_context->exit_bridge;
}
#ifdef BOX32
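// 32-bit variant of PushExit: push the exit-bridge address so a return leaves the emulator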
static inline void PushExit_32(x64emu_t* emu)
{
R_ESP -= 4;
*((ptr_t*)(uintptr_t)R_ESP) = my_context->exit_bridge;
}
#endif
// the op code definition can be found here: http://ref.x86asm.net/geek32.html
reg64_t* GetECommon(x64emu_t* emu, uintptr_t* addr, rex_t rex, uint8_t m, uint8_t delta);
@ -232,6 +252,7 @@ void x64Syscall(x64emu_t *emu);
void x64Int3(x64emu_t* emu, uintptr_t* addr);
x64emu_t* x64emu_fork(x64emu_t* e, int forktype);
void x86Syscall(x64emu_t *emu); //32bits syscall
void x86Int3(x64emu_t* emu, uintptr_t* addr);
uintptr_t GetSegmentBaseEmu(x64emu_t* emu, int seg);
#define GetGSBaseEmu(emu) GetSegmentBaseEmu(emu, _GS)

View File

@ -11,6 +11,9 @@
#include "x64emu_private.h"
#include "x64tls.h"
#include "elfloader.h"
#ifdef BOX32
#include "box32.h"
#endif
typedef struct thread_area_s
{
@ -118,12 +121,19 @@ uint32_t my_modify_ldt(x64emu_t* emu, int op, thread_area_t* td, int size)
return (uint32_t)-1;
}
/*
my_context->segtls[idx].base = td->base_addr;
my_context->segtls[idx].limit = td->limit;
pthread_setspecific(my_context->segtls[idx].key, (void*)my_context->segtls[idx].base);
*/
if(box64_is32bits) {
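// record the requested descriptor in segtls (base/limit/present) and publish its base through a per-thread key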
emu->segs_serial[_GS] = 0;
my_context->segtls[idx].base = td->base_addr;
my_context->segtls[idx].limit = td->limit;
my_context->segtls[idx].present = 1;
if(idx>8 && !my_context->segtls[idx].key_init) {
pthread_key_create(&my_context->segtls[idx].key, NULL);
my_context->segtls[idx].key_init = 1;
}
if(my_context->segtls[idx].key_init)
pthread_setspecific(my_context->segtls[idx].key, (void*)my_context->segtls[idx].base);
}
ResetSegmentsCache(emu);
return 0;
@ -220,6 +230,7 @@ int my_arch_prctl(x64emu_t *emu, int code, void* addr)
#define POS_TLS 0x200
#define POS_TLS_32 0x50
/*
tls record should looks like:
void* tcb 0x00
@ -255,7 +266,7 @@ static tlsdatasize_t* setupTLSData(box64context_t* context)
// Setup the GS segment:
int dtssize = sizeDTS(context);
int datasize = sizeTLSData(context->tlssize);
void *ptr_oversized = (char*)box_malloc(dtssize+POS_TLS+datasize);
void *ptr_oversized = (char*)box_malloc(dtssize+(box64_is32bits?POS_TLS_32:POS_TLS)+datasize);
void *ptr = (void*)((uintptr_t)ptr_oversized + datasize);
memcpy((void*)((uintptr_t)ptr-context->tlssize), context->tlsdata, context->tlssize);
tlsdatasize_t *data = (tlsdatasize_t*)box_calloc(1, sizeof(tlsdatasize_t));
@ -264,23 +275,45 @@ static tlsdatasize_t* setupTLSData(box64context_t* context)
data->ptr = ptr_oversized;
data->n_elfs = context->elfsize;
pthread_setspecific(context->tlskey, data);
// copy canary...
memset((void*)((uintptr_t)ptr), 0, POS_TLS+dtssize); // set to 0 remining bytes
memcpy((void*)((uintptr_t)ptr+0x28), context->canary, sizeof(void*)); // put canary in place
uintptr_t tlsptr = (uintptr_t)ptr;
memcpy((void*)((uintptr_t)ptr+0x0), &tlsptr, sizeof(void*));
memcpy((void*)((uintptr_t)ptr+0x10), &tlsptr, sizeof(void*)); // set tcb and self same address
uintptr_t dtp = (uintptr_t)ptr+POS_TLS;
memcpy((void*)(tlsptr+sizeof(void*)), &dtp, sizeof(void*));
if(dtssize) {
for (int i=0; i<context->elfsize; ++i) {
// set pointer
dtp = (uintptr_t)ptr + GetTLSBase(context->elfs[i]);
*(uint64_t*)((uintptr_t)ptr+POS_TLS+i*16) = dtp;
*(uint64_t*)((uintptr_t)ptr+POS_TLS+i*16+8) = i; // index
#ifdef BOX32
if(box64_is32bits) {
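// 32-bit TCB layout used here: self pointer at +0x0, DTV at +0x4, vsyscall address at +0x10, stack canary at +0x14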
// copy canary...
memset((void*)((uintptr_t)ptr), 0, POS_TLS_32+dtssize); // set remaining bytes to 0
memcpy((void*)((uintptr_t)ptr+0x14), context->canary, 4); // put canary in place
ptr_t tlsptr = to_ptrv(ptr);
memcpy((void*)((uintptr_t)ptr+0x0), &tlsptr, 4);
ptr_t dtp = to_ptrv(ptr+POS_TLS_32);
memcpy(from_ptrv(tlsptr+0x4), &dtp, 4);
if(dtssize) {
for (int i=0; i<context->elfsize; ++i) {
// set pointer
dtp = to_ptrv(ptr + GetTLSBase(context->elfs[i]));
memcpy((void*)((uintptr_t)ptr+POS_TLS_32+i*8), &dtp, 4);
memcpy((void*)((uintptr_t)ptr+POS_TLS_32+i*8+4), &i, 4); // index
}
}
memcpy((void*)((uintptr_t)ptr+0x10), &context->vsyscall, 4); // address of vsyscall
} else
#endif
{
// copy canary...
memset((void*)((uintptr_t)ptr), 0, POS_TLS+dtssize); // set remaining bytes to 0
memcpy((void*)((uintptr_t)ptr+0x28), context->canary, sizeof(void*)); // put canary in place
uintptr_t tlsptr = (uintptr_t)ptr;
memcpy((void*)((uintptr_t)ptr+0x0), &tlsptr, sizeof(void*));
memcpy((void*)((uintptr_t)ptr+0x10), &tlsptr, sizeof(void*)); // set tcb and self same address
uintptr_t dtp = (uintptr_t)ptr+POS_TLS;
memcpy((void*)(tlsptr+sizeof(void*)), &dtp, sizeof(void*));
if(dtssize) {
for (int i=0; i<context->elfsize; ++i) {
// set pointer
dtp = (uintptr_t)ptr + GetTLSBase(context->elfs[i]);
*(uint64_t*)((uintptr_t)ptr+POS_TLS+i*16) = dtp;
*(uint64_t*)((uintptr_t)ptr+POS_TLS+i*16+8) = i; // index
}
}
memcpy((void*)((uintptr_t)ptr+0x20), &context->vsyscall, sizeof(void*)); // address of vsyscall
}
memcpy((void*)((uintptr_t)ptr+0x20), &context->vsyscall, sizeof(void*)); // address of vsyscall
return data;
}
@ -356,7 +389,9 @@ void* GetSegmentBase(uint32_t desc)
return NULL;
}
int base = desc>>3;
if(base==0x8 && !my_context->segtls[base].key_init)
if(!box64_is32bits && base==0x8 && !my_context->segtls[base].key_init)
return GetSeg43Base();
if(box64_is32bits && (base==0x6))
return GetSeg43Base();
if(base>15) {
printf_log(LOG_NONE, "Warning, accessing segment unknown 0x%x or unset\n", desc);

src/emu/x86int3.c (new executable file, 347 lines)
View File

@ -0,0 +1,347 @@
#define _GNU_SOURCE /* See feature_test_macros(7) */
#include <dlfcn.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <pthread.h>
#include <signal.h>
#include "debug.h"
#include "box64stack.h"
#include "x64emu.h"
#include "x64run.h"
#include "x64emu_private.h"
#include "x64run_private.h"
#include "x87emu_private.h"
#include "x64primop.h"
#include "x64trace.h"
#include "wrapper32.h"
#include "box32context.h"
#include "librarian.h"
#include "signals.h"
#include "tools/bridge_private.h"
#include <elf.h>
#include "elfloader.h"
#include "elfload_dump.h"
#include "elfs/elfloader_private.h"
typedef int32_t (*iFpppp_t)(void*, void*, void*, void*);
static uint64_t F64(uintptr_t* addr) {
uint64_t ret = *(uint64_t*)*addr;
*addr+=8;
return ret;
}
static uint8_t Peek8(uintptr_t addr, uintptr_t offset)
{
return *(uint8_t*)(addr+offset);
}
extern int errno;
void x86Int3(x64emu_t* emu, uintptr_t* addr)
{
onebridge_t* bridge = (onebridge_t*)(*addr-1);
if(Peek8(*addr, 0)=='S' && Peek8(*addr, 1)=='C') // Signature for "Out of x86 door"
{
*addr += 2;
uintptr_t a = F64(addr);
if(a==0) {
R_RIP = *addr;
//printf_log(LOG_INFO, "%p:Exit x86 emu (emu=%p)\n", *(void**)(R_ESP), emu);
emu->quit=1; // normal quit
} else {
RESET_FLAGS(emu);
wrapper_t w = bridge->w;
a = F64(addr);
R_RIP = *addr;
/* This part can be used to trace only 1 specific lib (but it is quite slow)
elfheader_t *h = FindElfAddress(my_context, *(uintptr_t*)(R_ESP));
int have_trace = 0;
if(h && strstr(ElfName(h), "libMiles")) have_trace = 1;*/
if(box64_log>=LOG_DEBUG || cycle_log) {
int tid = GetTID();
char t_buff[256] = "\0";
char buff2[64] = "\0";
char buff3[64] = "\0";
int cycle_line = my_context->current_line;
if(cycle_log) {
my_context->current_line = (my_context->current_line+1)%cycle_log;
}
char* buff = cycle_log?my_context->log_call[cycle_line]:t_buff;
char* buffret = cycle_log?my_context->log_ret[cycle_line]:NULL;
if(buffret) buffret[0] = '\0';
char *tmp;
int post = 0;
int perr = 0;
uint64_t *pu64 = NULL;
uint32_t *pu32 = NULL;
uint8_t *pu8 = NULL;
const char *s = bridge->name;
if(!s)
s = GetNativeName((void*)a);
if(a==(uintptr_t)PltResolver32) {
if(cycle_log) {
ptr_t addr = *((uint32_t*)from_ptrv(R_ESP));
int slot = *((uint32_t*)from_ptrv(R_ESP+4));
elfheader_t *h = (elfheader_t*)from_ptrv(addr);
Elf32_Rel * rel = (Elf32_Rel *)from_ptrv(h->jmprel + h->delta + slot);
Elf32_Sym *sym = &h->DynSym._32[ELF32_R_SYM(rel->r_info)];
const char* symname = SymName32(h, sym);
snprintf(buff, 256, "%04d|PltResolver \"%s\"", tid, symname?symname:"???");
} else {
snprintf(buff, 256, "%s", " ... ");
}
} else
if(strstr(s, "SDL_RWFromFile")==s || strstr(s, "SDL_RWFromFile")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%s, %s)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "glColor4f")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%f, %f, %f, %f)", tid, *(void**)from_ptr(R_ESP), s, *(float*)from_ptr(R_ESP+4), *(float*)from_ptr(R_ESP+8), *(float*)from_ptr(R_ESP+12), *(float*)from_ptr(R_ESP+16));
} else if(strstr(s, "glTexCoord2f")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%f, %f)", tid, *(void**)from_ptr(R_ESP), s, *(float*)from_ptr(R_ESP+4), *(float*)from_ptr(R_ESP+8));
} else if(strstr(s, "glVertex2f")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%f, %f)", tid, *(void**)from_ptr(R_ESP), s, *(float*)from_ptr(R_ESP+4), *(float*)from_ptr(R_ESP+8));
} else if(strstr(s, "glVertex3f")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%f, %f, %f)", tid, *(void**)from_ptr(R_ESP), s, *(float*)from_ptr(R_ESP+4), *(float*)from_ptr(R_ESP+8), *(float*)from_ptr(R_ESP+12));
} else if(strstr(s, "__open64")==s || strcmp(s, "open64")==0) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", %d, %d)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), *(int*)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12));
perr = 1;
} else if(!strcmp(s, "opendir")) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)));
perr = 1;
} else if(strstr(s, "__open")==s || !strcmp(s, "open") || !strcmp(s, "my_open64")) {
tmp = from_ptrv(*(ptr_t*)from_ptr(R_ESP+4));
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", %d (,%d))", tid, *(void**)from_ptr(R_ESP), s, (tmp)?tmp:"(nil)", *(int*)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12));
perr = 1;
} else if(!strcmp(s, "shm_open")) {
tmp = from_ptrv(*(ptr_t*)from_ptr(R_ESP+4));
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", %d, %d)", tid, *(void**)from_ptr(R_ESP), s, (tmp)?tmp:"(nil)", *(int*)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12));
perr = 1;
} else if(strcmp(s, "mkdir")==0) {
tmp = from_ptrv(*(ptr_t*)from_ptr(R_ESP+4));
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", %d)", tid, *(void**)from_ptr(R_ESP), s, (tmp)?tmp:"(nil)", *(int*)from_ptr(R_ESP+8));
perr = 1;
} else if(!strcmp(s, "fopen")) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
perr = 2;
} else if(!strcmp(s, "freopen")) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\", %p)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), *(void**)from_ptr(R_ESP+12));
perr = 2;
} else if(!strcmp(s, "fopen64")) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
perr = 2;
} else if(!strcmp(s, "chdir")) {
pu32=*(uint32_t**)from_ptr(R_ESP+4);
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\")", tid, *(void**)from_ptr(R_ESP), s, pu32?((pu32==(uint32_t*)1)?"/1/":(char*)pu32):"/0/");
} else if(strstr(s, "getenv")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)));
post = 2;
} else if(strstr(s, "putenv")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)));
} else if(strstr(s, "pread")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p, %u, %d)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8), *(uint32_t*)from_ptr(R_ESP+12), *(int32_t*)from_ptr(R_ESP+16));
perr = 1;
} else if(!strcmp(s, "read")) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p, %u)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8), *(uint32_t*)from_ptr(R_ESP+12));
perr = 1;
} else if(strstr(s, "ioctl")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, 0x%x, %p)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), *(int32_t*)from_ptr(R_ESP+8), *(void**)from_ptr(R_ESP+12));
perr = 1;
} else if(strstr(s, "statvfs64")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p(\"%s\"), %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), *(void**)from_ptr(R_ESP+8));
} else if(strstr(s, "index")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p(\"%s\"), %i(%c))", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), *(int32_t*)from_ptr(R_ESP+8), *(int32_t*)from_ptr(R_ESP+8));
} else if(strstr(s, "rindex")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p(\"%s\"), %i(%c))", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), *(int32_t*)from_ptr(R_ESP+8), *(int32_t*)from_ptr(R_ESP+8));
} else if(strstr(s, "__xstat64")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p(\"%s\"), %p)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), *(void**)from_ptr(R_ESP+12));
perr = 1;
} else if(strcmp(s, "__xstat")==0) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p(\"%s\"), %p)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), *(void**)from_ptr(R_ESP+12));
perr = 1;
} else if(strstr(s, "__lxstat64")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p(\"%s\"), %p)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), *(void**)from_ptr(R_ESP+12));
perr = 1;
} else if(strstr(s, "sem_timedwait")==s) {
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %p[%d sec %d ns])", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8), pu32?pu32[0]:-1, pu32?pu32[1]:-1);
perr = 1;
} else if(strstr(s, "waitpid")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p, 0x%x)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8), *(uint32_t*)from_ptr(R_ESP+12));
perr = 1;
} else if(strstr(s, "clock_gettime")==s || strstr(s, "__clock_gettime")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p)", tid, *(void**)from_ptr(R_ESP), s, *(uint32_t*)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8));
post = 1;
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
} else if(strstr(s, "semop")==s) {
int16_t* p16 = *(int16_t**)from_ptr(R_ESP+8);
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p[%u/%d/0x%x], %d)", tid, *(void**)from_ptr(R_ESP), s, *(int*)from_ptr(R_ESP+4), p16, p16[0], p16[1], p16[2], *(int*)from_ptr(R_ESP+12));
perr = 1;
} else if(!strcmp(s, "mmap64")) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, 0x%x, %d, 0x%x, %d, %ld)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(ulong_t*)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12), *(int*)from_ptr(R_ESP+16), *(int*)from_ptr(R_ESP+20), *(int64_t*)from_ptr(R_ESP+24));
perr = 3;
} else if(!strcmp(s, "mmap")) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, 0x%x, %d, 0x%x, %d, %d)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(ulong_t*)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12), *(int*)from_ptr(R_ESP+16), *(int*)from_ptr(R_ESP+20), *(int*)from_ptr(R_ESP+24));
perr = 3;
} else if(strstr(s, "strcasecmp")==s || strstr(s, "__strcasecmp")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "gtk_signal_connect_full")) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, \"%s\", %p, %p, %p, %p, %d, %d)", tid, *(void**)from_ptr(R_ESP), "gtk_signal_connect_full", *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), *(void**)from_ptr(R_ESP+12), *(void**)from_ptr(R_ESP+16), *(void**)from_ptr(R_ESP+20), *(void**)from_ptr(R_ESP+24), *(int32_t*)from_ptr(R_ESP+28), *(int32_t*)from_ptr(R_ESP+32));
} else if(strstr(s, "strcmp")==s || strstr(s, "__strcmp")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "strstr")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%.127s\", \"%.127s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "strlen")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p(\"%s\"))", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), ((R_ESP+4))?((char*)from_ptrv(*(ptr_t*)from_ptr(R_ESP+4))):"nil");
} else if(strstr(s, "vsnprintf")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%08X, %u, %08X...)", tid, *(void**)from_ptr(R_ESP), s, *(uint32_t*)from_ptr(R_ESP+4), *(uint32_t*)from_ptr(R_ESP+8), *(uint32_t*)from_ptr(R_ESP+12));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 3;
} else if(strstr(s, "vsprintf")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, \"%s\", %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)), *(void**)from_ptr(R_ESP+12));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 3;
} else if(strstr(s, "__vsprintf_chk")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %d, %zu, \"%s\", %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(int*)from_ptr(R_ESP+8), *(size_t*)from_ptr(R_ESP+12), from_ptrv(*(ptr_t*)from_ptr(R_ESP+16)), *(void**)from_ptr(R_ESP+20));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 3;
} else if(strstr(s, "__snprintf_chk")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %zu, %d, %d, \"%s\", %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(size_t*)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12), *(int*)from_ptr(R_ESP+16), from_ptrv(*(ptr_t*)from_ptr(R_ESP+20)), *(void**)from_ptr(R_ESP+24));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 3;
} else if(strstr(s, "snprintf")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %zu, \"%s\", ...)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(size_t*)from_ptr(R_ESP+8), from_ptrv(*(ptr_t*)from_ptr(R_ESP+12)));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 3;
} else if(strstr(s, "sprintf")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%08X, %08X...)", tid, *(void**)from_ptr(R_ESP), s, *(uint32_t*)from_ptr(R_ESP+4), *(uint32_t*)from_ptr(R_ESP+8));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 3;
} else if(strstr(s, "printf")==s) {
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
if(((uintptr_t)pu32)<0x5) // probably a _chk function
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\"...)", tid, *(void**)from_ptr(R_ESP), s, pu32?((char*)(pu32)):"nil");
} else if(strstr(s, "__printf_chk")==s) {
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
snprintf(buff, 255, "%04d|%p: Calling %s(%d, \"%s\", ...)", tid, *(void**)from_ptr(R_ESP), s, from_ptri(int, R_ESP+4), pu32?((char*)(pu32)):"nil");
} else if(strstr(s, "wprintf")==s) {
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
if(((uintptr_t)pu32)<0x5) // probably a _chk function
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
snprintf(buff, 255, "%04d|%p: Calling %s(\"%S\"...)", tid, *(void**)from_ptr(R_ESP), s, pu32?((wchar_t*)(pu32)):L"nil");
} else if(strstr(s, "__vswprintf")==s) {
if(*(size_t*)from_ptr(R_ESP+12)<2) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %u, %p, %p, %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(ulong_t*)from_ptr(R_ESP+8), *(void**)from_ptr(R_ESP+12), *(void**)from_ptr(R_ESP+16), *(void**)from_ptr(R_ESP+20));
} else {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %u, \"%S\", %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(ulong_t*)from_ptr(R_ESP+8), *(wchar_t**)from_ptr(R_ESP+12), *(void**)from_ptr(R_ESP+16));
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+4));
post = 6;
}
} else if(strstr(s, "puts")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\"...)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)));
} else if(strstr(s, "fputs")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", %p...)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), *(void**)from_ptr(R_ESP+8));
} else if(strstr(s, "fprintf")==s) {
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
if(((uintptr_t)pu32)<0x5) // probably a __fprintf_chk
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+12));
snprintf(buff, 255, "%04d|%p: Calling %s(%08X, \"%s\", ...)", tid, *(void**)from_ptr(R_ESP), s, *(uint32_t*)from_ptr(R_ESP+4), pu32?((char*)(pu32)):"nil");
} else if(strstr(s, "vfprintf")==s) {
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+8));
if(((uintptr_t)pu32)<0x5) // probably a _chk function
pu32 = (uint32_t*)from_ptr(*(ptr_t*)from_ptr(R_ESP+12));
snprintf(buff, 255, "%04d|%p: Calling %s(%08X, \"%s\", ...)", tid, *(void**)from_ptr(R_ESP), s, *(uint32_t*)from_ptr(R_ESP+4), pu32?((char*)(pu32)):"nil");
} else if(strstr(s, "vkGetInstanceProcAddr")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, \"%s\")", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "vkGetDeviceProcAddr")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, \"%s\")", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "glXGetProcAddress")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\")", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)));
} else if(strstr(s, "sscanf")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\", ...)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(!strcmp(s, "vsscanf")) {
snprintf(buff, 255, "%04d|%p: Calling %s(\"%s\", \"%s\", ...)", tid, *(void**)from_ptr(R_ESP), s, from_ptrv(*(ptr_t*)from_ptr(R_ESP+4)), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "XCreateWindow")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, %p, %d, %d, %u, %u, %u, %d, %u, %p, %u, %p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8), *(int*)from_ptr(R_ESP+12), *(int*)from_ptr(R_ESP+16), *(uint32_t*)from_ptr(R_ESP+20), *(uint32_t*)from_ptr(R_ESP+24), *(uint32_t*)from_ptr(R_ESP+28), *(int32_t*)from_ptr(R_ESP+32), *(uint32_t*)from_ptr(R_ESP+36), *(void**)from_ptr(R_ESP+40), *(uint32_t*)from_ptr(R_ESP+44), *(void**)from_ptr(R_ESP+48));
} else if(strstr(s, "XLoadQueryFont")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p, \"%s\")", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(strstr(s, "pthread_mutex_lock")==s) {
snprintf(buff, 255, "%04d|%p: Calling %s(%p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4));
} else if(!strcmp(s, "fmodf")) {
post = 4;
snprintf(buff, 255, "%04d|%p: Calling %s(%f, %f)", tid, *(void**)from_ptr(R_ESP), s, *(float*)from_ptr(R_ESP+4), *(float*)from_ptr(R_ESP+8));
} else if(!strcmp(s, "fmod")) {
post = 4;
snprintf(buff, 255, "%04d|%p: Calling %s(%f, %f)", tid, *(void**)from_ptr(R_ESP), s, *(double*)from_ptr(R_ESP+4), *(double*)from_ptr(R_ESP+12));
} else if(strstr(s, "SDL_GetWindowSurface")==s) {
post = 5;
snprintf(buff, 255, "%04d|%p: Calling %s(%p)", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4));
} else if(strstr(s, "udev_monitor_new_from_netlink")==s) {
post = 5;
snprintf(buff, 255, "%04d|%p: Calling %s(%p, \"%s\")", tid, *(void**)from_ptr(R_ESP), s, *(void**)from_ptr(R_ESP+4), from_ptrv(*(ptr_t*)from_ptr(R_ESP+8)));
} else if(!strcmp(s, "syscall")) {
snprintf(buff, 255, "%04d|%p: Calling %s(%d, %p, %p, %p...)", tid, *(void**)from_ptr(R_ESP), s, *(int32_t*)from_ptr(R_ESP+4), *(void**)from_ptr(R_ESP+8), *(void**)from_ptr(R_ESP+12), *(void**)from_ptr(R_ESP+16));
perr = 1;
} else {
snprintf(buff, 255, "%04d|%p: Calling %s (%08X, %08X, %08X...)", tid, *(void**)from_ptr(R_ESP), s, *(uint32_t*)from_ptr(R_ESP+4), *(uint32_t*)from_ptr(R_ESP+8), *(uint32_t*)from_ptr(R_ESP+12));
}
if(!cycle_log) {
mutex_lock(&emu->context->mutex_trace);
printf_log(LOG_NONE, "%s =>", buff);
mutex_unlock(&emu->context->mutex_trace);
}
w(emu, a); // some function never come back, so unlock the mutex first!
if(post)
switch(post) {
case 1: snprintf(buff2, 63, " [%d sec %d nsec]", pu32?pu32[0]:-1, pu32?pu32[1]:-1);
break;
case 2: snprintf(buff2, 63, "(%s)", R_EAX?((char*)from_ptr(R_EAX)):"nil");
break;
case 3: snprintf(buff2, 63, "(%s)", pu32?((char*)pu32):"nil");
break;
case 4: snprintf(buff2, 63, " (%f)", ST0.d);
break;
case 5: {
uint32_t* p = (uint32_t*)from_ptrv(R_EAX);
if(p)
snprintf(buff2, 63, " size=%dx%d, pitch=%d, pixels=%p", p[2], p[3], p[4], p+5);
else
snprintf(buff2, 63, "NULL Surface");
}
break;
case 6: snprintf(buff2, 63, "(%S)", pu32?((wchar_t*)pu32):L"nil");
break;
}
if(perr==1 && ((int)R_EAX)<0)
snprintf(buff3, 63, " (errno=%d:\"%s\")", errno, strerror(errno));
else if(perr==2 && R_EAX==0)
snprintf(buff3, 63, " (errno=%d:\"%s\")", errno, strerror(errno));
else if(perr==3 && ((int)R_EAX)==-1)
snprintf(buff3, 63, " (errno=%d:\"%s\")", errno, strerror(errno));
if(cycle_log)
snprintf(buffret, 128, "0x%lX%s%s", R_RAX, buff2, buff3);
else {
mutex_lock(&emu->context->mutex_trace);
printf_log(LOG_NONE, " return 0x%lX%s%s\n", R_RAX, buff2, buff3);
mutex_unlock(&emu->context->mutex_trace);
}
} else
w(emu, a);
}
return;
}
if(!box64_ignoreint3 && my_context->signals[SIGTRAP]) {
R_RIP = *addr; // update RIP
emit_signal(emu, SIGTRAP, NULL, 3);
} else {
printf_log(LOG_DEBUG, "%04d|Warning, ignoring unsupported Int 3 call @%p\n", GetTID(), (void*)R_RIP);
R_RIP = *addr;
}
//emu->quit = 1;
}
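The trace formatter above tags some calls with a small "post" code so that, once the wrapped function returns, extra information can be printed alongside the raw EAX value. A hedged summary of those codes as an enum (the names are made up; the numeric values come straight from the switch on post above):

// Illustrative sketch (not part of the diff): meaning of the "post" codes
// used by the trace formatter above; names are hypothetical, values match
// the switch on 'post'.
enum demo_post_action {
    POST_NONE        = 0, // nothing extra to print after the call
    POST_TIMESPEC    = 1, // dump the seconds/nanoseconds written by the callee
    POST_RET_STRING  = 2, // print the C string returned in EAX
    POST_OUT_STRING  = 3, // print the string written into the captured buffer argument
    POST_X87_RESULT  = 4, // print ST0, the x87 double result
    POST_SDL_SURFACE = 5, // decode the returned SDL_Surface (size, pitch, pixels)
    POST_WIDE_STRING = 6, // print the wide string written into the captured buffer
};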


@ -266,7 +266,7 @@ void EXPORT x86Syscall(x64emu_t *emu)
R_EAX = R_EBX; // faking the syscall here, we don't want to really terminate the thread now
break;
/*case 123: // SYS_modify_ldt
R_EAX = my_modify_ldt(emu, R_EBX, (thread_area_t*)(uintptr_t)R_ECX, R_EDX);
R_EAX = my32_modify_ldt(emu, R_EBX, (thread_area_t*)(uintptr_t)R_ECX, R_EDX);
if(R_EAX==0xffffffff && errno>0)
R_EAX = (uint32_t)-errno;
break;*/

src/emu/x86syscall_32.c (new file, 447 lines)

@ -0,0 +1,447 @@
#define _GNU_SOURCE /* See feature_test_macros(7) */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */
#include <unistd.h>
#include <time.h>
#include <sys/mman.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <asm/stat.h>
#include <errno.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/utsname.h>
#ifndef __NR_socketcall
#include <linux/net.h>
#include <sys/socket.h>
#endif
#include <sys/resource.h>
#include <poll.h>
#include "debug.h"
#include "box64stack.h"
#include "x64emu.h"
#include "x64run.h"
#include "x64emu_private.h"
#include "x64trace.h"
#include "myalign32.h"
#include "box64context.h"
#include "callback.h"
#include "signals.h"
#include "x64tls.h"
#include "box32.h"
// Syscall table for x86_64 can be found
typedef struct scwrap_s {
uint32_t x86s;
int nats;
int nbpars;
} scwrap_t;
static const scwrap_t syscallwrap[] = {
//{ 2, __NR_fork, 1 },
//{ 3, __NR_read, 3 }, // wrapped so SA_RESTART can be handled by libc
//{ 4, __NR_write, 3 }, // same
//{ 5, __NR_open, 3 }, // flags need transformation
//{ 6, __NR_close, 1 }, // wrapped so SA_RESTART can be handled by libc
//{ 7, __NR_waitpid, 3 },
//{ 10, __NR_unlink, 1 },
//{ 12, __NR_chdir, 1 },
//{ 13, __NR_time, 1 },
//{ 15, __NR_chmod, 2 },
//{ 19, __NR_lseek, 3 },
//{ 20, __NR_getpid, 0 },
//{ 24, __NR_getuid, 0 },
//{ 33, __NR_access, 2 },
//{ 37, __NR_kill, 2 },
//{ 38, __NR_rename, 2 },
//{ 39, __NR_mkdir, 2 },
//{ 40, __NR_rmdir, 1 },
//{ 41, __NR_dup, 1 },
//{ 42, __NR_pipe, 1 },
//{ 45, __NR_brk, 1 },
//{ 47, __NR_getgid, 0 },
//{ 49, __NR_geteuid, 0 },
//{ 50, __NR_getegid, 0 },
//{ 54, __NR_ioctl, 3 }, // should be wrapped to allow SA_RESTART handling by libc, but syscall is only 3 arguments, ioctl can be 5
//{ 55, __NR_fcntl, 3 }, // wrapped to allow filter of F_SETFD
//{ 60, __NR_umask, 1 },
//{ 63, __NR_dup2, 2 },
//{ 64, __NR_getppid, 0 },
//{ 66, __NR_setsid, 0 },
//{ 75, __NR_setrlimit, 2 },
//{ 76, __NR_getrlimit, 2 },
//{ 77, __NR_getrusage, 2 },
//{ 78, __NR_gettimeofday, 2 },
//{ 83, __NR_symlink, 2 },
//{ 82, __NR_select, 5 },
//{ 85, __NR_readlink, 3 },
//{ 91, __NR_munmap, 2 },
//{ 94, __NR_fchmod, 2 },
//{ 99, __NR_statfs, 2 },
//{ 102, __NR_socketcall, 2 },
//{ 104, __NR_setitimer, 3 },
//{ 105, __NR_getitimer, 2 },
//{ 106, __NR_newstat, 2 },
//{ 106, __NR_stat, 2 },
//{ 107, __NR_newlstat, 2 },
//{ 107, __NR_lstat, 2 },
//{ 108, __NR_newfstat, 2 },
//{ 108, __NR_fstat, 2 },
//{ 109, __NR_olduname, 1 },
//{ 110, __NR_iopl, 1 },
//{ 114, __NR_wait4, 4 }, //TODO: check struct rusage alignment
//{ 117, __NR_ipc, 6 },
//{ 119, __NR_sigreturn, 0},
//{ 120, __NR_clone, 5 }, // needs work
//{ 122, __NR_uname, 1 },
//{ 123, __NR_modify_ldt },
//{ 125, __NR_mprotect, 3 },
//{ 136, __NR_personality, 1 },
//{ 140, __NR__llseek, 5 },
//{ 141, __NR_getdents, 3 },
//{ 142, __NR__newselect, 5 },
//{ 143, __NR_flock, 2 },
//{ 144, __NR_msync, 3 },
//{ 145, __NR_readv, 3 },
//{ 146, __NR_writev, 3 },
//{ 148, __NR_fdatasync, 1 },
//{ 149, __NR__sysctl, 1 }, // need wrapping?
//{ 156, __NR_sched_setscheduler, 3 },
//{ 157, __NR_sched_getscheduler, 1 },
//{ 158, __NR_sched_yield, 0 },
//{ 162, __NR_nanosleep, 2 },
//{ 164, __NR_setresuid, 3 },
//{ 168, __NR_poll, 3 }, // wrapped to allow SA_RESTART wrapping by libc
//{ 172, __NR_prctl, 5 },
//{ 173, __NR_rt_sigreturn, 0 },
//{ 175, __NR_rt_sigprocmask, 4 },
//{ 179, __NR_rt_sigsuspend, 2 },
//{ 183, __NR_getcwd, 2 },
//{ 184, __NR_capget, 2},
//{ 185, __NR_capset, 2},
//{ 186, __NR_sigaltstack, 2 }, // needs wrapping or something?
//{ 191, __NR_ugetrlimit, 2 },
// { 192, __NR_mmap2, 6},
//{ 195, __NR_stat64, 2 }, // need proper wrap because of structure size change
//{ 196, __NR_lstat64, 2 }, // need proper wrap because of structure size change
//{ 197, __NR_fstat64, 2 }, // need proper wrap because of structure size change
//{ 199, __NR_getuid32, 0 },
//{ 200, __NR_getgid32, 0 },
//{ 201, __NR_geteuid32, 0 },
//{ 202, __NR_getegid32, 0 },
//{ 208, __NR_setresuid32, 3 },
//{ 209, __NR_getresuid32, 3 },
//{ 210, __NR_setresgid32, 3 },
//{ 211, __NR_getresgid32, 3 },
//{ 220, __NR_getdents64, 3 },
//{ 221, __NR_fcntl64, 3 },
{ 224, __NR_gettid, 0 },
//{ 240, __NR_futex, 6 },
//{ 241, __NR_sched_setaffinity, 3 },
//{ 242, __NR_sched_getaffinity, 3 },
//{ 252, __NR_exit_group, 1 },
//{ 254, __NR_epoll_create, 1 },
//{ 255, __NR_epoll_ctl, 4 },
//{ 256, __NR_epoll_wait, 4 },
//{ 265, __NR_clock_gettime, 2 },
//{ 266, __NR_clock_getres, 2 },
//{ 270, __NR_tgkill, 3 },
//{ 271, __NR_utimes, 2 },
//{ 291, __NR_inotify_init, 0},
//{ 292, __NR_inotify_add_watch, 3},
//{ 293, __NR_inotify_rm_watch, 2},
//{ 311, __NR_set_robust_list, 2 },
//{ 312, __NR_get_robust_list, 4 },
//{ 318, __NR_getcpu, 3},
//{ 328, __NR_eventfd2, 2},
//{ 329, __NR_epoll_create1, 1 },
//{ 331, __NR_pipe2, 2},
//{ 332, __NR_inotify_init1, 1},
//{ 355, __NR_getrandom, 3 },
//{ 356, __NR_memfd_create, 2},
//{ 449, __NR_futex_waitv, 5},
};
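The syscallwrap[] table above pairs each 32-bit x86 syscall number with the native syscall number and its argument count; the dispatch code below walks it linearly before falling back to the explicit switch. A minimal sketch of that lookup, with a hypothetical helper name:

// Illustrative sketch (not part of the diff): translating a 32-bit guest
// syscall number through the table above; helper name is made up.
static const scwrap_t* find_syscall_wrap(uint32_t x86_nr)
{
    const int cnt = sizeof(syscallwrap) / sizeof(syscallwrap[0]);
    for (int i = 0; i < cnt; ++i)
        if (syscallwrap[i].x86s == x86_nr)
            return &syscallwrap[i];
    return NULL; // not wrapped: handled by the explicit switch instead
}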
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
};
#undef st_atime
#undef st_ctime
#undef st_mtime
struct x64_pt_regs {
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
int xds;
int xes;
int xfs;
int xgs;
long orig_eax;
long eip;
int xcs;
long eflags;
long esp;
int xss;
};
#ifndef __NR_olduname
struct oldold_utsname {
char sysname[9];
char nodename[9];
char release[9];
char version[9];
char machine[9];
};
#endif
struct old_utsname {
char sysname[65];
char nodename[65];
char release[65];
char version[65];
char machine[65];
};
struct i386_user_desc {
unsigned int entry_number;
unsigned long base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
};
int32_t my32_open(x64emu_t* emu, void* pathname, int32_t flags, uint32_t mode);
int32_t my32_execve(x64emu_t* emu, const char* path, char* const argv[], char* const envp[]);
int my32_munmap(x64emu_t* emu, void* addr, unsigned long length);
void EXPORT x86Syscall(x64emu_t *emu)
{
uint32_t s = R_EAX;
printf_log(LOG_DEBUG, "%p: Calling 32bits syscall 0x%02X (%d) %p %p %p %p %p", (void*)R_RIP, s, s, (void*)(uintptr_t)R_EBX, (void*)(uintptr_t)R_ECX, (void*)(uintptr_t)R_EDX, (void*)(uintptr_t)R_ESI, (void*)(uintptr_t)R_EDI);
// check wrapper first
int cnt = sizeof(syscallwrap) / sizeof(scwrap_t);
for (int i=0; i<cnt; i++) {
if(syscallwrap[i].x86s == s) {
int sc = syscallwrap[i].nats;
switch(syscallwrap[i].nbpars) {
case 0: *(int32_t*)&R_EAX = syscall(sc); break;
case 1: *(int32_t*)&R_EAX = syscall(sc, R_EBX); break;
case 2: *(int32_t*)&R_EAX = syscall(sc, R_EBX, R_ECX); break;
case 3: *(int32_t*)&R_EAX = syscall(sc, R_EBX, R_ECX, R_EDX); break;
case 4: *(int32_t*)&R_EAX = syscall(sc, R_EBX, R_ECX, R_EDX, R_ESI); break;
case 5: *(int32_t*)&R_EAX = syscall(sc, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI); break;
case 6: *(int32_t*)&R_EAX = syscall(sc, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP); break;
default:
printf_log(LOG_NONE, "ERROR, Unimplemented syscall wrapper (%d, %d)\n", s, syscallwrap[i].nbpars);
emu->quit = 1;
return;
}
if(R_EAX==0xffffffff && errno>0)
R_EAX = (uint32_t)-errno;
printf_log(LOG_DEBUG, " => 0x%x\n", R_EAX);
return;
}
}
switch (s) {
case 1: // sys_exit
emu->quit = 1;
emu->exit = 1;
//R_EAX = syscall(__NR_exit, R_EBX); // the syscall should exit only current thread
R_EAX = R_EBX; // faking the syscall here, we don't want to really terminate the thread now
break;
case 3: // sys_read
S_EAX = read((int)R_EBX, from_ptrv(R_ECX), from_ulong(R_EDX));
break;
case 4: // sys_write
S_EAX = write((int)R_EBX, from_ptrv(R_ECX), from_ulong(R_EDX));
break;
case 5: // sys_open
if(s==5) {printf_log(LOG_DEBUG, " => sys_open(\"%s\", %d, %d)", (char*)from_ptrv(R_EBX), of_convert32(R_ECX), R_EDX);};
//S_EAX = open((void*)R_EBX, of_convert32(R_ECX), R_EDX);
S_EAX = my32_open(emu, from_ptrv(R_EBX), of_convert32(R_ECX), R_EDX);
break;
case 6: // sys_close
S_EAX = close((int)R_EBX);
break;
/*case 123: // SYS_modify_ldt
R_EAX = my32_modify_ldt(emu, R_EBX, (thread_area_t*)(uintptr_t)R_ECX, R_EDX);
if(R_EAX==0xffffffff && errno>0)
R_EAX = (uint32_t)-errno;
break;*/
case 243: // set_thread_area
R_EAX = my_set_thread_area_32(emu, (thread_area_32_t*)(uintptr_t)R_EBX);
if(R_EAX==0xffffffff && errno>0)
R_EAX = (uint32_t)-errno;
break;
default:
printf_log(LOG_INFO, "Warning: Unsupported Syscall 0x%02Xh (%d)\n", s, s);
R_EAX = (uint32_t)-ENOSYS;
return;
}
printf_log(LOG_DEBUG, " => 0x%x\n", R_EAX);
}
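x86Syscall() above re-encodes errors for the guest: the 32-bit int 0x80 ABI expects failures as -errno directly in EAX, while the host-side libc calls (and the syscall() wrapper) return -1 and set errno. A small sketch of that convention, with a made-up helper name:

// Illustrative sketch (not part of the diff): re-encoding a libc-style
// result into the raw -errno convention the 32-bit guest expects.
static uint32_t demo_encode_result(long libc_ret)
{
    if (libc_ret == -1 && errno > 0)
        return (uint32_t)-errno;   // e.g. EBADF (9) becomes 0xFFFFFFF7
    return (uint32_t)libc_ret;
}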
#ifdef BOX32
#define stack(n) (b[(n)/4])
#define i32(n) (int32_t)stack(n)
#define u32(n) (uint32_t)stack(n)
#define p(n) from_ptrv(stack(n))
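The macros above read the 32-bit argument area handed to my32_syscall() as an array of 4-byte slots: stack(n) picks slot n/4, and i32()/u32()/p() reinterpret or widen it. As an illustration (hypothetical helper, relying only on the macros and the <unistd.h> already included), a guest "syscall(SYS_write, fd, buf, count)" would be forwarded like this:

// Illustrative sketch (not part of the diff): b[0]=syscall number,
// b[1]=fd, b[2]=buf (32-bit guest pointer), b[3]=count, so the macro
// offsets are byte offsets into the 32-bit argument area.
static uint32_t demo_forward_write(ptr_t* b)
{
    return (uint32_t)write(i32(4), p(8), u32(12));
}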
uint32_t EXPORT my32_syscall(x64emu_t *emu, ptr_t* b)
{
static uint32_t warned = 0;
uint32_t s = u32(0);
printf_log(LOG_DEBUG, "%p: Calling libc syscall 0x%02X (%d) %p %p %p %p %p\n", from_ptrv(R_EIP), s, s, from_ptrv(u32(4)), from_ptrv(u32(8)), from_ptrv(u32(12)), from_ptrv(u32(16)), from_ptrv(u32(20)));
// check wrapper first
int cnt = sizeof(syscallwrap) / sizeof(scwrap_t);
for (int i=0; i<cnt; i++) {
if(syscallwrap[i].x86s == s) {
int sc = syscallwrap[i].nats;
switch(syscallwrap[i].nbpars) {
case 0: return syscall(sc);
case 1: return syscall(sc, u32(4));
case 2: return syscall(sc, u32(4), u32(8));
case 3: return syscall(sc, u32(4), u32(8), u32(12));
case 4: return syscall(sc, u32(4), u32(8), u32(12), u32(16));
case 5: return syscall(sc, u32(4), u32(8), u32(12), u32(16), u32(20));
case 6: return syscall(sc, u32(4), u32(8), u32(12), u32(16), u32(20), u32(24));
default:
printf_log(LOG_NONE, "ERROR, Unimplemented syscall wrapper (%d, %d)\n", s, syscallwrap[i].nbpars);
emu->quit = 1;
return 0;
}
}
}
switch (s) {
case 1: // __NR_exit
emu->quit = 1;
return u32(4); // faking the syscall here, we don't want to really terminate the program now
case 3: // sys_read
return (uint32_t)read(i32(4), p(8), u32(12));
case 4: // sys_write
return (uint32_t)write(i32(4), p(8), u32(12));
case 5: // sys_open
return my32_open(emu, p(4), of_convert32(u32(8)), u32(12));
case 6: // sys_close
return (uint32_t)close(i32(4));
case 11: // execve
return (uint32_t)my32_execve(emu, p(4), p(8), p(12));
case 91: // munmap
return (uint32_t)my32_munmap(emu, p(4), u32(8));
#if 0
case 120: // clone
// x86 raw syscall is long clone(unsigned long flags, void *stack, int *parent_tid, unsigned long tls, int *child_tid);
// so flags=u(4), stack=p(8), parent_tid=p(12), tls=p(16), child_tid=p(20)
if(p(8))
{
void* stack_base = p(8);
int stack_size = 0;
if(!stack_base) {
// allocate a new stack...
int currstack = 0;
if((R_ESP>=(uintptr_t)emu->init_stack) && (R_ESP<=((uintptr_t)emu->init_stack+emu->size_stack)))
currstack = 1;
stack_size = (currstack)?emu->size_stack:(1024*1024);
stack_base = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
// copy value from old stack to new stack
if(currstack)
memcpy(stack_base, emu->init_stack, stack_size);
else {
int size_to_copy = (uintptr_t)emu->init_stack + emu->size_stack - (R_ESP);
memcpy(stack_base+stack_size-size_to_copy, (void*)R_ESP, size_to_copy);
}
}
x64emu_t * newemu = NewX86Emu(emu->context, R_EIP, (uintptr_t)stack_base, stack_size, (p(8))?0:1);
SetupX86Emu(newemu);
CloneEmu(newemu, emu);
Push32(newemu, 0);
PushExit(newemu);
void* mystack = NULL;
if(my32_context->stack_clone_used) {
mystack = malloc(1024*1024); // stack for own process... memory leak, but no practical way to remove it
} else {
if(!my32_context->stack_clone)
my32_context->stack_clone = malloc(1024*1024);
mystack = my32_context->stack_clone;
my32_context->stack_clone_used = 1;
}
// x86_64 raw clone is long clone(unsigned long flags, void *stack, int *parent_tid, int *child_tid, unsigned long tls);
long ret = clone(clone_fn, (void*)((uintptr_t)mystack+1024*1024), u32(4), newemu, p(12), p(16), p(20));
return (uint32_t)ret;
}
else
return (uint32_t)syscall(__NR_clone, u32(4), p(8), p(12), p(16), p(20));
break;
case 123: // SYS_modify_ldt
return my32_modify_ldt(emu, i32(4), (thread_area_t*)p(8), i32(12));
case 125: // mprotect
return (uint32_t)my32_mprotect(emu, p(4), u32(8), i32(12));
case 174: // sys_rt_sigaction
return (uint32_t)my32_sigaction(emu, i32(4), (x86_sigaction_t*)p(8), (x86_sigaction_t*)p(12));
case 192: // mmap2
return (uint32_t)my32_mmap64(emu, p(4), u32(8), i32(12), i32(16), i32(20), u32(24));
case 243: // set_thread_area
return my32_set_thread_area((thread_area_t*)p(4));
#ifndef NOALIGN
case 254: // epoll_create
return my32_epoll_create(emu, i32(4));
case 255: // epoll_ctl
return my32_epoll_ctl(emu, i32(4), i32(8), i32(12), p(16));
case 256: // epoll_wait
return my32_epoll_wait(emu, i32(4), p(8), i32(12), i32(16));
#endif
case 270: //_NR_tgkill
/*if(!u32(12))*/ {
//printf("tgkill(%u, %u, %u) => ", u32(4), u32(8), u32(12));
uint32_t ret = (uint32_t)syscall(__NR_tgkill, u32(4), u32(8), u32(12));
//printf("%u (errno=%d)\n", ret, (ret==(uint32_t)-1)?errno:0);
return ret;
}/* else {
printf_log(LOG_INFO, "Warning: ignoring libc Syscall tgkill (%u, %u, %u)\n", u32(4), u32(8), u32(12));
}*/
return 0;
#ifndef NOALIGN
case 329: // epoll_create1
return my32_epoll_create1(emu, of_convert32(i32(4)));
#endif
#ifndef __NR_getrandom
case 355: // getrandom
return (uint32_t)my32_getrandom(emu, p(4), u32(8), u32(12));
#endif
#ifndef __NR_memfd_create
case 356: // memfd_create
return (uint32_t)my32_memfd_create(emu, (void*)R_EBX, R_ECX);
#endif
#endif
default:
if(!(warned&(1<<s))) {
printf_log(LOG_INFO, "Warning: Unsupported libc Syscall 0x%02X (%d)\n", s, s);
warned|=(1<<s);
}
errno = ENOSYS;
return -1;
}
return 0;
}
#endif //BOX32


@ -295,7 +295,6 @@ void fpu_savenv(x64emu_t* emu, char* p, int b16)
// other stuff are not pushed....
}
// this is the 64bits version (slightly different than the 32bits!)
typedef struct xsave32_s {
uint16_t ControlWord; /* 000 */
uint16_t StatusWord; /* 002 */
@ -311,9 +310,10 @@ typedef struct xsave32_s {
uint32_t MxCsr; /* 018 */
uint32_t MxCsr_Mask; /* 01c */
sse_regs_t FloatRegisters[8];/* 020 */ // fpu/mmx are store in 128bits here
sse_regs_t XmmRegisters[16]; /* 0a0 */
uint8_t Reserved4[96]; /* 1a0 */
sse_regs_t XmmRegisters[8]; /* 0a0 */
uint8_t Reserved4[56*4]; /* 1a0 */
} xsave32_t;
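xsave32_t above describes the 32-bit FXSAVE image: still a 512-byte area, but with only the 8 architecturally defined XMM slots, the remainder being reserved padding. A hedged sanity check of that layout (assuming the usual FXSAVE field offsets and a 16-byte sse_regs_t):

// Illustrative sketch (not part of the diff): layout checks for xsave32_t.
#include <stddef.h>
#include <assert.h>
static void check_xsave32_layout(void)
{
    assert(offsetof(xsave32_t, FloatRegisters) == 0x020);
    assert(offsetof(xsave32_t, XmmRegisters)   == 0x0a0);
    assert(sizeof(xsave32_t)                   == 0x200); // full 512-byte FXSAVE area
}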
// this is the 64bits version (slightly different than the 32bits!)
typedef struct xsave64_s {
uint16_t ControlWord; /* 000 */
uint16_t StatusWord; /* 002 */
@ -354,8 +354,7 @@ void fpu_fxsave32(x64emu_t* emu, void* ed)
for(int i=0; i<8; ++i)
memcpy(&p->FloatRegisters[i].q[0], (i<stack)?&ST(i):&emu->mmx[i], sizeof(mmx87_regs_t));
// copy SSE regs
for(int i=0; i<16; ++i)
memcpy(&p->XmmRegisters[i], &emu->xmm[i], 16);
memcpy(p->XmmRegisters, emu->xmm, 8*16);
}
void fpu_fxsave64(x64emu_t* emu, void* ed)
@ -381,8 +380,7 @@ void fpu_fxsave64(x64emu_t* emu, void* ed)
for(int i=0; i<8; ++i)
memcpy(&p->FloatRegisters[i].q[0], (i<stack)?&ST(i):&emu->mmx[i], sizeof(mmx87_regs_t));
// copy SSE regs
for(int i=0; i<16; ++i)
memcpy(&p->XmmRegisters[i], &emu->xmm[i], 16);
memcpy(p->XmmRegisters, emu->xmm, 16*16);
}
void fpu_fxrstor32(x64emu_t* emu, void* ed)
@ -406,8 +404,7 @@ void fpu_fxrstor32(x64emu_t* emu, void* ed)
for(int i=0; i<8; ++i)
memcpy((i<stack)?&ST(i):&emu->mmx[i], &p->FloatRegisters[i].q[0], sizeof(mmx87_regs_t));
// copy SSE regs
for(int i=0; i<16; ++i)
memcpy(&emu->xmm[i], &p->XmmRegisters[i], 16);
memcpy(emu->xmm, p->XmmRegisters, 8*16);
}
void fpu_fxrstor64(x64emu_t* emu, void* ed)
@ -431,8 +428,7 @@ void fpu_fxrstor64(x64emu_t* emu, void* ed)
for(int i=0; i<8; ++i)
memcpy((i<stack)?&ST(i):&emu->mmx[i], &p->FloatRegisters[i].q[0], sizeof(mmx87_regs_t));
// copy SSE regs
for(int i=0; i<16; ++i)
memcpy(&emu->xmm[i], &p->XmmRegisters[i], 16);
memcpy(emu->xmm, p->XmmRegisters, 16*16);
}
typedef struct xsaveheader_s {


@ -104,9 +104,4 @@ void fini_hash_helper();
typedef struct x86emu_s x86emu_t;
void* my_mmap(x86emu_t* emu, void* addr, unsigned long length, int prot, int flags, int fd, int offset);
void* my_mmap64(x86emu_t* emu, void *addr, unsigned long length, int prot, int flags, int fd, int64_t offset);
int my_munmap(x86emu_t* emu, void* addr, unsigned long length);
int my_mprotect(x86emu_t* emu, void *addr, unsigned long len, int prot);
#endif //__BOX32_64__H_


@ -0,0 +1,7 @@
#ifndef __BOX32CONTEXT_H_
#define __BOX32CONTEXT_H_
#include "box32.h"
#include "box64context.h"
#endif//__BOX32CONTEXT_H_


@ -7,12 +7,19 @@
#ifdef DYNAREC
#include "dynarec/native_lock.h"
#endif
#ifndef BOX32_DEF
#define BOX32_DEF
typedef uint32_t ptr_t;
typedef int32_t long_t;
typedef uint32_t ulong_t;
#endif
#ifdef DYNAREC
// disabling for now, seems to have a negative impact on performances
//#define USE_CUSTOM_MUTEX
#endif
typedef struct elfheader_s elfheader_t;
typedef struct cleanup_s cleanup_t;
typedef struct x64emu_s x64emu_t;
@ -26,6 +33,7 @@ typedef struct kh_defaultversion_s kh_defaultversion_t;
typedef struct kh_mapsymbols_s kh_mapsymbols_t;
typedef struct library_s library_t;
typedef struct linkmap_s linkmap_t;
typedef struct linkmap32_s linkmap32_t;
typedef struct kh_threadstack_s kh_threadstack_t;
typedef struct rbtree rbtree;
typedef struct atfork_fnc_s {
@ -97,9 +105,11 @@ typedef struct box64context_s {
int argc;
char** argv;
ptr_t argv32;
int envc;
char** envv;
ptr_t envv32;
int orig_argc;
char** orig_argv;
@ -175,6 +185,7 @@ typedef struct box64context_s {
library_t *sdl2lib;
library_t *sdl2mixerlib;
linkmap_t *linkmap;
linkmap32_t *linkmap32;
void* sdl1allocrw; // SDL1 AllocRW/FreeRW function
void* sdl1freerw;
void* sdl2allocrw; // SDL2 AllocRW/FreeRW function


@ -121,7 +121,7 @@ extern int box64_x11threads;
extern int box64_x11glx;
extern char* box64_libGL;
extern uintptr_t fmod_smc_start, fmod_smc_end; // to handle libfmod (from Unreal) SMC (self modifying code)
extern uint32_t default_gs;
extern uint32_t default_gs, default_fs;
extern int jit_gdb; // launch gdb when a segfault is trapped
extern int box64_tcmalloc_minimal; // when using tcmalloc_minimal
#define LOG_NONE 0


@ -26,4 +26,8 @@ void DumpRelRTable64(elfheader_t *h, int cnt, Elf64_Relr *relr, const char *name
void DumpBinary(char* p, int sz);
#ifndef SHT_CHECKSUM
#define SHT_CHECKSUM 0x6ffffff8
#endif
#endif //ELFLOADER_DUMP_H

src/include/myalign32.h (new executable file, 409 lines)

@ -0,0 +1,409 @@
#ifndef __MY_ALIGN32__H_
#define __MY_ALIGN32__H_
#include <stdint.h>
#define X64_VA_MAX_REG (6*8)
#define X64_VA_MAX_XMM ((6*8)+(8*16))
#define ALIGN64_16(A) (uint64_t*)((((uintptr_t)A)+15)&~15LL)
#ifdef __x86_64__
// x86_64, 6 64bits general regs and 16 or 8? 128bits float regs
/*
For reference, here is the x86_64 va_list structure
typedef struct {
unsigned int gp_offset;
unsigned int fp_offset;
void *overflow_arg_area;
void *reg_save_area;
} va_list[1];
*/
#define CREATE_SYSV_VALIST_32(A) \
va_list sysv_varargs; \
sysv_varargs->gp_offset=X64_VA_MAX_REG; \
sysv_varargs->fp_offset=X64_VA_MAX_XMM; \
sysv_varargs->reg_save_area=(A); \
sysv_varargs->overflow_arg_area=A
#define CONVERT_VALIST_32(A) \
va_list sysv_varargs; \
sysv_varargs->gp_offset=X64_VA_MAX_REG; \
sysv_varargs->fp_offset=X64_VA_MAX_XMM; \
sysv_varargs->reg_save_area=(A); \
sysv_varargs->overflow_arg_area=A
#elif defined(__aarch64__)
// aarch64: 8 64bits general regs and 8 128bits float regs
/*
va_list declared as
typedef struct va_list {
void * stack; // next stack param
void * gr_top; // end of GP arg reg save area
void * vr_top; // end of FP/SIMD arg reg save area
int gr_offs; // offset from gr_top to next GP register arg
int vr_offs; // offset from vr_top to next FP/SIMD register arg
} va_list;
*/
#define CREATE_SYSV_VALIST_32(A) \
va_list sysv_varargs; \
sysv_varargs.__gr_offs=(8*8); \
sysv_varargs.__vr_offs=(8*16); \
sysv_varargs.__stack=(A);
#define CONVERT_VALIST_32(A) \
va_list sysv_varargs; \
sysv_varargs.__gr_offs=(8*8); \
sysv_varargs.__vr_offs=(8*16); \
sysv_varargs.__stack=(A);
#elif defined(__loongarch64) || defined(__powerpc64__) || defined(__riscv)
#define CREATE_SYSV_VALIST_32(A) \
va_list sysv_varargs = (va_list)A
#define CREATE_VALIST_FROM_VALIST_32(VA, SCRATCH) \
va_list sysv_varargs = (va_list)VA
#else
#error Unknown architecture!
#endif
#define VARARGS_32 sysv_varargs
#define PREPARE_VALIST_32 CREATE_SYSV_VALIST_32(emu->scratch)
#define VARARGS_32_(A) sysv_varargs
#define PREPARE_VALIST_32_(A) CREATE_SYSV_VALIST_32(A)
void myStackAlign32(const char* fmt, uint32_t* st, uint64_t* mystack);
void myStackAlignGVariantNew32(const char* fmt, uint32_t* st, uint64_t* mystack);
void myStackAlignW32(const char* fmt, uint32_t* st, uint64_t* mystack);
void UnalignStat64_32(const void* source, void* dest);
void UnalignStatFS64_32(const void* source, void* dest);
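Together, myStackAlign32() and the CREATE_SYSV_VALIST_32/PREPARE_VALIST_32 macros are what let a wrapped 32-bit variadic call reach a native v*printf-style function: the guest's 32-bit argument slots are first re-packed into 64-bit slots, then a native va_list is built over that buffer. A sketch of the usual pattern, assuming the emulator exposes a uint64_t scratch buffer as emu->scratch (as PREPARE_VALIST_32 implies) and <stdio.h>; the function name is made up:

// Illustrative sketch (not part of the diff): forwarding a 32-bit printf-like call.
static int demo_forward_printf(x64emu_t* emu, const char* fmt, uint32_t* guest_args)
{
    myStackAlign32(fmt, guest_args, emu->scratch); // 32-bit slots -> 64-bit slots
    PREPARE_VALIST_32;                             // native va_list over emu->scratch
    return vprintf(fmt, VARARGS_32);
}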
#if 0
void UnalignOggVorbis(void* dest, void* source); // Arm -> x86
void AlignOggVorbis(void* dest, void* source); // x86 -> Arm
void UnalignVorbisDspState(void* dest, void* source); // Arm -> x86
void AlignVorbisDspState(void* dest, void* source); // x86 -> Arm
void UnalignVorbisBlock(void* dest, void* source); // Arm -> x86
void AlignVorbisBlock(void* dest, void* source); // x86 -> Arm
void UnalignEpollEvent(void* dest, void* source, int nbr); // Arm -> x86
void AlignEpollEvent(void* dest, void* source, int nbr); // x86 -> Arm
void UnalignSmpegInfo(void* dest, void* source); // Arm -> x86
void AlignSmpegInfo(void* dest, void* source); // x86 -> Arm
#endif
// stat64 is packed on i386, not on arm (and possibly other structures)
#undef st_atime
#undef st_atime_nsec
#undef st_mtime
#undef st_mtime_nsec
#undef st_ctime
#undef st_ctime_nsec
struct i386_stat64 {
uint64_t st_dev;
uint8_t __pad0[4];
uint32_t __st_ino;
uint32_t st_mode;
uint32_t st_nlink;
uint32_t st_uid;
uint32_t st_gid;
uint64_t st_rdev;
uint8_t __pad3[4];
int64_t st_size;
uint32_t st_blksize;
uint64_t st_blocks;
uint32_t st_atime;
uint32_t st_atime_nsec;
uint32_t st_mtime;
uint32_t st_mtime_nsec;
uint32_t st_ctime;
uint32_t st_ctime_nsec;
uint64_t st_ino;
} __attribute__((packed));
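The packed i386_stat64 layout above cannot be produced by a plain memcpy from the host's struct stat, which is why UnalignStat64_32() exists. A hedged sketch of the kind of field-by-field copy involved (subset of fields only, made-up helper name, assumes glibc's struct stat with its st_atim member):

// Illustrative sketch (not part of the diff): host struct stat -> packed i386 stat64.
#include <sys/stat.h>
static void demo_stat_to_i386(const struct stat* src, struct i386_stat64* dst)
{
    dst->st_dev        = src->st_dev;
    dst->__st_ino      = (uint32_t)src->st_ino;
    dst->st_mode       = src->st_mode;
    dst->st_nlink      = (uint32_t)src->st_nlink;
    dst->st_uid        = src->st_uid;
    dst->st_gid        = src->st_gid;
    dst->st_rdev       = src->st_rdev;
    dst->st_size       = src->st_size;
    dst->st_blksize    = (uint32_t)src->st_blksize;
    dst->st_blocks     = (uint64_t)src->st_blocks;
    dst->st_atime      = (uint32_t)src->st_atim.tv_sec;  // 32-bit time fields
    dst->st_atime_nsec = (uint32_t)src->st_atim.tv_nsec;
    dst->st_ino        = src->st_ino;
}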
struct i386_fsid {
int val[2];
} __attribute__((packed));
struct i386_statfs {
uint32_t f_type;
uint32_t f_bsize;
uint32_t f_blocks;
uint32_t f_bfree;
uint32_t f_bavail;
uint32_t f_files;
uint32_t f_ffree;
struct i386_fsid f_fsid;
uint32_t f_namelen;
uint32_t f_frsize;
uint32_t f_flags;
uint32_t f_spare[4];
} __attribute__((packed));
struct i386_statfs64 {
uint32_t f_type;
uint32_t f_bsize;
uint64_t f_blocks;
uint64_t f_bfree;
uint64_t f_bavail;
uint64_t f_files;
uint64_t f_ffree;
struct i386_fsid f_fsid;
uint32_t f_namelen;
uint32_t f_frsize;
uint32_t f_flags;
uint32_t f_spare[4];
} __attribute__((packed));
#if 0
typedef struct {
unsigned char *data;
int storage;
int fill;
int returned;
int unsynced;
int headerbytes;
int bodybytes;
} ogg_sync_state;
typedef struct {
unsigned char *body_data; /* bytes from packet bodies */
long body_storage; /* storage elements allocated */
long body_fill; /* elements stored; fill mark */
long body_returned; /* elements of fill returned */
int *lacing_vals; /* The values that will go to the segment table */
int64_t *granule_vals; /* granulepos values for headers. Not compact
this way, but it is simple coupled to the
lacing fifo */
long lacing_storage;
long lacing_fill;
long lacing_packet;
long lacing_returned;
unsigned char header[282]; /* working space for header encode */
int header_fill;
int e_o_s; /* set when we have buffered the last packet in the
logical bitstream */
int b_o_s; /* set after we've written the initial page
of a logical bitstream */
long serialno;
long pageno;
int64_t packetno; /* sequence number for decode; the framing
knows where there's a hole in the data,
but we need coupling so that the codec
(which is in a separate abstraction
layer) also knows about the gap */
int64_t granulepos;
} ogg_stream_state;
typedef struct vorbis_dsp_state {
int analysisp;
ptr_t vi; //vorbis_info
ptr_t pcm; //float**
ptr_t pcmret; // float**
int pcm_storage;
int pcm_current;
int pcm_returned;
int preextrapolate;
int eofflag;
long lW;
long W;
long nW;
long centerW;
int64_t granulepos;
int64_t sequence;
int64_t glue_bits;
int64_t time_bits;
int64_t floor_bits;
int64_t res_bits;
void *backend_state;
} vorbis_dsp_state;
typedef struct {
long endbyte;
int endbit;
unsigned char *buffer;
unsigned char *ptr;
long storage;
} oggpack_buffer;
typedef struct vorbis_block {
/* necessary stream state for linking to the framing abstraction */
float **pcm; /* this is a pointer into local storage */
oggpack_buffer opb;
long lW;
long W;
long nW;
int pcmend;
int mode;
int eofflag;
int64_t granulepos;
int64_t sequence;
vorbis_dsp_state *vd; /* For read-only access of configuration */
/* local storage to avoid remallocing; it's up to the mapping to
structure it */
void *localstore;
long localtop;
long localalloc;
long totaluse;
struct alloc_chain *reap;
/* bitmetrics for the frame */
long glue_bits;
long time_bits;
long floor_bits;
long res_bits;
void *internal;
} vorbis_block;
typedef struct {
size_t (*read_func) (void *ptr, size_t size, size_t nmemb, void *datasource);
int (*seek_func) (void *datasource, int64_t offset, int whence);
int (*close_func) (void *datasource);
long (*tell_func) (void *datasource);
} ov_callbacks;
typedef struct OggVorbis {
void *datasource; /* Pointer to a FILE *, etc. */
int seekable;
int64_t offset;
int64_t end;
ogg_sync_state oy;
/* If the FILE handle isn't seekable (eg, a pipe), only the current
stream appears */
int links;
int64_t *offsets;
int64_t *dataoffsets;
long *serialnos;
int64_t *pcmlengths; /* overloaded to maintain binary
compatibility; x2 size, stores both
beginning and end values */
void *vi; //vorbis_info
void *vc; //vorbis_comment
/* Decoding working state local storage */
int64_t pcm_offset;
int ready_state;
long current_serialno;
int current_link;
double bittrack;
double samptrack;
ogg_stream_state os; /* take physical pages, weld into a logical
stream of packets */
vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
vorbis_block vb; /* local working space for packet->PCM decode */
ov_callbacks callbacks;
} OggVorbis;
typedef struct my_SMPEG_Info_s {
int has_audio;
int has_video;
int width;
int height;
int current_frame;
double current_fps;
char audio_string[80];
int audio_current_frame;
uint32_t current_offset;
uint32_t total_size;
double current_time;
double total_time;
} my_SMPEG_Info_t;
typedef struct __attribute__((packed)) x86_ftsent_s {
struct x86_ftsent_s *fts_cycle;
struct x86_ftsent_s *fts_parent;
struct x86_ftsent_s *fts_link;
long fts_number;
void *fts_pointer;
char *fts_accpath;
char *fts_path;
int fts_errno;
int fts_symfd;
uint16_t fts_pathlen;
uint16_t fts_namelen;
uintptr_t fts_ino;
uint64_t fts_dev;
uint32_t fts_nlink;
int16_t fts_level;
uint16_t fts_info;
uint16_t fts_flags;
uint16_t fts_instr;
struct stat *fts_statp;
char fts_name[1];
} x86_ftsent_t;
void UnalignFTSENT(void* dest, void* source); // Arm -> x86
void AlignFTSENT(void* dest, void* source); // x86 -> Arm
typedef struct my_flock64_s {
uint16_t l_type;
uint16_t l_whence;
int64_t l_start;
int64_t l_len;
int l_pid;
} my_flock64_t;
typedef struct __attribute__((packed)) x86_flock64_s {
uint16_t l_type;
uint16_t l_whence;
int64_t l_start;
int64_t l_len;
int l_pid;
} x86_flock64_t;
void UnalignFlock64(void* dest, void* source); // Arm -> x86
void AlignFlock64(void* dest, void* source); // x86 -> Arm
// defined in wrapperlibc.c
int of_convert(int); // x86->arm
int of_unconvert(int); // arm->x86
typedef struct my_GValue_s
{
int g_type;
union {
int v_int;
int64_t v_int64;
uint64_t v_uint64;
float v_float;
double v_double;
void* v_pointer;
} data[2];
} my_GValue_t;
void alignNGValue(my_GValue_t* v, void* value, int n);
void unalignNGValue(void* value, my_GValue_t* v, int n);
#endif
int of_convert32(int a);
int of_unconvert32(int a);
#endif//__MY_ALIGN32__H_


@ -319,6 +319,7 @@ typedef union {
#define R_R13 emu->regs[_R13].q[0]
#define R_R14 emu->regs[_R14].q[0]
#define R_R15 emu->regs[_R15].q[0]
#define R_EIP emu->ip.dword[0]
#define R_EAX emu->regs[_AX].dword[0]
#define R_EBX emu->regs[_BX].dword[0]
#define R_ECX emu->regs[_CX].dword[0]


@ -4,11 +4,26 @@
typedef struct box64context_s box64context_t;
typedef struct x64emu_s x64emu_t;
typedef struct emuthread_s {
uintptr_t fnc;
void* arg;
x64emu_t* emu;
int join;
uintptr_t self;
ulong_t hself;
int cancel_cap, cancel_size;
void** cancels;
} emuthread_t;
void CleanStackSize(box64context_t* context);
void init_pthread_helper(void);
void fini_pthread_helper(box64context_t* context);
void clean_current_emuthread(void);
#ifdef BOX32
void init_pthread_helper_32(void);
void fini_pthread_helper_32(box64context_t* context);
#endif
// prepare an emuthread_t structure in *pet and return the native start routine to pass to the thread-creation call
void* my_prepare_thread(x64emu_t *emu, void* f, void* arg, int ssize, void** pet);
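As the comment above describes, my_prepare_thread() packs the guest start routine and its argument into an emuthread_t and returns the native start routine, with the emuthread_t pointer serving as its argument. A hedged sketch of how a wrapped pthread_create would use it (made-up function name, assumes <pthread.h>, and 0 taken here to mean the default stack size):

// Illustrative sketch (not part of the diff): creating a native thread that
// runs a guest function through my_prepare_thread().
static int demo_create_guest_thread(x64emu_t* emu, pthread_t* t, void* guest_fn, void* guest_arg)
{
    void* et = NULL;
    void* start = my_prepare_thread(emu, guest_fn, guest_arg, 0, &et);
    return pthread_create(t, NULL, (void* (*)(void*))start, et);
}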


@ -25,7 +25,8 @@ void SetEDX(x64emu_t *emu, uint32_t v);
void SetEDI(x64emu_t *emu, uint32_t v);
void SetESI(x64emu_t *emu, uint32_t v);
void SetEBP(x64emu_t *emu, uint32_t v);
//void SetESP(x64emu_t *emu, uint32_t v);
void SetESP(x64emu_t *emu, uint32_t v);
void SetEIP(x64emu_t *emu, uint32_t v);
void SetRAX(x64emu_t *emu, uint64_t v);
void SetRBX(x64emu_t *emu, uint64_t v);
void SetRCX(x64emu_t *emu, uint64_t v);
@ -35,8 +36,8 @@ void SetRSI(x64emu_t *emu, uint64_t v);
void SetRBP(x64emu_t *emu, uint64_t v);
void SetRSP(x64emu_t *emu, uint64_t v);
void SetRIP(x64emu_t *emu, uint64_t v);
//void SetFS(x64emu_t *emu, uint16_t v);
//uint16_t GetFS(x64emu_t *emu);
void SetFS(x64emu_t *emu, uint16_t v);
uint16_t GetFS(x64emu_t *emu);
uint64_t GetRSP(x64emu_t *emu);
uint64_t GetRBP(x64emu_t *emu);
void ResetFlags(x64emu_t *emu);


@ -10,6 +10,9 @@ void DynaRun(x64emu_t *emu);
uint32_t LibSyscall(x64emu_t *emu);
void PltResolver64(x64emu_t* emu);
#ifdef BOX32
void PltResolver32(x64emu_t* emu);
#endif
extern uintptr_t pltResolver64;
int GetTID(void);


@ -12,6 +12,9 @@
#include "x64emu.h"
#include "box64context.h"
#include "elfloader.h"
#ifdef BOX32
#include "box32.h"
#endif
#include "bridge.h"
@ -254,15 +257,31 @@ static int AddNeededLib_add(lib_t** maplib, int local, needed_libs_t* needed, in
if (lib->type == LIB_EMULATED) {
// Need to add library to the linkmap (put here so the link is ordered)
linkmap_t *lm = addLinkMapLib(lib);
if(!lm) {
// Crashed already
printf_dump(LOG_DEBUG, "Failure to add lib linkmap\n");
return 1;
#ifdef BOX32
if(box64_is32bits) {
linkmap32_t *lm = addLinkMapLib32(lib);
if(!lm) {
// Crashed already
printf_dump(LOG_DEBUG, "Failure to add lib linkmap\n");
return 1;
}
lm->l_addr = (Elf32_Addr)to_ptrv(GetElfDelta(lib->e.elf));
lm->l_name = to_ptrv(lib->name);
lm->l_ld = to_ptrv(GetDynamicSection(lib->e.elf));
} else
#endif
{
linkmap_t *lm = addLinkMapLib(lib);
if(!lm) {
// Crashed already
printf_dump(LOG_DEBUG, "Failure to add lib linkmap\n");
return 1;
}
lm->l_addr = (Elf64_Addr)GetElfDelta(lib->e.elf);
lm->l_name = lib->name;
lm->l_ld = GetDynamicSection(lib->e.elf);
}
lm->l_addr = (Elf64_Addr)GetElfDelta(lib->e.elf);
lm->l_name = lib->name;
lm->l_ld = GetDynamicSection(lib->e.elf);
//TODO: it seems to never be removed!
}
IncRefCount(lib, emu);
return 0;


@ -40,6 +40,16 @@
#endif
#undef GO
#ifdef BOX32
#define GO(P, N) int wrapped##N##_init32(library_t* lib, box64context_t *box64); \
void wrapped##N##_fini32(library_t* lib);
#ifdef STATICBUILD
#include "library_list_static_32.h"
#else
#include "library_list_32.h"
#endif
#undef GO
#endif
#define GO(P, N) {P, wrapped##N##_init, wrapped##N##_fini},
wrappedlib_t wrappedlibs[] = {
@ -50,6 +60,17 @@ wrappedlib_t wrappedlibs[] = {
#endif
};
#undef GO
#define GO(P, N) {P, wrapped##N##_init32, wrapped##N##_fini32},
wrappedlib_t wrappedlibs32[] = {
#ifdef BOX32
#ifdef STATICBUILD
#include "library_list_static_32.h"
#else
#include "library_list_32.h"
#endif
#endif
};
#undef GO
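wrappedlibs32[] above is built with the same X-macro trick as the 64-bit table: library_list_32.h is included several times with different definitions of GO(), once for the prototypes and once for the table entries. A standalone, simplified illustration of the pattern (all names made up, signatures reduced):

// Illustrative sketch (not part of the diff): the X-macro pattern in miniature.
#include <stdio.h>

#define DEMO_LIBS \
    GO("libc.so.6", libc) \
    GO("libm.so.6", libm)

// pass 1: emit one init stub per library
#define GO(P, N) static int wrapped##N##_init32(void) { puts("init " P); return 0; }
DEMO_LIBS
#undef GO

// pass 2: emit the name -> init lookup table
typedef struct { const char* name; int (*init)(void); } demo_wrappedlib_t;
#define GO(P, N) { P, wrapped##N##_init32 },
static const demo_wrappedlib_t demo_wrappedlibs32[] = { DEMO_LIBS };
#undef GO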
KHASH_MAP_IMPL_STR(symbolmap, symbol1_t)
KHASH_MAP_IMPL_STR(symbol2map, symbol2_t)
@ -232,14 +253,11 @@ int DummyLib_GetLocal(library_t* lib, const char* name, uintptr_t *offs, uintptr
}
static void initWrappedLib(library_t *lib, box64context_t* context) {
if(box64_is32bits) {
// TODO
return; // nothing wrapped yet
}
int nb = sizeof(wrappedlibs) / sizeof(wrappedlib_t);
int nb = (box64_is32bits?sizeof(wrappedlibs32):sizeof(wrappedlibs)) / sizeof(wrappedlib_t);
for (int i=0; i<nb; ++i) {
if(strcmp(lib->name, wrappedlibs[i].name)==0) {
if(wrappedlibs[i].init(lib, context)) {
wrappedlib_t* w = box64_is32bits?(&wrappedlibs32[i]):(&wrappedlibs[i]);
if(strcmp(lib->name, w->name)==0) {
if(w->init(lib, context)) {
// error!
const char* error_str = dlerror();
if(error_str) // don't print the message if there is no error string from last error
@ -247,7 +265,7 @@ static void initWrappedLib(library_t *lib, box64context_t* context) {
return; // non blocker...
}
printf_dump(LOG_INFO, "Using native(wrapped) %s\n", lib->name);
lib->fini = wrappedlibs[i].fini;
lib->fini = w->fini;
lib->getglobal = WrappedLib_GetGlobal;
lib->getweak = WrappedLib_GetWeak;
lib->getlocal = WrappedLib_GetLocal;
@ -366,7 +384,7 @@ static void initEmulatedLib(const char* path, library_t *lib, box64context_t* co
{
char libname[MAX_PATH];
strcpy(libname, path);
int found = FileIsX64ELF(libname);
int found = box64_is32bits?FileIsX86ELF(libname):FileIsX64ELF(libname);
if(found)
if(loadEmulatedLib(libname, lib, context, verneeded))
return;
@ -375,14 +393,14 @@ static void initEmulatedLib(const char* path, library_t *lib, box64context_t* co
{
strcpy(libname, context->box64_ld_lib.paths[i]);
strcat(libname, path);
if(FileIsX64ELF(libname))
if(box64_is32bits?FileIsX86ELF(libname):FileIsX64ELF(libname))
if(loadEmulatedLib(libname, lib, context, verneeded))
return;
// also try x86_64 variant
strcpy(libname, context->box64_ld_lib.paths[i]);
strcat(libname, "x86_64/");
strcat(libname, box64_is32bits?"i386/":"x86_64/");
strcat(libname, path);
if(FileIsX64ELF(libname))
if(box64_is32bits?FileIsX86ELF(libname):FileIsX64ELF(libname))
if(loadEmulatedLib(libname, lib, context, verneeded))
return;
}
@ -793,7 +811,7 @@ static int getSymbolInDataMaps(library_t*lib, const char* name, int noweak, uint
if(lib->w.altmy)
strcpy(buff, lib->w.altmy);
else
strcpy(buff, "my_");
strcpy(buff, box64_is32bits?"my32_":"my_");
strcat(buff, name);
#ifdef STATICBUILD
symbol = (void*)kh_value(lib->w.mydatamap, k).addr;
@ -828,7 +846,7 @@ static int getSymbolInSymbolMaps(library_t*lib, const char* name, int noweak, ui
if(lib->w.altmy)
strcpy(buff, lib->w.altmy);
else
strcpy(buff, "my_");
strcpy(buff, box64_is32bits?"my32_":"my_");
strcat(buff, name);
#ifdef STATICBUILD
symbol = (void*)s->addr;
@ -856,7 +874,7 @@ static int getSymbolInSymbolMaps(library_t*lib, const char* name, int noweak, ui
if(lib->w.altmy)
strcpy(buff, lib->w.altmy);
else
strcpy(buff, "my_");
strcpy(buff, box64_is32bits?"my32_":"my_");
strcat(buff, name);
#ifdef STATICBUILD
symbol = (void*)s->addr;
@ -921,7 +939,7 @@ static int getSymbolInSymbolMaps(library_t*lib, const char* name, int noweak, ui
if(lib->w.altmy)
strcpy(buff, lib->w.altmy);
else
strcpy(buff, "my_");
strcpy(buff, box64_is32bits?"my32_":"my_");
strcat(buff, name);
#ifdef STATICBUILD
symbol = (void*)s->addr;
@ -1078,6 +1096,64 @@ int GetDeepBind(library_t* lib)
return lib->deepbind;
}
#ifdef BOX32
linkmap32_t* getLinkMapLib32(library_t* lib)
{
linkmap32_t* lm = my_context->linkmap32;
while(lm) {
if(lm->l_lib == lib)
return lm;
lm = (linkmap32_t*)from_ptrv(lm->l_next);
}
return NULL;
}
linkmap32_t* getLinkMapElf32(elfheader_t* h)
{
linkmap32_t* lm = my_context->linkmap32;
while(lm) {
if(lm->l_lib && lm->l_lib->type==LIB_EMULATED && lm->l_lib->e.elf == h)
return lm;
lm = (linkmap32_t*)from_ptrv(lm->l_next);
}
return NULL;
}
linkmap32_t* addLinkMapLib32(library_t* lib)
{
if(!my_context->linkmap32) {
my_context->linkmap32 = (linkmap32_t*)box_calloc(1, sizeof(linkmap32_t));
my_context->linkmap32->l_lib = lib;
return my_context->linkmap32;
}
linkmap32_t* lm = my_context->linkmap32;
while(lm->l_next)
lm = (linkmap32_t*)from_ptrv(lm->l_next);
lm->l_next = to_ptrv(box_calloc(1, sizeof(linkmap32_t)));
linkmap32_t* l_next = (linkmap32_t*)from_ptrv(lm->l_next);
l_next->l_lib = lib;
l_next->l_prev = to_ptrv(lm);
return l_next;
}
void removeLinkMapLib32(library_t* lib)
{
linkmap32_t* lm = getLinkMapLib32(lib);
if(!lm) return;
if(lm->l_next)
((linkmap32_t*)from_ptrv(lm->l_next))->l_prev = lm->l_prev;
if(lm->l_prev)
((linkmap32_t*)from_ptrv(lm->l_prev))->l_next = lm->l_next;
box_free(lm);
}
void AddMainElfToLinkmap32(elfheader_t* elf)
{
linkmap32_t* lm = addLinkMapLib32(NULL); // main elf will have a null lib link
lm->l_addr = (Elf32_Addr)to_ptrv(GetElfDelta(elf));
lm->l_name = to_ptrv(my_context->fullpath);
lm->l_ld = to_ptrv(GetDynamicSection(elf));
}
#endif
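The linkmap32 helpers above maintain the guest-visible 32-bit link_map chain, where neighbouring nodes are stored as 32-bit ptr_t values rather than host pointers. A small sketch of how such a chain is walked (made-up helper name), mirroring getLinkMapLib32():

// Illustrative sketch (not part of the diff): each hop converts the stored
// 32-bit l_next back into a host pointer with from_ptrv().
static int count_linkmap32(linkmap32_t* head)
{
    int n = 0;
    for (linkmap32_t* lm = head; lm; lm = (linkmap32_t*)from_ptrv(lm->l_next))
        ++n;
    return n;
}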
linkmap_t* getLinkMapLib(library_t* lib)
{
linkmap_t* lm = my_context->linkmap;


@ -133,11 +133,29 @@ typedef struct linkmap_s {
library_t* l_lib;
} linkmap_t;
#ifdef BOX32
typedef struct linkmap32_s {
// actual struct link_map
Elf32_Addr l_addr;
ptr_t l_name; // char*
ptr_t l_ld; //Elf64_Dyn*
ptr_t l_next, l_prev; // struct linkmap32_s *
// custom
library_t* l_lib;
} linkmap32_t;
#endif
linkmap_t* getLinkMapLib(library_t* lib);
linkmap_t* getLinkMapElf(elfheader_t* h);
linkmap_t* addLinkMapLib(library_t* lib);
void removeLinkMapLib(library_t* lib);
#ifdef BOX32
linkmap32_t* getLinkMapLib32(library_t* lib);
linkmap32_t* getLinkMapElf32(elfheader_t* h);
linkmap32_t* addLinkMapLib32(library_t* lib);
void removeLinkMapLib32(library_t* lib);
#endif
int FiniLibrary(library_t* lib, x64emu_t* emu);
void Free1Library(library_t **lib, x64emu_t* emu);

src/library_list_32.h (new file, 12 lines)

@ -0,0 +1,12 @@
#ifndef GO
#error Nope
#endif
GO("libpthread.so.0", libpthread)
GO("librt.so.1", librt)
GO("libc.so.6", libc)
GO("libm.so.6", libm)
GO("libdl.so.2", libdl)
GO("ld-linux.so.2", ldlinux)
GO("crashhandler.so", crashhandler)


@ -0,0 +1,10 @@
#ifndef GO
#error Nope
#endif
GO("libpthread.so.0", libpthread)
GO("librt.so.1", librt)
GO("libc.so.6", libc)
GO("libm.so.6", libm)
GO("libdl.so.2", libdl)
GO("ld-linux.so.2", ldlinux)

src/libtools/myalign32.c (new executable file, 935 lines)

@ -0,0 +1,935 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <wchar.h>
#include <sys/epoll.h>
#include <fts.h>
#include "x64emu.h"
#include "emu/x64emu_private.h"
#include "myalign32.h"
#include "debug.h"
#include "box32.h"
void myStackAlign32(const char* fmt, uint32_t* st, uint64_t* mystack)
{
if(!fmt)
return;
// loop...
const char* p = fmt;
int state = 0;
double d;
while(*p)
{
switch(state) {
case 0:
switch(*p) {
case '%': state = 1; ++p; break;
default:
++p;
}
break;
case 1: // normal
case 2: // l
case 3: // ll
case 4: // L
switch(*p) {
case '%': state = 0; ++p; break; //%% = back to 0
case 'l': ++state; if (state>3) state=3; ++p; break;
case 'z': state = 2; ++p; break;
case 'L': state = 4; ++p; break;
case 'a':
case 'A':
case 'e':
case 'E':
case 'g':
case 'G':
case 'F':
case 'f': state += 10; break; // float
case 'd':
case 'i':
case 'o': state += 20; break; // int
case 'x':
case 'X':
case 'u': state += 40; break; // uint
case 'h': ++p; break; // ignored...
case '\'':
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '.':
case '+':
case '-': ++p; break; // formatting, ignored
case 'm': state = 0; ++p; break; // no argument
case 'n':
case 'p':
case 'S':
case 's': state = 30; break; // pointers
case '$': ++p; break; // should issue a warning, it's not handled...
case '*': *(mystack++) = *(st++); ++p; break; // fetch an int in the stack....
case ' ': state=0; ++p; break;
default:
state=20; // other stuff, put an int...
}
break;
case 11: //double
case 12: //%lg, still double
case 13: //%llg, still double
case 23: // 64bits int
case 43: // 64bits uint
*(uint64_t*)mystack = *(uint64_t*)st;
st+=2; mystack+=1;
state = 0;
++p;
break;
case 22: // long int
*(int64_t*)mystack = from_long(*(long_t*)st);
st+=1; mystack+=1;
state = 0;
++p;
break;
case 42: // long uint
*(uint64_t*)mystack = from_ulong(*(ulong_t*)st);
st+=1; mystack+=1;
state = 0;
++p;
break;
case 14: //%LG long double
#ifdef HAVE_LD80BITS
memcpy(mystack, st, 10);
st+=3; mystack+=2;
#else
LD2D((void*)st, &d);
*(long double*)mystack = (long double)d;
st+=3; mystack+=2;
#endif
state = 0;
++p;
break;
case 30: //pointer
*(uintptr_t*)mystack = from_ptr(*st);
st++; mystack+=1;
state = 0;
++p;
break;
case 20: // fallback
case 21:
case 24: // normal int / pointer
case 40:
case 41:
*mystack = *st;
++mystack;
++st;
state = 0;
++p;
break;
default:
// whattt?
state = 0;
}
}
}
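To make the state machine above concrete, here is a hedged worked example of what myStackAlign32() produces for fmt = "%d %s %g": the 32-bit guest pushes a 4-byte int, a 4-byte pointer and an 8-byte double (two slots), and the native side gets one 8-byte slot per argument (made-up helper name; relies only on what is already included in this file):

// Illustrative sketch (not part of the diff):
static void demo_stack_align32(void)
{
    uint32_t st[4];
    uint64_t out[3];
    double d = 1.5;
    st[0] = 42;                 // %d
    st[1] = 0x12345678u;        // %s: a 32-bit guest pointer value
    memcpy(&st[2], &d, 8);      // %g: spans two 32-bit slots
    myStackAlign32("%d %s %g", st, out);
    // expected: out[0] == 42, out[1] == from_ptr(0x12345678u), out[2] holds the bits of 1.5
}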
void myStackAlignGVariantNew32(const char* fmt, uint32_t* st, uint64_t* mystack)
{
if (!fmt)
return;
const char *p = fmt;
int state = 0;
int inblocks = 0;
int tmp = 0;
do {
switch(state) {
case 0: // Nothing
switch(*p) {
case 'b': // gboolean
case 'y': // guchar
case 'n': // gint16
case 'q': // guint16
case 'i': // gint32
case 'u': // guint32
case 'h': // gint32
case 's': // const gchar*
case 'o':
case 'g':
case 'v': // GVariant*
case '*': // GVariant* of any type
case '?': // GVariant* of basic type
case 'r': // GVariant* of tuple type
*mystack = *st;
++mystack;
++st;
break;
case 'x': // gint64
case 't': // guint64
case 'd': // gdouble
*(uint64_t*)mystack = *(uint64_t*)st;
st+=2; mystack+=1;
break;
case '{':
case '(': ++inblocks; break;
case '}':
case ')': --inblocks; break;
case 'a': state = 1; break; // GVariantBuilder* or GVariantIter**
case 'm': state = 2; break; // maybe types
case '@': state = 3; break; // GVariant* of type [type]
case '^': state = 4; break; // pointer value
case '&': break; // pointer: do nothing
}
break;
case 1: // Arrays
switch(*p) {
case '{':
case '(': ++tmp; break;
case '}':
case ')': --tmp; break;
}
if (*p == 'a') break;
if (tmp == 0) {
*mystack = *st;
++mystack;
++st;
state = 0;
}
break;
case 2: // Maybe-types
switch(*p) {
case 'b': // gboolean
case 'y': // guchar
case 'n': // gint16
case 'q': // guint16
case 'i': // gint32
case 'u': // guint32
case 'h': // gint32
case 'x': // gint64
case 't': // guint64
case 'd': // gdouble
case '{':
case '}':
case '(':
case ')':
// Add a gboolean or gboolean*, no char increment
*mystack = *st;
++mystack;
++st;
--p;
state = 0;
break;
case 'a': // GVariantBuilder* or GVariantIter**
case 's': // const gchar*
case 'o':
case 'g':
case 'v': // GVariant*
case '@': // GVariant* of type [type]
case '*': // GVariant* of any type
case '?': // GVariant* of basic type
case 'r': // GVariant* of tuple type
case '&': // pointer
case '^': // pointer value
// Just maybe-NULL
--p;
state = 0;
break;
default: // Default to add a gboolean & reinit state?
*mystack = *st;
++mystack;
++st;
--p;
state = 0;
}
break;
case 3: // GVariant*
switch(*p) {
case '{':
case '(': ++tmp; break;
case '}':
case ')': --tmp; break;
case 'a': // GVariantBuilder* or GVariantIter**
do { ++p; } while(*p == 'a'); // Use next character which is not an array (array definition)
switch(*p) {
case '{':
case '(': ++tmp; break;
case '}':
case ')': --tmp; break;
}
break;
}
if (tmp == 0) {
*mystack = *st;
++mystack;
++st;
state = 0;
}
break;
case 4: // ^
if (*p == 'a') state = 5;
else if (*p == '&') state = 8;
else state = 0; //???
break;
case 5: // ^a
if ((*p == 's') || (*p == 'o') || (*p == 'y')) {
*mystack = *st;
++mystack;
++st;
state = 0;
} else if (*p == '&') state = 6;
else if (*p == 'a') state = 7;
else state = 0; //???
break;
case 6: // ^a&
if ((*p == 's') || (*p == 'o')) {
*mystack = *st;
++mystack;
++st;
state = 0;
} else if (*p == 'a') state = 7;
else state = 0; //???
break;
case 7: // ^aa / ^a&a
if (*p == 'y') {
*mystack = *st;
++mystack;
++st;
state = 0;
} else state = 0; //???
case 8: // ^&
if (*p == 'a') state = 9;
else state = 0; //???
case 9: // ^&a
if (*p == 'y') {
*mystack = *st;
++mystack;
++st;
state = 0;
} else state = 0; //???
}
++p;
} while (*p && (inblocks || state));
}
void myStackAlignW32(const char* fmt, uint32_t* st, uint64_t* mystack)
{
// loop...
const wchar_t* p = (const wchar_t*)fmt;
int state = 0;
double d;
while(*p)
{
switch(state) {
case 0:
switch(*p) {
case '%': state = 1; ++p; break;
default:
++p;
}
break;
case 1: // normal
case 2: // l
case 3: // ll
case 4: // L
switch(*p) {
case '%': state = 0; ++p; break; //%% = back to 0
case 'l': ++state; if (state>3) state=3; ++p; break;
case 'z': state = 2; ++p; break;
case 'L': state = 4; ++p; break;
case 'a':
case 'A':
case 'e':
case 'E':
case 'g':
case 'G':
case 'F':
case 'f': state += 10; break; // float
case 'd':
case 'i':
case 'o': state += 20; break; // int
case 'x':
case 'X':
case 'u': state += 40; break; // unsigned
case 'h': ++p; break; // ignored...
case '\'':
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '.':
case '+':
case '-': ++p; break; // formatting, ignored
case 'm': state = 0; ++p; break; // no argument
case 'n':
case 'p':
case 'S':
case 's': state = 30; break; // pointers
case '$': ++p; break; // should issue a warning, it's not handled...
case '*': *(mystack++) = *(st++); ++p; break; //fetch an int in the stack
case ' ': state=0; ++p; break;
default:
state=20; // other stuff, put an int...
}
break;
case 11: //double
case 12: //%lg, still double
case 13: //%llg, still double
case 23: // 64bits int
case 43: // 64bits uint
*(uint64_t*)mystack = *(uint64_t*)st;
st+=2; mystack+=1;
state = 0;
++p;
break;
case 22: // long int
*(int64_t*)mystack = from_long(*(long_t*)st);
st+=1; mystack+=1;
state = 0;
++p;
break;
case 42: // long uint
*(uint64_t*)mystack = from_ulong(*(ulong_t*)st);
st+=1; mystack+=1;
state = 0;
++p;
break;
case 14: //%LG long double
#ifdef HAVE_LD80BITS
memcpy(mystack, st, 10);
st+=3; mystack+=2;
#else
LD2D((void*)st, &d);
*(long double*)mystack = (long double)d;
st+=3; mystack+=2;
#endif
state = 0;
++p;
break;
case 30: //pointer
*(uintptr_t*)mystack = from_ptr(*st);
st++; mystack+=1;
state = 0;
++p;
break;
case 20: // fallback
case 40:
case 21:
case 24: // normal int / pointer
*mystack = *st;
++mystack;
++st;
state = 0;
++p;
break;
default:
// whattt?
state = 0;
}
}
}
#if 0
typedef struct __attribute__((packed)) {
unsigned char *body_data;
long body_storage;
long body_fill;
long body_returned;
int *lacing_vals;
int64_t *granule_vals;
long lacing_storage;
long lacing_fill;
long lacing_packet;
long lacing_returned;
unsigned char header[282];
int header_fill __attribute__ ((aligned (4)));
int e_o_s;
int b_o_s;
long serialno;
long pageno;
int64_t packetno;
int64_t granulepos;
} ogg_stream_state_x64;
typedef struct __attribute__((packed)) vorbis_dsp_state_x64 {
int analysisp;
void *vi; //vorbis_info
float **pcm;
float **pcmret;
int pcm_storage;
int pcm_current;
int pcm_returned;
int preextrapolate;
int eofflag;
long lW;
long W;
long nW;
long centerW;
int64_t granulepos;
int64_t sequence;
int64_t glue_bits;
int64_t time_bits;
int64_t floor_bits;
int64_t res_bits;
void *backend_state;
} vorbis_dsp_state_x64;
typedef struct __attribute__((packed)) {
long endbyte;
int endbit;
unsigned char *buffer;
unsigned char *ptr;
long storage;
} oggpack_buffer_x64;
typedef struct __attribute__((packed)) vorbis_block_x64 {
float **pcm;
oggpack_buffer_x64 opb;
long lW;
long W;
long nW;
int pcmend;
int mode;
int eofflag;
int64_t granulepos;
int64_t sequence;
void *vd;
void *localstore;
long localtop;
long localalloc;
long totaluse;
void *reap;
long glue_bits;
long time_bits;
long floor_bits;
long res_bits;
void *internal;
} vorbis_block_x64;
typedef struct __attribute__((packed)) OggVorbis_x64 {
void *datasource; /* Pointer to a FILE *, etc. */
int seekable;
int64_t offset;
int64_t end;
ogg_sync_state oy;
/* If the FILE handle isn't seekable (eg, a pipe), only the current
stream appears */
int links;
int64_t *offsets;
int64_t *dataoffsets;
long *serialnos;
int64_t *pcmlengths; /* overloaded to maintain binary
compatibility; x2 size, stores both
beginning and end values */
void *vi; //vorbis_info
void *vc; //vorbis_comment
/* Decoding working state local storage */
int64_t pcm_offset;
int ready_state;
long current_serialno;
int current_link;
double bittrack;
double samptrack;
ogg_stream_state_x64 os; /* take physical pages, weld into a logical
stream of packets */
vorbis_dsp_state_x64 vd; /* central working state for the packet->PCM decoder */
vorbis_block_x64 vb; /* local working space for packet->PCM decode */
ov_callbacks callbacks;
} OggVorbis_x64;
#define TRANSFERT \
GO(datasource) \
GO(seekable) \
GO(offset) \
GO(end) \
GOM(oy, sizeof(ogg_sync_state)) \
GO(links) \
GO(offsets) \
GO(dataoffsets) \
GO(serialnos) \
GO(pcmlengths) \
GO(vi) \
GO(vc) \
GO(pcm_offset) \
GO(ready_state) \
GO(current_serialno) \
GO(current_link) \
GOM(bittrack, 16) \
GO(os.body_data) \
GO(os.body_storage) \
GO(os.body_fill) \
GO(os.body_returned) \
GO(os.lacing_vals) \
GO(os.granule_vals) \
GO(os.lacing_storage) \
GO(os.lacing_fill) \
GO(os.lacing_packet) \
GO(os.lacing_returned) \
GOM(os.header, 282) \
GO(os.header_fill) \
GO(os.e_o_s) \
GO(os.b_o_s) \
GO(os.serialno) \
GO(os.pageno) \
GO(os.packetno) \
GO(os.granulepos) \
GO(vd.analysisp) \
GO(vd.vi) \
GO(vd.pcm) \
GO(vd.pcmret) \
GO(vd.pcm_storage) \
GO(vd.pcm_current) \
GO(vd.pcm_returned) \
GO(vd.preextrapolate) \
GO(vd.eofflag) \
GO(vd.lW) \
GO(vd.W) \
GO(vd.nW) \
GO(vd.centerW) \
GO(vd.granulepos) \
GO(vd.sequence) \
GO(vd.glue_bits) \
GO(vd.time_bits) \
GO(vd.floor_bits) \
GO(vd.res_bits) \
GO(vd.backend_state) \
GO(vb.pcm) \
GO(vb.opb.endbyte) \
GO(vb.opb.endbit) \
GO(vb.opb.buffer) \
GO(vb.opb.ptr) \
GO(vb.opb.storage) \
GO(vb.lW) \
GO(vb.W) \
GO(vb.nW) \
GO(vb.pcmend) \
GO(vb.mode) \
GO(vb.eofflag) \
GO(vb.granulepos) \
GO(vb.sequence) \
GO(vb.localstore) \
GO(vb.localtop) \
GO(vb.localalloc) \
GO(vb.totaluse) \
GO(vb.reap) \
GO(vb.glue_bits) \
GO(vb.time_bits) \
GO(vb.floor_bits) \
GO(vb.res_bits) \
GO(vb.internal) \
GOM(callbacks, sizeof(ov_callbacks))
void AlignOggVorbis(void* dest, void* source)
{
// Arm -> x64
OggVorbis_x64* src = (OggVorbis_x64*)source;
OggVorbis* dst = (OggVorbis*)dest;
#define GO(A) dst->A = src->A;
#define GOM(A, S) memcpy(&dst->A, &src->A, S);
TRANSFERT
#undef GO
#undef GOM
dst->vb.vd = (src->vb.vd == &src->vd)?&dst->vd:(vorbis_dsp_state*)src->vb.vd;
}
void UnalignOggVorbis(void* dest, void* source)
{
// x64 -> Arm
OggVorbis_x64* dst = (OggVorbis_x64*)dest;
OggVorbis* src = (OggVorbis*)source;
#define GO(A) dst->A = src->A;
#define GOM(A, S) memcpy(&dst->A, &src->A, S);
TRANSFERT
#undef GO
#undef GOM
dst->vb.vd = (src->vb.vd == &src->vd)?&dst->vd:(vorbis_dsp_state_x64*)src->vb.vd;
}
#undef TRANSFERT
#define TRANSFERT \
GO(analysisp) \
GO(vi) \
GO(pcm) \
GO(pcmret) \
GO(pcm_storage) \
GO(pcm_current) \
GO(pcm_returned) \
GO(preextrapolate) \
GO(eofflag) \
GO(lW) \
GO(W) \
GO(nW) \
GO(centerW) \
GO(granulepos) \
GO(sequence) \
GO(glue_bits) \
GO(time_bits) \
GO(floor_bits) \
GO(res_bits) \
GO(backend_state)
void UnalignVorbisDspState(void* dest, void* source)
{
// Arm -> x64
#define GO(A) ((vorbis_dsp_state_x64*)dest)->A = ((vorbis_dsp_state*)source)->A;
#define GOM(A, S) memcpy(&((vorbis_dsp_state_x64*)dest)->A, &((vorbis_dsp_state*)source)->A, S);
TRANSFERT
#undef GO
#undef GOM
}
void AlignVorbisDspState(void* dest, void* source)
{
// x64 -> Arm
#define GO(A) ((vorbis_dsp_state*)dest)->A = ((vorbis_dsp_state_x64*)source)->A;
#define GOM(A, S) memcpy(&((vorbis_dsp_state*)dest)->A, &((vorbis_dsp_state_x64*)source)->A, S);
TRANSFERT
#undef GO
#undef GOM
}
#undef TRANSFERT
#define TRANSFERT \
GO(pcm) \
GO(opb.endbyte) \
GO(opb.endbit) \
GO(opb.buffer) \
GO(opb.ptr) \
GO(opb.storage) \
GO(lW) \
GO(W) \
GO(nW) \
GO(pcmend) \
GO(mode) \
GO(eofflag) \
GO(granulepos) \
GO(sequence) \
GO(vd) \
GO(localstore) \
GO(localtop) \
GO(localalloc) \
GO(totaluse) \
GO(reap) \
GO(glue_bits) \
GO(time_bits) \
GO(floor_bits) \
GO(res_bits) \
GO(internal)
void UnalignVorbisBlock(void* dest, void* source)
{
// Arm -> x64
#define GO(A) ((vorbis_block_x64*)dest)->A = ((vorbis_block*)source)->A;
#define GOM(A, S) memcpy(&((vorbis_block_x64*)dest)->A, &((vorbis_block*)source)->A, S);
TRANSFERT
#undef GO
#undef GOM
}
void AlignVorbisBlock(void* dest, void* source)
{
// x64 -> Arm
#define GO(A) ((vorbis_block*)dest)->A = ((vorbis_block_x64*)source)->A;
#define GOM(A, S) memcpy(&((vorbis_block*)dest)->A, &((vorbis_block_x64*)source)->A, S);
TRANSFERT
#undef GO
#undef GOM
}
#undef TRANSFERT
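#if 0
// Illustrative sketch (hypothetical names, for illustration only): the TRANSFERT/GO/GOM
// pattern above generalizes to any packed-x64 vs native struct pair. Declare the field
// list once, then expand it with direction-specific GO/GOM macros so the Align/Unalign
// pair can never drift apart. example_x64_t / example_t / AlignExample are made up.
typedef struct __attribute__((packed)) { int a; void* p; char name[16]; } example_x64_t;
typedef struct { int a; void* p; char name[16]; } example_t;
#define TRANSFERT_EXAMPLE \
GO(a) \
GO(p) \
GOM(name, 16)
static void AlignExample(example_t* dst, example_x64_t* src)
{
#define GO(A) dst->A = src->A;
#define GOM(A, S) memcpy(&dst->A, &src->A, S);
TRANSFERT_EXAMPLE
#undef GO
#undef GOM
}
#endif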
typedef union __attribute__((packed)) x64_epoll_data {
void *ptr;
int fd;
uint32_t u32;
uint64_t u64;
} x64_epoll_data_t;
struct __attribute__((packed)) x64_epoll_event {
uint32_t events;
x64_epoll_data_t data;
};
// Arm -> x64
void UnalignEpollEvent(void* dest, void* source, int nbr)
{
struct x64_epoll_event *x64_struct = (struct x64_epoll_event*)dest;
struct epoll_event *arm_struct = (struct epoll_event*)source;
while(nbr) {
x64_struct->events = arm_struct->events;
x64_struct->data.u64 = arm_struct->data.u64;
++x64_struct;
++arm_struct;
--nbr;
}
}
// x64 -> Arm
void AlignEpollEvent(void* dest, void* source, int nbr)
{
struct x64_epoll_event *x64_struct = (struct x64_epoll_event*)source;
struct epoll_event *arm_struct = (struct epoll_event*)dest;
while(nbr) {
arm_struct->events = x64_struct->events;
arm_struct->data.u64 = x64_struct->data.u64;
++x64_struct;
++arm_struct;
--nbr;
}
}
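#if 0
// Illustrative sketch (hypothetical wrapper name, for illustration only): how a wrapped
// 32-bit epoll_wait would typically use the helpers above: run the native call into a
// native buffer, then repack the results into the guest's packed x64_epoll_event layout.
#include <sys/epoll.h>
int my32_epoll_wait_example(int epfd, void* guest_events, int maxevents, int timeout)
{
struct epoll_event native[64];
if(maxevents>64) maxevents = 64; // keep the sketch simple
int n = epoll_wait(epfd, native, maxevents, timeout);
if(n>0)
UnalignEpollEvent(guest_events, native, n); // Arm -> x64: native results back to the guest layout
return n;
}
#endif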
typedef struct __attribute__((packed)) x64_SMPEG_Info_s {
int has_audio;
int has_video;
int width;
int height;
int current_frame;
double current_fps;
char audio_string[80];
int audio_current_frame;
uint32_t current_offset;
uint32_t total_size;
double current_time;
double total_time;
} x64_SMPEG_Info_t;
#define TRANSFERT \
GO(has_audio) \
GO(has_video) \
GO(width) \
GO(height) \
GO(current_frame) \
GO(current_fps) \
GOM(audio_string, 80) \
GO(audio_current_frame) \
GO(current_offset) \
GO(total_size) \
GO(current_time) \
GO(total_time)
// Arm -> x64
void UnalignSmpegInfo(void* dest, void* source)
{
#define GO(A) ((x64_SMPEG_Info_t*)dest)->A = ((my_SMPEG_Info_t*)source)->A;
#define GOM(A, S) memcpy(&((x64_SMPEG_Info_t*)dest)->A, &((my_SMPEG_Info_t*)source)->A, S);
TRANSFERT
#undef GO
#undef GOM
}
// x64 -> Arm
void AlignSmpegInfo(void* dest, void* source)
{
#define GO(A) ((my_SMPEG_Info_t*)dest)->A = ((x64_SMPEG_Info_t*)source)->A;
#define GOM(A, S) memcpy(&((my_SMPEG_Info_t*)dest)->A, &((x64_SMPEG_Info_t*)source)->A, S);
TRANSFERT
#undef GO
#undef GOM
}
#undef TRANSFERT
#define TRANSFERT \
GOV(fts_cycle) \
GOV(fts_parent) \
GOV(fts_link) \
GO(fts_number) \
GO(fts_pointer) \
GO(fts_accpath) \
GO(fts_path) \
GO(fts_errno) \
GO(fts_symfd) \
GO(fts_pathlen) \
GO(fts_namelen) \
GO(fts_ino) \
GO(fts_dev) \
GO(fts_nlink) \
GO(fts_level) \
GO(fts_info) \
GO(fts_flags) \
GO(fts_instr) \
GO(fts_statp) \
GOM(fts_name, sizeof(void*))
// Arm -> x64
void UnalignFTSENT(void* dest, void* source)
{
#define GO(A) ((x64_ftsent_t*)dest)->A = ((FTSENT*)source)->A;
#define GOV(A) ((x64_ftsent_t*)dest)->A = (void*)((FTSENT*)source)->A;
#define GOM(A, S) memcpy(&((x64_ftsent_t*)dest)->A, &((FTSENT*)source)->A, S);
TRANSFERT
#undef GO
#undef GOV
#undef GOM
}
// x64 -> Arm
void AlignFTSENT(void* dest, void* source)
{
#define GO(A) ((FTSENT*)dest)->A = ((x64_ftsent_t*)source)->A;
#define GOV(A) ((FTSENT*)dest)->A = (void*)((x64_ftsent_t*)source)->A;
#define GOM(A, S) memcpy(&((FTSENT*)dest)->A, &((x64_ftsent_t*)source)->A, S);
TRANSFERT
#undef GO
#undef GOV
#undef GOM
}
#undef TRANSFERT
void alignNGValue(my_GValue_t* v, void* value, int n)
{
while(n) {
v->g_type = *(int*)value;
memcpy(v->data, value+4, 2*sizeof(double));
++v;
value+=4+2*sizeof(double);
--n;
}
}
void unalignNGValue(void* value, my_GValue_t* v, int n)
{
while(n) {
*(int*)value = v->g_type;
memcpy(value+4, v->data, 2*sizeof(double));
++v;
value+=4+2*sizeof(double);
--n;
}
}
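#if 0
// For reference, the packed 32-bit GValue layout assumed by the two loops above
// (hypothetical mirror struct, shown only to document the 4 + 2*sizeof(double) stride):
typedef struct __attribute__((packed)) {
int g_type; // truncated GType, 4 bytes on the 32-bit side
double data[2]; // the 2-entry data union, copied as raw bytes
} example_gvalue32_t;
#endif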
#endif

104
src/libtools/myalign64_32.c Executable file

@ -0,0 +1,104 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <asm/stat.h>
#include <sys/vfs.h>
#include "x64emu.h"
#include "emu/x64emu_private.h"
#include "myalign32.h"
#include "box32.h"
void UnalignStat64_32(const void* source, void* dest)
{
struct i386_stat64 *i386st = (struct i386_stat64*)dest;
struct stat *st = (struct stat*) source;
memset(i386st->__pad0, 0, sizeof(i386st->__pad0));
memset(i386st->__pad3, 0, sizeof(i386st->__pad3));
i386st->st_dev = st->st_dev;
i386st->__st_ino = st->st_ino;
i386st->st_mode = st->st_mode;
i386st->st_nlink = st->st_nlink;
i386st->st_uid = st->st_uid;
i386st->st_gid = st->st_gid;
i386st->st_rdev = st->st_rdev;
i386st->st_size = st->st_size;
i386st->st_blksize = st->st_blksize;
i386st->st_blocks = st->st_blocks;
i386st->st_atime = st->st_atime;
i386st->st_atime_nsec = st->st_atime_nsec;
i386st->st_mtime = st->st_mtime;
i386st->st_mtime_nsec = st->st_mtime_nsec;
i386st->st_ctime = st->st_ctime;
i386st->st_ctime_nsec = st->st_ctime_nsec;
i386st->st_ino = st->st_ino;
}
struct native_fsid {
int val[2];
};
struct native_statfs64 {
uint32_t f_type;
uint32_t f_bsize;
uint64_t f_blocks;
uint64_t f_bfree;
uint64_t f_bavail;
uint64_t f_files;
uint64_t f_ffree;
struct native_fsid f_fsid;
uint32_t f_namelen;
uint32_t f_frsize;
uint32_t f_flags;
uint32_t f_spare[4];
}; // f_flags is not always defined; when it isn't, f_spare is [5] instead
void UnalignStatFS64_32(const void* source, void* dest)
{
struct i386_statfs64 *i386st = (struct i386_statfs64*)dest;
struct native_statfs64 *st = (struct native_statfs64*) source;
i386st->f_type = st->f_type;
i386st->f_bsize = st->f_bsize;
i386st->f_blocks = st->f_blocks;
i386st->f_bfree = st->f_bfree;
i386st->f_bavail = st->f_bavail;
i386st->f_files = st->f_files;
i386st->f_ffree = st->f_ffree;
memcpy(&i386st->f_fsid, &st->f_fsid, sizeof(i386st->f_fsid));
i386st->f_namelen = st->f_namelen;
i386st->f_frsize = st->f_frsize;
i386st->f_flags = st->f_flags;
i386st->f_spare[0] = st->f_spare[0];
i386st->f_spare[1] = st->f_spare[1];
i386st->f_spare[2] = st->f_spare[2];
i386st->f_spare[3] = st->f_spare[3];
}
#if 0
#define TRANSFERT \
GO(l_type) \
GO(l_whence) \
GO(l_start) \
GO(l_len) \
GO(l_pid)
// Arm -> x64
void UnalignFlock64_32(void* dest, void* source)
{
#define GO(A) ((x64_flock64_t*)dest)->A = ((my_flock64_t*)source)->A;
TRANSFERT
#undef GO
}
// x64 -> Arm
void AlignFlock64_32(void* dest, void* source)
{
#define GO(A) ((my_flock64_t*)dest)->A = ((x64_flock64_t*)source)->A;
TRANSFERT
#undef GO
}
#undef TRANSFERT
#endif

842
src/libtools/signal32.c Normal file

@ -0,0 +1,842 @@
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <syscall.h>
#include <stddef.h>
#include <stdarg.h>
#include <ucontext.h>
#include <setjmp.h>
#include <sys/mman.h>
#include <pthread.h>
#ifndef ANDROID
#include <execinfo.h>
#endif
#include "box32context.h"
#include "debug.h"
#include "x64emu.h"
#include "emu/x64emu_private.h"
#include "emu/x64run_private.h"
#include "signals.h"
#include "box64stack.h"
#include "dynarec.h"
#include "callback.h"
#include "x64run.h"
#include "elfloader.h"
#include "threads.h"
#include "emu/x87emu_private.h"
#include "custommem.h"
#ifdef DYNAREC
#include "dynablock.h"
#include "../dynarec/dynablock_private.h"
#include "dynarec_native.h"
#endif
/* Definitions taken from the kernel headers. */
enum
{
I386_GS = 0,
# define I386_GS I386_GS
I386_FS,
# define I386_FS I386_FS
I386_ES,
# define I386_ES I386_ES
I386_DS,
# define I386_DS I386_DS
I386_EDI,
# define I386_EDI I386_EDI
I386_ESI,
# define I386_ESI I386_ESI
I386_EBP,
# define I386_EBP I386_EBP
I386_ESP,
# define I386_ESP I386_ESP
I386_EBX,
# define I386_EBX I386_EBX
I386_EDX,
# define I386_EDX I386_EDX
I386_ECX,
# define I386_ECX I386_ECX
I386_EAX,
# define I386_EAX I386_EAX
I386_TRAPNO,
# define I386_TRAPNO I386_TRAPNO
I386_ERR,
# define I386_ERR I386_ERR
I386_EIP,
# define I386_EIP I386_EIP
I386_CS,
# define I386_CS I386_CS
I386_EFL,
# define I386_EFL I386_EFL
I386_UESP,
# define I386_UESP I386_UESP
I386_SS
# define I386_SS I386_SS
};
typedef uint32_t i386_gregset_t[19];
struct i386_fpreg
{
uint16_t significand[4];
uint16_t exponent;
}__attribute__((packed));
struct i386_fpxreg
{
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
}__attribute__((packed));
struct i386_xmmreg
{
uint32_t element[4];
}__attribute__((packed));
struct i386_fpstate
{
/* Regular FPU environment. */
uint32_t cw;
uint32_t sw;
uint32_t tag;
uint32_t ipoff;
uint32_t cssel;
uint32_t dataoff;
uint32_t datasel;
struct i386_fpreg _st[8];
uint32_t status_magic;
/* FXSR FPU environment. */
uint32_t _fxsr_env[6];
uint32_t mxcsr;
uint32_t reserved;
struct i386_fpxreg _fxsr_st[8];
struct i386_xmmreg _xmm[8];
uint32_t padding[56];
}__attribute__((packed));
typedef struct i386_fpstate *i386_fpregset_t;
static void save_fpreg(x64emu_t* emu, struct i386_fpstate* state)
{
emu->sw.f.F87_TOP = emu->top&7;
state->sw = emu->sw.x16;
state->cw = emu->cw.x16;
// save SSE and MMX regs
fpu_fxsave32(emu, &state->_fxsr_env);
}
static void load_fpreg(x64emu_t* emu, struct i386_fpstate* state)
{
// copy SSE and MMX regs
fpu_fxrstor32(emu, &state->_fxsr_env);
emu->cw.x16 = state->cw;
emu->sw.x16 = state->sw;
emu->top = emu->sw.f.F87_TOP&7;
}
typedef struct
{
ptr_t ss_sp;
int ss_flags;
long_t ss_size;
} i386_stack_t;
typedef struct x64_stack_s
{
void *ss_sp;
int ss_flags;
size_t ss_size;
} x64_stack_t;
/*
another way to see the sigcontext
struct sigcontext
{
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
struct _fpstate * fpstate;
unsigned long oldmask;
unsigned long cr2;
};
*/
typedef struct
{
i386_gregset_t gregs;
ptr_t fpregs; //i386_fpregset_t
uint32_t oldmask;
uint32_t cr2;
} i386_mcontext_t;
// /!\ signal sig_set is different than glibc __sig_set
#define _NSIG_WORDS (64 / 32)
typedef unsigned long i386_old_sigset_t;
typedef struct {
unsigned long sig[_NSIG_WORDS];
} i386_sigset_t;
struct i386_xsave_hdr_struct {
uint64_t xstate_bv;
uint64_t reserved1[2];
uint64_t reserved2[5];
};
struct i386_xstate {
/*
* Applications need to refer to fpstate through fpstate pointer
* in sigcontext. Not here directly.
*/
struct i386_fpstate fpstate;
struct i386_xsave_hdr_struct xsave_hdr;
/* new processor state extensions will go here */
} __attribute__ ((aligned (64)));
struct i386_xstate_cntxt {
ptr_t xstate; //struct i386_xstate *xstate;
uint32_t size;
uint32_t lmask;
uint32_t hmask;
};
typedef struct i386_ucontext_s
{
uint32_t uc_flags;
ptr_t uc_link; //struct i386_ucontext_s *uc_link;
i386_stack_t uc_stack;
i386_mcontext_t uc_mcontext;
i386_sigset_t uc_sigmask;
/* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */
int unused[32 - (sizeof (sigset_t) / sizeof (int))];
//struct i386_xstate_cntxt uc_xstate;
struct i386_xstate xstate;
} i386_ucontext_t;
typedef struct i386_sigframe_s {
ptr_t pretcode; // pointer to retcode
int sig;
i386_mcontext_t cpustate;
struct i386_xstate fpstate;
ptr_t extramask[64-1];
char retcode[8];
} i386_sigframe_t;
struct kernel_sigaction {
void (*k_sa_handler) (int);
unsigned long sa_flags;
void (*sa_restorer) (void);
unsigned long sa_mask;
unsigned long sa_mask2;
};
#ifdef DYNAREC
uintptr_t getX64Address(dynablock_t* db, uintptr_t arm_addr);
#endif
x64_stack_t* sigstack_getstack();
int my_sigaltstack(x64emu_t* emu, const x64_stack_t* ss, x64_stack_t* oss);
EXPORT int my32_sigaltstack(x64emu_t* emu, const i386_stack_t* ss, i386_stack_t* oss)
{
x64_stack_t ss_ = {0};
x64_stack_t oss_ = {0};
if(ss) {
ss_.ss_flags = ss->ss_flags;
ss_.ss_sp = from_ptrv(ss->ss_sp);
ss_.ss_size = ss->ss_size;
}
int ret = my_sigaltstack(emu, ss?(&ss_):NULL, oss?(&oss_):NULL);
if(!ret && oss) {
oss->ss_flags = oss_.ss_flags;
oss->ss_sp = to_ptrv(oss_.ss_sp);
oss->ss_size = oss_.ss_size;
}
return ret;
}
uint32_t RunFunctionHandler32(int* exit, int dynarec, i386_ucontext_t* sigcontext, ptr_t fnc, int nargs, ...)
{
if(fnc==0 || fnc==1) {
va_list va;
va_start (va, nargs);
int sig = va_arg(va, int);
va_end (va);
printf_log(LOG_NONE, "%04d|BOX32: Warning, calling Signal %d function handler %s\n", GetTID(), sig, fnc?"SIG_IGN":"SIG_DFL");
if(fnc==0) {
printf_log(LOG_NONE, "Unhandled signal caught, aborting\n");
abort();
}
return 0;
}
#ifdef HAVE_TRACE
uintptr_t old_start = trace_start, old_end = trace_end;
#if 0
trace_start = 0; trace_end = 1; // disabling trace, globally for now...
#endif
#endif
#ifndef USE_CUSTOM_MEM
// because a signal can interrupt a malloc-like function
// Dynarec cannot be used in signal handling unless custom malloc is used
dynarec = 0;
#endif
x64emu_t *emu = thread_get_emu();
#ifdef DYNAREC
if(box64_dynarec_test)
emu->test.test = 0;
#endif
/*SetFS(emu, default_fs);*/
for (int i=0; i<6; ++i)
emu->segs_serial[i] = 0;
int align = nargs&1;
R_ESP -= nargs * sizeof(ptr_t);
uint32_t *p = (uint32_t*)from_ptrv(R_ESP);
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
uint32_t v = va_arg(va, uint32_t);
*p = v;
p++;
}
va_end (va);
printf_log(LOG_DEBUG, "%04d|signal #%d function handler %p called, RSP=%p\n", GetTID(), R_EDI, from_ptrv(fnc), from_ptrv(R_ESP));
int oldquitonlongjmp = emu->flags.quitonlongjmp;
emu->flags.quitonlongjmp = 2;
int old_cs = R_CS;
R_CS = 0x23;
emu->eflags.x64 &= ~(1<<F_TF); // this one needs to be cleared
if(dynarec)
DynaCall(emu, fnc);
else
EmuCall(emu, fnc);
if(!emu->flags.longjmp)
R_ESP+=nargs*sizeof(ptr_t);
if(!emu->flags.longjmp && R_CS==0x23)
R_CS = old_cs;
emu->flags.quitonlongjmp = oldquitonlongjmp;
#ifdef DYNAREC
if(box64_dynarec_test) {
emu->test.test = 0;
emu->test.clean = 0;
}
#endif
if(emu->flags.longjmp) {
// longjmp inside the signal handler: grab all relevant values and do the actual longjmp in the signal handler
emu->flags.longjmp = 0;
if(sigcontext) {
sigcontext->uc_mcontext.gregs[I386_EAX] = R_EAX;
sigcontext->uc_mcontext.gregs[I386_ECX] = R_ECX;
sigcontext->uc_mcontext.gregs[I386_EDX] = R_EDX;
sigcontext->uc_mcontext.gregs[I386_EDI] = R_EDI;
sigcontext->uc_mcontext.gregs[I386_ESI] = R_ESI;
sigcontext->uc_mcontext.gregs[I386_EBP] = R_EBP;
sigcontext->uc_mcontext.gregs[I386_ESP] = R_ESP;
sigcontext->uc_mcontext.gregs[I386_EBX] = R_EBX;
sigcontext->uc_mcontext.gregs[I386_EIP] = R_EIP;
// flags
sigcontext->uc_mcontext.gregs[I386_EFL] = emu->eflags.x64;
// get segments
sigcontext->uc_mcontext.gregs[I386_CS] = R_CS;
sigcontext->uc_mcontext.gregs[I386_DS] = R_DS;
sigcontext->uc_mcontext.gregs[I386_ES] = R_ES;
sigcontext->uc_mcontext.gregs[I386_SS] = R_SS;
sigcontext->uc_mcontext.gregs[I386_FS] = R_FS;
sigcontext->uc_mcontext.gregs[I386_GS] = R_GS;
} else {
printf_log(LOG_NONE, "Warning, longjmp in signal but no sigcontext to change\n");
}
}
if(exit)
*exit = emu->exit;
uint32_t ret = R_EAX;
#ifdef HAVE_TRACE
trace_start = old_start; trace_end = old_end;
#endif
return ret;
}
#define is_memprot_locked (1<<1)
#define is_dyndump_locked (1<<8)
void my_sigactionhandler_oldcode_32(int32_t sig, int simple, siginfo_t* info, void * ucntx, int* old_code, void* cur_db)
{
int Locks = unlockMutex();
printf_log(LOG_DEBUG, "Sigactionhandler for signal #%d called (jump to %p/%s)\n", sig, (void*)my_context->signals[sig], GetNativeName((void*)my_context->signals[sig]));
uintptr_t restorer = my_context->restorer[sig];
// get the actual ESP first!
x64emu_t *emu = thread_get_emu();
uintptr_t frame = R_RSP;
#if defined(DYNAREC)
#if defined(ARM64)
dynablock_t* db = (dynablock_t*)cur_db;//FindDynablockFromNativeAddress(pc);
ucontext_t *p = (ucontext_t *)ucntx;
void* pc = NULL;
if(p) {
pc = (void*)p->uc_mcontext.pc;
if(db)
frame = (uintptr_t)p->uc_mcontext.regs[10+_SP];
}
#elif defined(LA64)
dynablock_t* db = (dynablock_t*)cur_db;//FindDynablockFromNativeAddress(pc);
ucontext_t *p = (ucontext_t *)ucntx;
void* pc = NULL;
if(p) {
pc = (void*)p->uc_mcontext.__pc;
if(db)
frame = (uintptr_t)p->uc_mcontext.__gregs[12+_SP];
}
#elif defined(RV64)
dynablock_t* db = (dynablock_t*)cur_db;//FindDynablockFromNativeAddress(pc);
ucontext_t *p = (ucontext_t *)ucntx;
void* pc = NULL;
if(p) {
pc = (void*)p->uc_mcontext.__gregs[0];
if(db)
frame = (uintptr_t)p->uc_mcontext.__gregs[16+_SP];
}
#else
#error Unsupported architecture
#endif
#else
(void)ucntx; (void)cur_db;
#endif
// setup libc context stack frame, on caller stack
frame = frame&~15;
// stack tracking
x64_stack_t *new_ss = my_context->onstack[sig]?sigstack_getstack():NULL;
int used_stack = 0;
if(new_ss) {
if(new_ss->ss_flags == SS_ONSTACK) { // already using it!
frame = ((uintptr_t)emu->regs[_SP].q[0] - 128) & ~0x0f;
} else {
frame = (uintptr_t)(((uintptr_t)new_ss->ss_sp + new_ss->ss_size - 16) & ~0x0f);
used_stack = 1;
new_ss->ss_flags = SS_ONSTACK;
}
} else {
frame -= 0x200; // redzone
}
// TODO: do I really need to set up 2 stack frames? That doesn't seem right!
// setup stack frame
frame -= 512+64+16*16;
void* xstate = (void*)frame;
frame -= sizeof(siginfo_t);
siginfo_t* info2 = (siginfo_t*)frame;
memcpy(info2, info, sizeof(siginfo_t));
// try to fill some sigcontext....
frame -= sizeof(i386_ucontext_t);
i386_ucontext_t *sigcontext = (i386_ucontext_t*)frame;
// get general register
sigcontext->uc_mcontext.gregs[I386_EAX] = R_EAX;
sigcontext->uc_mcontext.gregs[I386_ECX] = R_ECX;
sigcontext->uc_mcontext.gregs[I386_EDX] = R_EDX;
sigcontext->uc_mcontext.gregs[I386_EDI] = R_EDI;
sigcontext->uc_mcontext.gregs[I386_ESI] = R_ESI;
sigcontext->uc_mcontext.gregs[I386_EBP] = R_EBP;
sigcontext->uc_mcontext.gregs[I386_ESP] = R_ESP;
sigcontext->uc_mcontext.gregs[I386_EBX] = R_EBX;
sigcontext->uc_mcontext.gregs[I386_EIP] = R_EIP;//emu->old_ip; // old_ip should be more accurate as the "current" IP, but it's not always up-to-date
// flags
sigcontext->uc_mcontext.gregs[I386_EFL] = emu->eflags.x64;
// get segments
sigcontext->uc_mcontext.gregs[I386_CS] = R_CS;
sigcontext->uc_mcontext.gregs[I386_DS] = R_DS;
sigcontext->uc_mcontext.gregs[I386_ES] = R_ES;
sigcontext->uc_mcontext.gregs[I386_SS] = R_SS;
sigcontext->uc_mcontext.gregs[I386_FS] = R_FS;
sigcontext->uc_mcontext.gregs[I386_GS] = R_GS;
#if defined(DYNAREC)
#if defined(ARM64)
if(db && p) {
sigcontext->uc_mcontext.gregs[I386_EAX] = p->uc_mcontext.regs[10];
sigcontext->uc_mcontext.gregs[I386_ECX] = p->uc_mcontext.regs[11];
sigcontext->uc_mcontext.gregs[I386_EDX] = p->uc_mcontext.regs[12];
sigcontext->uc_mcontext.gregs[I386_EBX] = p->uc_mcontext.regs[13];
sigcontext->uc_mcontext.gregs[I386_ESP] = p->uc_mcontext.regs[14];
sigcontext->uc_mcontext.gregs[I386_EBP] = p->uc_mcontext.regs[15];
sigcontext->uc_mcontext.gregs[I386_ESI] = p->uc_mcontext.regs[16];
sigcontext->uc_mcontext.gregs[I386_EDI] = p->uc_mcontext.regs[17];
sigcontext->uc_mcontext.gregs[I386_EIP] = getX64Address(db, (uintptr_t)pc);
}
#elif defined(LA64)
if(db && p) {
sigcontext->uc_mcontext.gregs[I386_EAX] = p->uc_mcontext.__gregs[12];
sigcontext->uc_mcontext.gregs[I386_ECX] = p->uc_mcontext.__gregs[13];
sigcontext->uc_mcontext.gregs[I386_EDX] = p->uc_mcontext.__gregs[14];
sigcontext->uc_mcontext.gregs[I386_EBX] = p->uc_mcontext.__gregs[15];
sigcontext->uc_mcontext.gregs[I386_ESP] = p->uc_mcontext.__gregs[16];
sigcontext->uc_mcontext.gregs[I386_EBP] = p->uc_mcontext.__gregs[17];
sigcontext->uc_mcontext.gregs[I386_ESI] = p->uc_mcontext.__gregs[18];
sigcontext->uc_mcontext.gregs[I386_EDI] = p->uc_mcontext.__gregs[19];
sigcontext->uc_mcontext.gregs[I386_EIP] = getX64Address(db, (uintptr_t)pc);
}
#elif defined(RV64)
if(db && p) {
sigcontext->uc_mcontext.gregs[I386_EAX] = p->uc_mcontext.__gregs[16];
sigcontext->uc_mcontext.gregs[I386_ECX] = p->uc_mcontext.__gregs[17];
sigcontext->uc_mcontext.gregs[I386_EDX] = p->uc_mcontext.__gregs[18];
sigcontext->uc_mcontext.gregs[I386_EBX] = p->uc_mcontext.__gregs[19];
sigcontext->uc_mcontext.gregs[I386_ESP] = p->uc_mcontext.__gregs[20];
sigcontext->uc_mcontext.gregs[I386_EBP] = p->uc_mcontext.__gregs[21];
sigcontext->uc_mcontext.gregs[I386_ESI] = p->uc_mcontext.__gregs[22];
sigcontext->uc_mcontext.gregs[I386_EDI] = p->uc_mcontext.__gregs[23];
sigcontext->uc_mcontext.gregs[I386_EIP] = getX64Address(db, (uintptr_t)pc);
}
#else
#error Unsupported architecture
#endif
#endif
// get FloatPoint status
sigcontext->uc_mcontext.fpregs = to_ptrv(xstate);//(struct x64_libc_fpstate*)&sigcontext->xstate;
fpu_xsave_mask(emu, xstate, 1, 0b111);
memcpy(&sigcontext->xstate, xstate, sizeof(sigcontext->xstate));
((struct i386_fpstate*)xstate)->status_magic = 0x46505853; // magic number to signal an XSTATE type of fpregs
// get signal mask
if(new_ss) {
sigcontext->uc_stack.ss_sp = to_ptrv(new_ss->ss_sp);
sigcontext->uc_stack.ss_size = new_ss->ss_size;
sigcontext->uc_stack.ss_flags = new_ss->ss_flags;
} else
sigcontext->uc_stack.ss_flags = SS_DISABLE;
// Try to guess some X64_TRAPNO
/*
TRAP_x86_DIVIDE = 0, // Division by zero exception
TRAP_x86_TRCTRAP = 1, // Single-step exception
TRAP_x86_NMI = 2, // NMI interrupt
TRAP_x86_BPTFLT = 3, // Breakpoint exception
TRAP_x86_OFLOW = 4, // Overflow exception
TRAP_x86_BOUND = 5, // Bound range exception
TRAP_x86_PRIVINFLT = 6, // Invalid opcode exception
TRAP_x86_DNA = 7, // Device not available exception
TRAP_x86_DOUBLEFLT = 8, // Double fault exception
TRAP_x86_FPOPFLT = 9, // Coprocessor segment overrun
TRAP_x86_TSSFLT = 10, // Invalid TSS exception
TRAP_x86_SEGNPFLT = 11, // Segment not present exception
TRAP_x86_STKFLT = 12, // Stack fault
TRAP_x86_PROTFLT = 13, // General protection fault
TRAP_x86_PAGEFLT = 14, // Page fault
TRAP_x86_ARITHTRAP = 16, // Floating point exception
TRAP_x86_ALIGNFLT = 17, // Alignment check exception
TRAP_x86_MCHK = 18, // Machine check exception
TRAP_x86_CACHEFLT = 19 // SIMD exception (via SIGFPE) if CPU is SSE capable otherwise Cache flush exception (via SIGSEV)
*/
uint32_t prot = getProtection((uintptr_t)info->si_addr);
if(sig==SIGBUS)
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 17;
else if(sig==SIGSEGV) {
if((uintptr_t)info->si_addr == sigcontext->uc_mcontext.gregs[I386_EIP]) {
sigcontext->uc_mcontext.gregs[I386_ERR] = (info->si_errno==0x1234)?0:((info->si_errno==0xdead)?(0x2|(info->si_code<<3)):0x0010); // execution flag issue (probably), unless it's a #GP(0)
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = ((info->si_code==SEGV_ACCERR) || (info->si_errno==0x1234) || (info->si_errno==0xdead) || ((uintptr_t)info->si_addr==0))?13:14;
} else if(info->si_code==SEGV_ACCERR && !(prot&PROT_WRITE)) {
sigcontext->uc_mcontext.gregs[I386_ERR] = 0x0002; // write flag issue
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 14;
} else {
if((info->si_code!=SEGV_ACCERR) && labs((intptr_t)info->si_addr-(intptr_t)sigcontext->uc_mcontext.gregs[I386_ESP])<16)
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 12; // stack overflow probably
else
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = (info->si_code == SEGV_ACCERR)?13:14;
//I386_ERR seems to be INT:8 CODE:8. So for a write-access segfault it's 0x0002, for a read it's 0x0004 (and 8 for exec). For an int 2d it could be 0x2D01 for example
sigcontext->uc_mcontext.gregs[I386_ERR] = 0x0004; // read error? there is no execute control in box64 anyway
}
if(info->si_code == SEGV_ACCERR && old_code)
*old_code = -1;
if(info->si_errno==0x1234) {
info2->si_errno = 0;
} else if(info->si_errno==0xdead) {
// INT x
uint8_t int_n = info2->si_code;
info2->si_errno = 0;
info2->si_code = info->si_code;
info2->si_addr = NULL;
// some special cases...
if(int_n==3) {
info2->si_signo = SIGTRAP;
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 3;
sigcontext->uc_mcontext.gregs[I386_ERR] = 0;
} else if(int_n==0x04) {
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 4;
sigcontext->uc_mcontext.gregs[I386_ERR] = 0;
} else if (int_n==0x29 || int_n==0x2c || int_n==0x2d) {
sigcontext->uc_mcontext.gregs[I386_ERR] = 0x02|(int_n<<3);
} else {
sigcontext->uc_mcontext.gregs[I386_ERR] = 0x0a|(int_n<<3);
}
} else if(info->si_errno==0xcafe) {
info2->si_errno = 0;
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 0;
info2->si_signo = SIGFPE;
}
} else if(sig==SIGFPE) {
if (info->si_code == FPE_INTOVF)
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 4;
else
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 19;
} else if(sig==SIGILL)
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = 6;
else if(sig==SIGTRAP) {
info2->si_code = 128;
sigcontext->uc_mcontext.gregs[I386_TRAPNO] = info->si_code;
sigcontext->uc_mcontext.gregs[I386_ERR] = 0;
}
//TODO: SIGABRT generate what?
printf_log(LOG_DEBUG, "Signal %d: si_addr=%p, TRAPNO=%d, ERR=%d, RIP=%p\n", sig, (void*)info2->si_addr, sigcontext->uc_mcontext.gregs[I386_TRAPNO], sigcontext->uc_mcontext.gregs[I386_ERR],from_ptrv(sigcontext->uc_mcontext.gregs[I386_EIP]));
// call the signal handler
i386_ucontext_t sigcontext_copy = *sigcontext;
// save old value from emu
#define GO(A) uint32_t old_##A = R_##A
GO(EAX);
GO(EDI);
GO(ESI);
GO(EDX);
GO(ECX);
GO(EBP);
#undef GO
// set stack pointer
R_ESP = frame;
// set frame pointer
R_EBP = sigcontext->uc_mcontext.gregs[I386_EBP];
int exits = 0;
int ret;
int dynarec = 0;
#ifdef DYNAREC
if(sig!=SIGSEGV && !(Locks&is_dyndump_locked) && !(Locks&is_memprot_locked))
dynarec = 1;
#endif
ret = RunFunctionHandler32(&exits, dynarec, sigcontext, my_context->signals[info2->si_signo], 3, info2->si_signo, info2, sigcontext);
// restore old value from emu
if(used_stack) // release stack
new_ss->ss_flags = 0;
#define GO(A) R_##A = old_##A
GO(EAX);
GO(EDI);
GO(ESI);
GO(EDX);
GO(ECX);
GO(EBP);
#undef GO
if(memcmp(sigcontext, &sigcontext_copy, sizeof(i386_ucontext_t))) {
if(emu->jmpbuf) {
#define GO(R) emu->regs[_##R].q[0]=sigcontext->uc_mcontext.gregs[I386_E##R]
GO(AX);
GO(CX);
GO(DX);
GO(DI);
GO(SI);
GO(BP);
GO(SP);
GO(BX);
#undef GO
emu->ip.q[0]=sigcontext->uc_mcontext.gregs[I386_EIP];
// flags
emu->eflags.x64=sigcontext->uc_mcontext.gregs[I386_EFL];
// get segments
#define GO(S) if(emu->segs[_##S]!=sigcontext->uc_mcontext.gregs[I386_##S]) emu->segs[_##S]=sigcontext->uc_mcontext.gregs[I386_##S]
GO(CS);
GO(DS);
GO(ES);
GO(SS);
GO(GS);
GO(FS);
#undef GO
for(int i=0; i<6; ++i)
emu->segs_serial[i] = 0;
printf_log(LOG_DEBUG, "Context has been changed in Sigactionhandler, doing siglongjmp to resume emu at %p, RSP=%p\n", (void*)R_RIP, (void*)R_RSP);
if(old_code)
*old_code = -1; // re-init the value to allow another segfault at the same place
//relockMutex(Locks); // do not relock mutex, because of the siglongjmp, whatever was running is canceled
#ifdef DYNAREC
if(Locks & is_dyndump_locked)
CancelBlock64(1);
#endif
#ifdef RV64
emu->xSPSave = emu->old_savedsp;
#endif
#ifdef ANDROID
siglongjmp(*emu->jmpbuf, 1);
#else
siglongjmp(emu->jmpbuf, 1);
#endif
}
printf_log(LOG_INFO, "Warning, context has been changed in Sigactionhandler%s\n", (sigcontext->uc_mcontext.gregs[I386_EIP]!=sigcontext_copy.uc_mcontext.gregs[I386_EIP])?" (EIP changed)":"");
}
// restore regs...
#define GO(R) R_##R=sigcontext->uc_mcontext.gregs[I386_##R]
GO(EAX);
GO(ECX);
GO(EDX);
GO(EDI);
GO(ESI);
GO(EBP);
GO(ESP);
GO(EBX);
#undef GO
emu->eflags.x64=sigcontext->uc_mcontext.gregs[I386_EFL];
#define GO(R) R_##R=sigcontext->uc_mcontext.gregs[I386_##R]
GO(CS);
GO(DS);
GO(ES);
GO(SS);
GO(GS);
GO(FS);
#undef GO
printf_log(LOG_DEBUG, "Sigactionhandler main function returned (exit=%d, restorer=%p)\n", exits, (void*)restorer);
if(exits) {
//relockMutex(Locks); // the thread will exit, so no relock there
#ifdef DYNAREC
if(Locks & is_dyndump_locked)
CancelBlock64(1);
#endif
exit(ret);
}
if(restorer)
RunFunctionHandler32(&exits, 0, NULL, restorer, 0);
relockMutex(Locks);
}
EXPORT int my32_getcontext(x64emu_t* emu, void* ucp)
{
// printf_log(LOG_NONE, "Warning: call to partially implemented getcontext\n");
i386_ucontext_t *u = (i386_ucontext_t*)ucp;
// stack tracking
u->uc_stack.ss_sp = 0;
u->uc_stack.ss_size = 0; // this needs to be filled
// get general register
u->uc_mcontext.gregs[I386_EAX] = R_EAX;
u->uc_mcontext.gregs[I386_ECX] = R_ECX;
u->uc_mcontext.gregs[I386_EDX] = R_EDX;
u->uc_mcontext.gregs[I386_EDI] = R_EDI;
u->uc_mcontext.gregs[I386_ESI] = R_ESI;
u->uc_mcontext.gregs[I386_EBP] = R_EBP;
u->uc_mcontext.gregs[I386_EIP] = *(uint32_t*)from_ptrv(R_ESP);
u->uc_mcontext.gregs[I386_ESP] = R_ESP+4;
u->uc_mcontext.gregs[I386_EBX] = R_EBX;
// get segments
u->uc_mcontext.gregs[I386_GS] = R_GS;
u->uc_mcontext.gregs[I386_FS] = R_FS;
u->uc_mcontext.gregs[I386_ES] = R_ES;
u->uc_mcontext.gregs[I386_DS] = R_DS;
u->uc_mcontext.gregs[I386_CS] = R_CS;
u->uc_mcontext.gregs[I386_SS] = R_SS;
// get FloatPoint status
if(u->uc_mcontext.fpregs)
save_fpreg(emu, from_ptrv(u->uc_mcontext.fpregs));
// get signal mask
sigprocmask(SIG_SETMASK, NULL, (sigset_t*)&u->uc_sigmask);
// ensure uc_link is properly initialized
u->uc_link = to_ptrv(emu->uc_link);
return 0;
}
EXPORT int my32_setcontext(x64emu_t* emu, void* ucp)
{
// printf_log(LOG_NONE, "Warning: call to partially implemented setcontext\n");
i386_ucontext_t *u = (i386_ucontext_t*)ucp;
// stack tracking
emu->init_stack = from_ptrv(u->uc_stack.ss_sp);
emu->size_stack = from_ulong(u->uc_stack.ss_size);
// set general register
R_EAX = u->uc_mcontext.gregs[I386_EAX];
R_ECX = u->uc_mcontext.gregs[I386_ECX];
R_EDX = u->uc_mcontext.gregs[I386_EDX];
R_EDI = u->uc_mcontext.gregs[I386_EDI];
R_ESI = u->uc_mcontext.gregs[I386_ESI];
R_EBP = u->uc_mcontext.gregs[I386_EBP];
R_EIP = u->uc_mcontext.gregs[I386_EIP];
R_ESP = u->uc_mcontext.gregs[I386_ESP];
R_EBX = u->uc_mcontext.gregs[I386_EBX];
// get segments
R_GS = u->uc_mcontext.gregs[I386_GS];
R_FS = u->uc_mcontext.gregs[I386_FS];
R_ES = u->uc_mcontext.gregs[I386_ES];
R_DS = u->uc_mcontext.gregs[I386_DS];
R_CS = u->uc_mcontext.gregs[I386_CS];
R_SS = u->uc_mcontext.gregs[I386_SS];
// set FloatPoint status
if(u->uc_mcontext.fpregs)
load_fpreg(emu, from_ptrv(u->uc_mcontext.fpregs));
// set signal mask
sigprocmask(SIG_SETMASK, (sigset_t*)&u->uc_sigmask, NULL);
// set uc_link
emu->uc_link = from_ptrv(u->uc_link);
errno = 0;
return R_EAX;
}
EXPORT int my32_makecontext(x64emu_t* emu, void* ucp, void* fnc, int32_t argc, int32_t* argv)
{
// printf_log(LOG_NONE, "Warning: call to unimplemented makecontext\n");
i386_ucontext_t *u = (i386_ucontext_t*)ucp;
// setup stack
u->uc_mcontext.gregs[I386_ESP] = to_ptr(u->uc_stack.ss_sp + u->uc_stack.ss_size - 4);
// setup the function
u->uc_mcontext.gregs[I386_EIP] = to_ptrv(fnc);
// setup args
uint32_t* esp = (uint32_t*)from_ptr(u->uc_mcontext.gregs[I386_ESP]);
for (int i=0; i<argc; ++i) {
// push value
--esp;
*esp = argv[(argc-1)-i];
}
// push the return value
--esp;
*esp = to_ptr(my_context->exit_bridge);
u->uc_mcontext.gregs[I386_ESP] = (uintptr_t)esp;
return 0;
}
EXPORT int my32_swapcontext(x64emu_t* emu, void* ucp1, void* ucp2)
{
// printf_log(LOG_NONE, "Warning: call to unimplemented swapcontext\n");
// grab current context in ucp1
my32_getcontext(emu, ucp1);
// activate ucp2
my32_setcontext(emu, ucp2);
return 0;
}
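#if 0
// Illustrative driver (hypothetical helper, host-side only): how the three wrappers above
// chain together for a guest coroutine switch. guest_ucp1/guest_ucp2 point to i386_ucontext_t
// storage owned by the guest, fnc is a guest entry point.
static void example_guest_context_switch(x64emu_t* emu, void* guest_ucp1, void* guest_ucp2, void* fnc)
{
my32_getcontext(emu, guest_ucp2); // snapshot the current guest state into ucp2
my32_makecontext(emu, guest_ucp2, fnc, 0, NULL); // retarget ucp2 at fnc, no arguments
my32_swapcontext(emu, guest_ucp1, guest_ucp2); // save into ucp1 and resume at fnc
}
#endif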


@ -269,6 +269,10 @@ static void sigstack_key_alloc() {
pthread_key_create(&sigstack_key, sigstack_destroy);
}
x64_stack_t* sigstack_getstack() {
return (x64_stack_t*)pthread_getspecific(sigstack_key);
}
// this allow handling "safe" function that just abort if accessing a bad address
static __thread JUMPBUFF signal_jmpbuf;
#ifdef ANDROID
@ -923,8 +927,17 @@ int sigbus_specialcases(siginfo_t* info, void * ucntx, void* pc, void* _fpsimd)
return 0;
}
#ifdef BOX32
void my_sigactionhandler_oldcode_32(int32_t sig, int simple, siginfo_t* info, void * ucntx, int* old_code, void* cur_db);
#endif
void my_sigactionhandler_oldcode(int32_t sig, int simple, siginfo_t* info, void * ucntx, int* old_code, void* cur_db)
{
#ifdef BOX32
if(box64_is32bits) {
my_sigactionhandler_oldcode_32(sig, simple, info, ucntx, old_code, cur_db);
return;
}
#endif
int Locks = unlockMutex();
printf_log(LOG_DEBUG, "Sigactionhanlder for signal #%d called (jump to %p/%s)\n", sig, (void*)my_context->signals[sig], GetNativeName((void*)my_context->signals[sig]));
@ -2184,7 +2197,7 @@ EXPORT int my_getcontext(x64emu_t* emu, void* ucp)
// get signal mask
sigprocmask(SIG_SETMASK, NULL, (sigset_t*)&u->uc_sigmask);
// ensure uc_link is properly initialized
u->uc_link = emu->uc_link;
u->uc_link = (x64_ucontext_t*)emu->uc_link;
return 0;
}


@ -29,6 +29,9 @@
#include "dynablock.h"
#include "dynarec/native_lock.h"
#endif
#ifdef BOX32
#include "box32.h"
#endif
//void _pthread_cleanup_push_defer(void* buffer, void* routine, void* arg); // declare hidden functions
//void _pthread_cleanup_pop_restore(void* buffer, int exec);
@ -124,14 +127,6 @@ int GetStackSize(x64emu_t* emu, uintptr_t attr, void** stack, size_t* stacksize)
void my_longjmp(x64emu_t* emu, /*struct __jmp_buf_tag __env[1]*/void *p, int32_t __val);
typedef struct emuthread_s {
uintptr_t fnc;
void* arg;
x64emu_t* emu;
int cancel_cap, cancel_size;
x64_unwind_buff_t **cancels;
} emuthread_t;
static pthread_key_t thread_key;
static void emuthread_destroy(void* p)
@ -144,6 +139,10 @@ static void emuthread_destroy(void* p)
if (my_context && (ptr = pthread_getspecific(my_context->tlskey)) != NULL)
free_tlsdatasize(ptr);*/
// free x64emu
#ifdef BOX32
if(box64_is32bits && !et->join)
to_hash_d(et->self);
#endif
if(et) {
FreeX64Emu(&et->emu);
box_free(et);
@ -158,9 +157,13 @@ static void emuthread_cancel(void* p)
// check cancels threads
for(int i=et->cancel_size-1; i>=0; --i) {
et->emu->flags.quitonlongjmp = 0;
my_longjmp(et->emu, et->cancels[i]->__cancel_jmp_buf, 1);
my_longjmp(et->emu, ((x64_unwind_buff_t*)et->cancels[i])->__cancel_jmp_buf, 1);
DynaRun(et->emu); // will return after a __pthread_unwind_next()
}
#ifdef BOX32
if(box64_is32bits)
to_hash_d(et->self);
#endif
box_free(et->cancels);
et->cancels=NULL;
et->cancel_size = et->cancel_cap = 0;
@ -182,6 +185,12 @@ void thread_set_emu(x64emu_t* emu)
}
et->emu = emu;
et->emu->type = EMUTYPE_MAIN;
#ifdef BOX32
if(box64_is32bits) {
et->self = (uintptr_t)pthread_self();
et->hself = to_hash(et->self);
}
#endif
pthread_setspecific(thread_key, et);
}
@ -1097,6 +1106,10 @@ EXPORT int my_pthread_barrier_init(x64emu_t* emu, pthread_barrier_t* bar, my_bar
void init_pthread_helper()
{
#ifdef BOX32
if(box64_is32bits)
init_pthread_helper_32();
#endif
real_pthread_cleanup_push_defer = (vFppp_t)dlsym(NULL, "_pthread_cleanup_push_defer");
real_pthread_cleanup_pop_restore = (vFpi_t)dlsym(NULL, "_pthread_cleanup_pop_restore");
real_pthread_cond_clockwait = (iFppip_t)dlsym(NULL, "pthread_cond_clockwait");
@ -1129,6 +1142,10 @@ void clean_current_emuthread()
void fini_pthread_helper(box64context_t* context)
{
#ifdef BOX32
if(box64_is32bits)
fini_pthread_helper_32(context);
#endif
CleanStackSize(context);
clean_current_emuthread();
}

854
src/libtools/threads32.c Executable file

@ -0,0 +1,854 @@
// __USE_UNIX98 is needed for settype / gettype definition
#define __USE_UNIX98
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <signal.h>
#include <errno.h>
#include <setjmp.h>
#include <sys/mman.h>
#include <dlfcn.h>
#include "debug.h"
#include "box32context.h"
#include "threads.h"
#include "emu/x64emu_private.h"
#include "tools/bridge_private.h"
#include "x64run.h"
#include "x64emu.h"
#include "box64stack.h"
#include "callback.h"
#include "custommem.h"
#include "khash.h"
#include "emu/x64run_private.h"
#include "x64trace.h"
#include "dynarec.h"
#include "bridge.h"
#ifdef DYNAREC
#include "dynablock.h"
#endif
typedef void (*vFppp_t)(void*, void*, void*);
typedef void (*vFpi_t)(void*, int);
//starting with glibc 2.34+, those 2 functions are in libc.so as versioned symbols only
// So use dlsym to get the symbol unversioned, as a simple link will not work.
static vFppp_t real_pthread_cleanup_push_defer = NULL;
static vFpi_t real_pthread_cleanup_pop_restore = NULL;
// those functions can be used directly
void _pthread_cleanup_push(void* buffer, void* routine, void* arg); // declare hidden functions
void _pthread_cleanup_pop(void* buffer, int exec);
typedef struct threadstack_s {
void* stack;
size_t stacksize;
} threadstack_t;
// longjmp / setjmp
typedef struct jump_buff_i386_s {
uint32_t save_ebx;
uint32_t save_esi;
uint32_t save_edi;
uint32_t save_ebp;
uint32_t save_esp;
uint32_t save_eip;
} jump_buff_i386_t;
// sigset_t should have the same size on 32-bit and 64-bit machines (64 bits)
typedef struct __jmp_buf_tag_s {
jump_buff_i386_t __jmpbuf;
int __mask_was_saved;
sigset_t __saved_mask;
} __jmp_buf_tag_t;
typedef struct x64_unwind_buff_s {
struct {
jump_buff_i386_t __cancel_jmp_buf;
int __mask_was_saved;
} __cancel_jmp_buf[1];
ptr_t __pad[2];
void* __pad3;
} x64_unwind_buff_t __attribute__((__aligned__));
static pthread_attr_t* get_attr(void* attr);
static void del_attr(void* attr);
typedef void(*vFv_t)();
KHASH_MAP_INIT_INT(threadstack, threadstack_t)
#ifndef ANDROID
KHASH_MAP_INIT_INT(cancelthread, __pthread_unwind_buf_t*)
#endif
void CleanStackSize(box64context_t* context);
void FreeStackSize(kh_threadstack_t* map, uintptr_t attr);
void AddStackSize(kh_threadstack_t* map, uintptr_t attr, void* stack, size_t stacksize);
int GetStackSize(x64emu_t* emu, uintptr_t attr, void** stack, size_t* stacksize);
static pthread_key_t thread_key;
void my32_longjmp(x64emu_t* emu, /*struct __jmp_buf_tag __env[1]*/void *p, int32_t __val);
static void emuthread_destroy(void* p)
{
emuthread_t *et = (emuthread_t*)p;
if(!et)
return;
// destroy the hash key if thread is not joinable
if(!et->join)
to_hash_d(et->self);
// destroy thread emu and all
if(et) {
FreeX64Emu(&et->emu);
free(et);
}
}
static void emuthread_cancel(void* p)
{
emuthread_t *et = (emuthread_t*)p;
if(!et)
return;
// check cancels threads
for(int i=et->cancel_size-1; i>=0; --i) {
et->emu->flags.quitonlongjmp = 0;
my32_longjmp(et->emu, ((x64_unwind_buff_t*)et->cancels[i])->__cancel_jmp_buf, 1);
DynaRun(et->emu); // will return after a __pthread_unwind_next()
}
free(et->cancels);
to_hash_d(et->self);
et->cancels=NULL;
et->cancel_size = et->cancel_cap = 0;
}
static void* pthread_routine(void* p)
{
// free current emuthread if it exist
{
void* t = pthread_getspecific(thread_key);
if(t) {
// not sure how this could happen
printf_log(LOG_INFO, "Clean of an existing ET for Thread %04d\n", GetTID());
emuthread_destroy(t);
}
}
pthread_setspecific(thread_key, p);
// call the function
emuthread_t *et = (emuthread_t*)p;
et->emu->type = EMUTYPE_MAIN;
et->self = (uintptr_t)pthread_self();
et->hself = to_hash(et->self);
// setup callstack and run...
x64emu_t* emu = et->emu;
Push_32(emu, 0); // PUSH 0 (backtrace marker: return address is 0)
Push_32(emu, 0); // PUSH BP
R_EBP = R_ESP; // MOV BP, SP
R_ESP -= 32; // guard area
R_ESP &=~15;
Push_32(emu, to_ptrv(et->arg));
PushExit_32(emu);
R_EIP = to_ptr(et->fnc);
pthread_cleanup_push(emuthread_cancel, p);
DynaRun(et->emu);
pthread_cleanup_pop(0);
void* ret = from_ptrv(R_EAX);
return ret;
}
EXPORT int my32_pthread_attr_destroy(x64emu_t* emu, void* attr)
{
if(emu->context->stacksizes)
FreeStackSize(emu->context->stacksizes, (uintptr_t)attr);
int ret = pthread_attr_destroy(get_attr(attr));
del_attr(attr);
return ret;
}
EXPORT int my32_pthread_attr_getstack(x64emu_t* emu, void* attr, void** stackaddr, size_t* stacksize)
{
int ret = pthread_attr_getstack(get_attr(attr), stackaddr, stacksize);
if (ret==0)
GetStackSize(emu, (uintptr_t)attr, stackaddr, stacksize);
return ret;
}
EXPORT int my32_pthread_attr_setstack(x64emu_t* emu, void* attr, void* stackaddr, size_t stacksize)
{
if(!emu->context->stacksizes) {
emu->context->stacksizes = kh_init(threadstack);
}
AddStackSize(emu->context->stacksizes, (uintptr_t)attr, stackaddr, stacksize);
//Don't call actual setstack...
//return pthread_attr_setstack(attr, stackaddr, stacksize);
return pthread_attr_setstacksize(get_attr(attr), stacksize);
}
EXPORT int my32_pthread_create(x64emu_t *emu, void* t, void* attr, void* start_routine, void* arg)
{
int stacksize = 2*1024*1024; //default stack size is 2MB
void* attr_stack;
size_t attr_stacksize;
int own;
void* stack;
if(attr) {
size_t stsize;
if(pthread_attr_getstacksize(get_attr(attr), &stsize)==0)
stacksize = stsize;
if(stacksize<512*1024) // the emu and friends need some stack space, don't go too low
pthread_attr_setstacksize(get_attr(attr), 512*1024);
}
if(GetStackSize(emu, (uintptr_t)attr, &attr_stack, &attr_stacksize))
{
stack = attr_stack;
stacksize = attr_stacksize;
own = 0;
} else {
//stack = malloc(stacksize);
stack = mmap64(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
own = 1;
}
emuthread_t *et = (emuthread_t*)calloc(1, sizeof(emuthread_t));
x64emu_t *emuthread = NewX64Emu(my_context, (uintptr_t)start_routine, (uintptr_t)stack, stacksize, own);
SetupX64Emu(emuthread, emu);
et->emu = emuthread;
et->fnc = (uintptr_t)start_routine;
et->arg = arg;
if(!attr)
et->join = 1;
else {
int j;
pthread_attr_getdetachstate(get_attr(attr), &j);
if(j==PTHREAD_CREATE_JOINABLE)
et->join = 1;
else
et->join = 0;
}
#ifdef DYNAREC
if(box64_dynarec) {
// pre-creation of the JIT code for the entry point of the thread
dynablock_t *current = NULL;
DBGetBlock(emu, (uintptr_t)start_routine, 1, 1);
}
#endif
// create thread
return pthread_create((pthread_t*)t, get_attr(attr),
pthread_routine, et);
}
EXPORT int my32_pthread_detach(x64emu_t* emu, pthread_t p)
{
if(pthread_equal(p ,pthread_self())) {
emuthread_t *et = (emuthread_t*)pthread_getspecific(thread_key);
et->join = 0;
}
return pthread_detach(p);
}
void* my32_prepare_thread(x64emu_t *emu, void* f, void* arg, int ssize, void** pet)
{
int stacksize = (ssize)?ssize:(2*1024*1024); //default stack size is 2MB
//void* stack = malloc(stacksize);
void* stack = mmap64(NULL, stacksize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_GROWSDOWN, -1, 0);
emuthread_t *et = (emuthread_t*)calloc(1, sizeof(emuthread_t));
x64emu_t *emuthread = NewX64Emu(emu->context, (uintptr_t)f, (uintptr_t)stack, stacksize, 1);
SetupX64Emu(emuthread, emu);
et->emu = emuthread;
et->fnc = (uintptr_t)f;
et->arg = arg;
#ifdef DYNAREC
if(box64_dynarec) {
// pre-creation of the JIT code for the entry point of the thread
dynablock_t *current = NULL;
DBGetBlock(emu, (uintptr_t)f, 1, 1);
}
#endif
*pet = et;
return pthread_routine;
}
void my32_longjmp(x64emu_t* emu, /*struct __jmp_buf_tag __env[1]*/void *p, int32_t __val);
EXPORT void my32___pthread_register_cancel(x64emu_t* emu, x64_unwind_buff_t* buff)
{
buff = (x64_unwind_buff_t*)from_ptr(R_EAX); // param is in fact in a register
emuthread_t *et = (emuthread_t*)pthread_getspecific(thread_key);
if(et->cancel_cap == et->cancel_size) {
et->cancel_cap+=8;
et->cancels = realloc(et->cancels, sizeof(x64_unwind_buff_t*)*et->cancel_cap);
}
et->cancels[et->cancel_size++] = buff;
}
EXPORT void my32___pthread_unregister_cancel(x64emu_t* emu, x64_unwind_buff_t* buff)
{
emuthread_t *et = (emuthread_t*)pthread_getspecific(thread_key);
for (int i=et->cancel_size-1; i>=0; --i) {
if(et->cancels[i] == buff) {
if(i!=et->cancel_size-1)
memmove(et->cancels+i, et->cancels+i+1, sizeof(x64_unwind_buff_t*)*(et->cancel_size-i-1));
et->cancel_size--;
}
}
}
EXPORT void my32___pthread_unwind_next(x64emu_t* emu, void* p)
{
emu->quit = 1;
}
KHASH_MAP_INIT_INT(once, int)
#define SUPER() \
GO(0) \
GO(1) \
GO(2) \
GO(3) \
GO(4) \
GO(5) \
GO(6) \
GO(7) \
GO(8) \
GO(9) \
GO(10) \
GO(11) \
GO(12) \
GO(13) \
GO(14) \
GO(15) \
GO(16) \
GO(17) \
GO(18) \
GO(19) \
GO(20) \
GO(21) \
GO(22) \
GO(23) \
GO(24) \
GO(25) \
GO(26) \
GO(27) \
GO(28) \
GO(29)
// cleanup_routine
#define GO(A) \
static uintptr_t my32_cleanup_routine_fct_##A = 0; \
static void my32_cleanup_routine_##A(void* a) \
{ \
RunFunctionFmt(my32_cleanup_routine_fct_##A, "p", to_ptrv(a)); \
}
SUPER()
#undef GO
static void* findcleanup_routineFct(void* fct)
{
if(!fct) return fct;
if(GetNativeFnc((uintptr_t)fct)) return GetNativeFnc((uintptr_t)fct);
#define GO(A) if(my32_cleanup_routine_fct_##A == (uintptr_t)fct) return my32_cleanup_routine_##A;
SUPER()
#undef GO
#define GO(A) if(my32_cleanup_routine_fct_##A == 0) {my32_cleanup_routine_fct_##A = (uintptr_t)fct; return my32_cleanup_routine_##A; }
SUPER()
#undef GO
printf_log(LOG_NONE, "Warning, no more slot for pthread cleanup_routine callback\n");
return NULL;
}
// key_destructor
#define GO(A) \
static uintptr_t my32_key_destructor_fct_##A = 0; \
static void my32_key_destructor_##A(void* a) \
{ \
RunFunctionFmt(my32_key_destructor_fct_##A, "p", to_ptrv(a)); \
}
SUPER()
#undef GO
static void* findkey_destructorFct(void* fct)
{
if(!fct) return fct;
if(GetNativeFnc((uintptr_t)fct)) return GetNativeFnc((uintptr_t)fct);
#define GO(A) if(my32_key_destructor_fct_##A == (uintptr_t)fct) return my32_key_destructor_##A;
SUPER()
#undef GO
#define GO(A) if(my32_key_destructor_fct_##A == 0) {my32_key_destructor_fct_##A = (uintptr_t)fct; return my32_key_destructor_##A; }
SUPER()
#undef GO
printf_log(LOG_NONE, "Warning, no more slot for pthread key_destructor callback\n");
return NULL;
}
#undef SUPER
int EXPORT my32_pthread_once(x64emu_t* emu, int* once, void* cb)
{
if(*once) // quick test first
return 0;
// slow test now
#ifdef DYNAREC
int old = native_lock_xchg_d(once, 1);
#else
int old = *once; // outside of the mutex in case once is badly formed
pthread_mutex_lock(&my_context->mutex_lock);
old = *once;
*once = 1;
pthread_mutex_unlock(&my_context->mutex_lock);
#endif
if(old)
return 0;
// make some room and align R_ESP before doing the call (maybe it would be simpler to just use Callback functions)
Push_32(emu, R_EBP); // push ebp
R_EBP = R_ESP; // mov ebp, esp
R_ESP -= 0x200;
R_ESP &= ~63LL;
DynaCall(emu, (uintptr_t)cb);
R_ESP = R_EBP; // mov esp, ebp
R_EBP = Pop32(emu); // pop ebp
return 0;
}
EXPORT int my32___pthread_once(x64emu_t* emu, void* once, void* cb) __attribute__((alias("my32_pthread_once")));
EXPORT int my32_pthread_key_create(x64emu_t* emu, void* key, void* dtor)
{
return pthread_key_create(key, findkey_destructorFct(dtor));
}
EXPORT int my32___pthread_key_create(x64emu_t* emu, void* key, void* dtor) __attribute__((alias("my32_pthread_key_create")));
// pthread_cond_init with null attr seems to only write 1 (NULL) dword on x64, while it's 48 bytes on ARM.
// Not sure why, as sizeof(pthread_cond_t) is 48 on both platforms... But Neverwinter Night init seems to rely on that
// What about conds that are statically initialized?
// Note, this is a versioned function (the pthread_cond_*), and this seems to correspond to an old behaviour
KHASH_MAP_INIT_INT(mapcond, pthread_cond_t*);
// should all access to that map be behind a mutex?
kh_mapcond_t *mapcond = NULL;
static pthread_cond_t* add_cond(void* cond)
{
mutex_lock(&my_context->mutex_thread);
khint_t k;
int ret;
pthread_cond_t *c;
k = kh_put(mapcond, mapcond, (uintptr_t)cond, &ret);
if(!ret)
c = kh_value(mapcond, k); // already there... reinit an existing one?
else
c = kh_value(mapcond, k) = (pthread_cond_t*)calloc(1, sizeof(pthread_cond_t));
//*(ptr_t*)cond = to_ptrv(cond);
mutex_unlock(&my_context->mutex_thread);
return c;
}
static pthread_cond_t* get_cond(void* cond)
{
pthread_cond_t* ret;
int r;
mutex_lock(&my_context->mutex_thread);
khint_t k = kh_get(mapcond, mapcond, *(uintptr_t*)cond);
if(k==kh_end(mapcond)) {
khint_t k = kh_get(mapcond, mapcond, (uintptr_t)cond);
if(k==kh_end(mapcond)) {
printf_log(LOG_DEBUG, "BOX32: Note: pthread_cond not found, creating a new empty one\n");
ret = (pthread_cond_t*)calloc(1, sizeof(pthread_cond_t));
k = kh_put(mapcond, mapcond, (uintptr_t)cond, &r);
kh_value(mapcond, k) = ret;
//*(ptr_t*)cond = to_ptrv(cond);
pthread_cond_init(ret, NULL);
} else
ret = kh_value(mapcond, k);
} else
ret = kh_value(mapcond, k);
mutex_unlock(&my_context->mutex_thread);
return ret;
}
static void del_cond(void* cond)
{
if(!mapcond)
return;
mutex_lock(&my_context->mutex_thread);
khint_t k = kh_get(mapcond, mapcond, *(uintptr_t*)cond);
if(k!=kh_end(mapcond)) {
free(kh_value(mapcond, k));
kh_del(mapcond, mapcond, k);
}
mutex_unlock(&my_context->mutex_thread);
}
pthread_mutex_t* getAlignedMutex(pthread_mutex_t* m);
EXPORT int my32_pthread_cond_broadcast_old(x64emu_t* emu, void* cond)
{
pthread_cond_t * c = get_cond(cond);
return pthread_cond_broadcast(c);
}
EXPORT int my32_pthread_cond_destroy_old(x64emu_t* emu, void* cond)
{
pthread_cond_t * c = get_cond(cond);
int ret = pthread_cond_destroy(c);
if(c!=cond) del_cond(cond);
return ret;
}
EXPORT int my32_pthread_cond_init_old(x64emu_t* emu, void* cond, void* attr)
{
pthread_cond_t *c = add_cond(cond);
return pthread_cond_init(c, (const pthread_condattr_t*)attr);
}
EXPORT int my32_pthread_cond_signal_old(x64emu_t* emu, void* cond)
{
pthread_cond_t * c = get_cond(cond);
return pthread_cond_signal(c);
}
EXPORT int my32_pthread_cond_timedwait_old(x64emu_t* emu, void* cond, void* mutex, void* abstime)
{
pthread_cond_t * c = get_cond(cond);
return pthread_cond_timedwait(c, getAlignedMutex((pthread_mutex_t*)mutex), (const struct timespec*)abstime);
}
EXPORT int my32_pthread_cond_wait_old(x64emu_t* emu, void* cond, void* mutex)
{
pthread_cond_t * c = get_cond(cond);
return pthread_cond_wait(c, getAlignedMutex((pthread_mutex_t*)mutex));
}
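#if 0
// Illustrative lifecycle (hypothetical driver code, for illustration only) of the "_old"
// cond wrappers above: the guest only provides a 4-byte slot, so a native pthread_cond_t
// is kept in mapcond, keyed by the guest address, and looked up again on every call.
static void example_old_cond_lifecycle(x64emu_t* emu, void* guest_cond)
{
my32_pthread_cond_init_old(emu, guest_cond, NULL); // allocates the shadow cond in mapcond
my32_pthread_cond_signal_old(emu, guest_cond); // looks the shadow cond up and signals it
my32_pthread_cond_destroy_old(emu, guest_cond); // destroys it and drops the map entry
}
#endif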
EXPORT int my32_pthread_cond_timedwait(x64emu_t* emu, void* cond, void* mutex, void* abstime)
{
return pthread_cond_timedwait((pthread_cond_t*)cond, getAlignedMutex((pthread_mutex_t*)mutex), (const struct timespec*)abstime);
}
EXPORT int my32_pthread_cond_wait(x64emu_t* emu, void* cond, void* mutex)
{
return pthread_cond_wait((pthread_cond_t*)cond, getAlignedMutex((pthread_mutex_t*)mutex));
}
EXPORT int my32_pthread_mutexattr_setkind_np(x64emu_t* emu, void* t, int kind)
{
// does "kind" need some type of translation?
return pthread_mutexattr_settype(t, kind);
}
// pthread_attr_t on 32-bit x86 is 36 bytes
static uint64_t ATTR_SIGN = 0xA055E10CDE98LL; // random signature
typedef struct my32_x64_attr_s {
uint64_t sign;
pthread_attr_t* attr;
} my32_x64_attr_t;
static pthread_attr_t* get_attr(void* attr)
{
if(!attr)
return NULL;
my32_x64_attr_t* my32_attr = (my32_x64_attr_t*)attr;
if(my32_attr->sign!=ATTR_SIGN) {
my32_attr->attr = (pthread_attr_t*)calloc(1, sizeof(pthread_attr_t));
my32_attr->sign = ATTR_SIGN;
}
return my32_attr->attr;
}
static void del_attr(void* attr)
{
if(!attr)
return;
my32_x64_attr_t* my32_attr = (my32_x64_attr_t*)attr;
if(my32_attr->sign==ATTR_SIGN) {
my32_attr->sign = 0;
free(my32_attr->attr);
}
}
EXPORT int my32_pthread_attr_init(x64emu_t* emu, void* attr)
{
return pthread_attr_init(get_attr(attr));
}
EXPORT int my32_pthread_attr_getdetachstate(x64emu_t* emu, void* attr, void* p)
{
return pthread_attr_getdetachstate(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_getguardsize(x64emu_t* emu, void* attr, void* p)
{
return pthread_attr_getguardsize(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_getinheritsched(x64emu_t* emu, void* attr, void* p)
{
return pthread_attr_getinheritsched(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_getschedparam(x64emu_t* emu, void* attr, void* p)
{
return pthread_attr_getschedparam(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_getschedpolicy(x64emu_t* emu, void* attr, void* p)
{
return pthread_attr_getschedpolicy(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_getscope(x64emu_t* emu, void* attr, void* p)
{
return pthread_attr_getscope(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_getstackaddr(x64emu_t* emu, void* attr, ptr_t* p)
{
size_t size;
void* pp;
int ret = pthread_attr_getstack(get_attr(attr), &pp, &size);
*p = to_ptrv(pp);
return ret;
}
EXPORT int my32_pthread_attr_getstacksize(x64emu_t* emu, void* attr, ulong_t* p)
{
size_t size;
void* pp;
int ret = pthread_attr_getstack(get_attr(attr), &pp, &size);
*p = to_ulong(size);
return ret;
}
EXPORT int my32_pthread_attr_setdetachstate(x64emu_t* emu, void* attr, int p)
{
return pthread_attr_setdetachstate(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_setguardsize(x64emu_t* emu, void* attr, size_t p)
{
return pthread_attr_setguardsize(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_setinheritsched(x64emu_t* emu, void* attr, int p)
{
return pthread_attr_setinheritsched(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_setschedparam(x64emu_t* emu, void* attr, void* param)
{
int policy;
pthread_attr_getschedpolicy(get_attr(attr), &policy);
int pmin = sched_get_priority_min(policy);
int pmax = sched_get_priority_max(policy);
if(param) {
int p = *(int*)param;
if(p>=pmin && p<=pmax)
return pthread_attr_setschedparam(get_attr(attr), param);
}
printf_log(LOG_INFO, "Warning, call to pthread_attr_setschedparam(%p, %p[%d]) ignored\n", attr, param, param?(*(int*)param):-1);
return 0; // faking success
}
EXPORT int my32_pthread_attr_setschedpolicy(x64emu_t* emu, void* attr, int p)
{
return pthread_attr_setschedpolicy(get_attr(attr), p);
}
EXPORT int my32_pthread_attr_setstackaddr(x64emu_t* emu, void* attr, void* p)
{
ulong_t size = 2*1024*1024;
my32_pthread_attr_getstacksize(emu, attr, &size);
return pthread_attr_setstack(get_attr(attr), p, size);
}
EXPORT int my32_pthread_attr_setstacksize(x64emu_t* emu, void* attr, size_t p)
{
ptr_t pp;
my32_pthread_attr_getstackaddr(emu, attr, &pp);
return pthread_attr_setstack(get_attr(attr), from_ptrv(pp), p);
}
EXPORT int my32_pthread_attr_setscope(x64emu_t* emu, void* attr, int scope)
{
if(scope!=PTHREAD_SCOPE_SYSTEM) printf_log(LOG_INFO, "Warning, scope of call to pthread_attr_setscope(...) changed from %d to PTHREAD_SCOPE_SYSTEM\n", scope);
return pthread_attr_setscope(get_attr(attr), PTHREAD_SCOPE_SYSTEM);
//The scope is either PTHREAD_SCOPE_SYSTEM or PTHREAD_SCOPE_PROCESS
// but PTHREAD_SCOPE_PROCESS doesn't seem supported on ARM linux, and PTHREAD_SCOPE_SYSTEM is default
}
#ifndef ANDROID
EXPORT void my32__pthread_cleanup_push_defer(x64emu_t* emu, void* buffer, void* routine, void* arg)
{
real_pthread_cleanup_push_defer(buffer, findcleanup_routineFct(routine), arg);
}
EXPORT void my32__pthread_cleanup_push(x64emu_t* emu, void* buffer, void* routine, void* arg)
{
_pthread_cleanup_push(buffer, findcleanup_routineFct(routine), arg);
}
EXPORT void my32__pthread_cleanup_pop_restore(x64emu_t* emu, void* buffer, int exec)
{
real_pthread_cleanup_pop_restore(buffer, exec);
}
EXPORT void my32__pthread_cleanup_pop(x64emu_t* emu, void* buffer, int exec)
{
_pthread_cleanup_pop(buffer, exec);
}
// getaffinity_np (pthread or attr) have an "old" version (glibc-2.3.3) that only has 2 args; cpusetsize is omitted
EXPORT int my32_pthread_getaffinity_np(x64emu_t* emu, pthread_t thread, int cpusetsize, void* cpuset)
{
if(cpusetsize>0x1000) {
// probably an old version of the function that didn't have cpusetsize....
cpuset = from_ptrv(cpusetsize);
cpusetsize = sizeof(cpu_set_t);
}
int ret = pthread_getaffinity_np(thread, cpusetsize, cpuset);
if(ret<0) {
printf_log(LOG_INFO, "Warning, pthread_getaffinity_np(%p, %d, %p) errored, with errno=%d\n", (void*)thread, cpusetsize, cpuset, errno);
}
return ret;
}
EXPORT int my32_pthread_setaffinity_np(x64emu_t* emu, pthread_t thread, int cpusetsize, void* cpuset)
{
if(cpusetsize>0x1000) {
// probably old version of the function, that didn't have cpusetsize....
cpuset = from_ptrv(cpusetsize);
cpusetsize = sizeof(cpu_set_t);
}
int ret = pthread_setaffinity_np(thread, cpusetsize, cpuset);
if(ret) {
printf_log(LOG_INFO, "Warning, pthread_setaffinity_np(%p, %d, %p) errored, returning %d\n", (void*)thread, cpusetsize, cpuset, ret);
}
return ret;
}
EXPORT int my32_pthread_attr_setaffinity_np(x64emu_t* emu, void* attr, uint32_t cpusetsize, void* cpuset)
{
if(cpusetsize>0x1000) {
// probably old version of the function, that didn't have cpusetsize....
cpuset = from_ptrv(cpusetsize);
cpusetsize = sizeof(cpu_set_t);
}
int ret = pthread_attr_setaffinity_np(get_attr(attr), cpusetsize, cpuset);
if(ret) {
printf_log(LOG_INFO, "Warning, pthread_attr_setaffinity_np(%p, %d, %p) errored, returning %d\n", attr, cpusetsize, cpuset, ret);
}
return ret;
}
#endif
EXPORT int my32_pthread_kill(x64emu_t* emu, void* thread, int sig)
{
// old libpthread allowed pthread_kill(NULL, 0) as an "is everything ok?" self-check; map it to the current thread
if((thread==NULL) && (sig==0))
return pthread_kill(pthread_self(), 0);
return pthread_kill((pthread_t)thread, sig);
}
//EXPORT void my32_pthread_exit(x64emu_t* emu, void* retval)
//{
// emu->quit = 1; // to be safe
// pthread_exit(retval);
//}
// TODO: find a better way for mutexes. It should be possible to use the actual mutex most of the time, especially for simple ones
// Having the mutex table itself protected by a lock is far from ideal!
KHASH_MAP_INIT_INT(mutex, pthread_mutex_t*)
static kh_mutex_t* unaligned_mutex = NULL;
static pthread_rwlock_t m_lock = {0};
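// A 32bits guest pthread_mutex_t is only 24 bytes (vs 40 natively on 64bits) and may end up unaligned,
// so each guest mutex is shadowed by a natively allocated one, keyed by its guest address in the
// "unaligned_mutex" khash table (itself protected by m_lock); only the first 24 bytes of the guest
// object are copied over to seed the shadow.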
pthread_mutex_t* getAlignedMutex(pthread_mutex_t* m)
{
pthread_mutex_t* ret = NULL;
pthread_rwlock_rdlock(&m_lock);
khint_t k = kh_get(mutex, unaligned_mutex, (uintptr_t)m);
if(k!=kh_end(unaligned_mutex)) {
ret = kh_value(unaligned_mutex, k);
} else {
int r;
pthread_rwlock_unlock(&m_lock);
pthread_rwlock_wrlock(&m_lock);
k = kh_put(mutex, unaligned_mutex, (uintptr_t)m, &r);
if(r) {
// freshly inserted: create the native shadow mutex
ret = kh_value(unaligned_mutex, k) = (pthread_mutex_t*)calloc(1, sizeof(pthread_mutex_t));
memcpy(ret, m, 24);
} else
ret = kh_value(unaligned_mutex, k);	// another thread inserted it while m_lock was released
}
pthread_rwlock_unlock(&m_lock);
return ret;
}
EXPORT int my32_pthread_mutex_destroy(pthread_mutex_t *m)
{
pthread_rwlock_wrlock(&m_lock);
khint_t k = kh_get(mutex, unaligned_mutex, (uintptr_t)m);
if(k!=kh_end(unaligned_mutex)) {
pthread_mutex_t *n = kh_value(unaligned_mutex, k);
kh_del(mutex, unaligned_mutex, k);
pthread_rwlock_unlock(&m_lock); // release the lock before the early return, or any later getAlignedMutex would deadlock
int ret = pthread_mutex_destroy(n);
free(n);
return ret;
}
pthread_rwlock_unlock(&m_lock);
return pthread_mutex_destroy(m);
}
#define getAlignedMutexWithInit(A, B) getAlignedMutex(A)
EXPORT int my32___pthread_mutex_destroy(pthread_mutex_t *m) __attribute__((alias("my32_pthread_mutex_destroy")));
EXPORT int my32_pthread_mutex_init(pthread_mutex_t *m, pthread_mutexattr_t *att)
{
return pthread_mutex_init(getAlignedMutexWithInit(m, 0), att);
}
EXPORT int my32___pthread_mutex_init(pthread_mutex_t *m, pthread_mutexattr_t *att) __attribute__((alias("my32_pthread_mutex_init")));
EXPORT int my32_pthread_mutex_lock(pthread_mutex_t *m)
{
return pthread_mutex_lock(getAlignedMutex(m));
}
EXPORT int my32___pthread_mutex_lock(pthread_mutex_t *m) __attribute__((alias("my32_pthread_mutex_lock")));
EXPORT int my32_pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec * t)
{
return pthread_mutex_timedlock(getAlignedMutex(m), t);
}
EXPORT int my32_pthread_mutex_trylock(pthread_mutex_t *m)
{
return pthread_mutex_trylock(getAlignedMutex(m));
}
EXPORT int my32___pthread_mutex_trylock(pthread_mutex_t *m) __attribute__((alias("my32_pthread_mutex_trylock")));
EXPORT int my32_pthread_mutex_unlock(pthread_mutex_t *m)
{
return pthread_mutex_unlock(getAlignedMutex(m));
}
EXPORT int my32___pthread_mutex_unlock(pthread_mutex_t *m) __attribute__((alias("my32_pthread_mutex_unlock")));
static int done = 0;
void init_pthread_helper_32()
{
if(done)
return;
done = 1;
real_pthread_cleanup_push_defer = (vFppp_t)dlsym(NULL, "_pthread_cleanup_push_defer");
real_pthread_cleanup_pop_restore = (vFpi_t)dlsym(NULL, "_pthread_cleanup_pop_restore");
mapcond = kh_init(mapcond);
unaligned_mutex = kh_init(mutex);
pthread_key_create(&thread_key, emuthread_destroy);
pthread_setspecific(thread_key, NULL);
}
void clean_current_emuthread_32()
{
emuthread_t *et = (emuthread_t*)pthread_getspecific(thread_key);
if(et) {
emuthread_destroy(et);
pthread_setspecific(thread_key, NULL);
}
}
void fini_pthread_helper_32(box64context_t* context)
{
if(!done)
return;
done = 0;
//CleanStackSize(context);
pthread_cond_t *cond;
kh_foreach_value(mapcond, cond,
pthread_cond_destroy(cond);
free(cond);
);
kh_destroy(mapcond, mapcond);
mapcond = NULL;
pthread_mutex_t *m;
kh_foreach_value(unaligned_mutex, m,
pthread_mutex_destroy(m);
free(m);
);
kh_destroy(mutex, unaligned_mutex);
clean_current_emuthread_32();
}

src/tools/box32stack.c (new file)

@ -0,0 +1,137 @@
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include "box64stack.h"
#include "box64context.h"
#include "elfloader.h"
#include "debug.h"
#include "emu/x64emu_private.h"
#include "emu/x64run_private.h"
#include "auxval.h"
#include "custommem.h"
#include "box32.h"
static void PushString32(x64emu_t *emu, const char* s)
{
int sz = strlen(s) + 1;
// no per-string rounding here: the stack gets re-aligned to stackalign further down
R_ESP -= sz;
memcpy(from_ptrv(R_ESP), s, sz);
}
static void Push32_32(x64emu_t *emu, uint32_t v)
{
R_ESP -= 4;
*((uint32_t*)from_ptr(R_ESP)) = v;
}
EXPORTDYN
void SetupInitialStack32(x64emu_t *emu)
{
// start with 0
Push32_32(emu, 0);
// push program executed
PushString32(emu, emu->context->argv[0]);
uintptr_t p_arg0 = from_ptr(R_ESP);
// push envs
uintptr_t p_envv[emu->context->envc];
for (int i=emu->context->envc-1; i>=0; --i) {
PushString32(emu, emu->context->envv[i]);
p_envv[i] = from_ptr(R_ESP);
}
// push args; also free the original argv[] strings and make argv[] point to the copies on the guest stack
uintptr_t p_argv[emu->context->argc];
for (int i=emu->context->argc-1; i>=0; --i) {
PushString32(emu, emu->context->argv[i]);
p_argv[i] = R_ESP;
free(emu->context->argv[i]);
emu->context->argv[i] = (char*)p_argv[i];
}
// align
uintptr_t tmp = from_ptr(R_ESP)&~(emu->context->stackalign-1);
memset((void*)tmp, 0, from_ptr(R_ESP)-tmp);
R_ESP=to_ptr(tmp);
// push some AuxVector stuff
PushString32(emu, "i686");
uintptr_t p_i686 = from_ptr(R_ESP);
uintptr_t p_random = real_getauxval(25);
if(!p_random) {
for (int i=0; i<4; ++i)
Push32_32(emu, random());
p_random = from_ptr(R_ESP);
}
// align
tmp = (R_ESP)&~(emu->context->stackalign-1);
memset((void*)tmp, 0, from_ptr(R_ESP)-tmp);
R_ESP=tmp;
// push the AuxVector themselves
/*
00: 00000000
03: 08048034
04: 00000020
05: 0000000b
06: 00001000
07: f7fc0000
08: 00000000
09: 08049060
11: 000003e8
12: 000003e8
13: 000003e8
14: 000003e8
15: ffd8aa5b/i686
16: bfebfbff
17: 00000064
23: 00000000
25: ffd8aa4b
26: 00000000
31: ffd8bfeb/./testAuxVec
32: f7fbfb40
33: f7fbf000
*/
Push32_32(emu, 0); Push32_32(emu, 0); //AT_NULL(0)=0
//Push32_32(emu, ); Push32_32(emu, 3); //AT_PHDR(3)=address of the PH of the executable
//Push32_32(emu, ); Push32_32(emu, 4); //AT_PHENT(4)=size of PH entry
//Push32_32(emu, ); Push32_32(emu, 5); //AT_PHNUM(5)=number of elf headers
Push32_32(emu, box64_pagesize); Push32_32(emu, 6); //AT_PAGESZ(6)
//Push32_32(emu, real_getauxval(7)); Push32_32(emu, 7); //AT_BASE(7)=ld-2.27.so start (in memory)
Push32_32(emu, 0); Push32_32(emu, 8); //AT_FLAGS(8)=0
Push32_32(emu, R_EIP); Push32_32(emu, 9); //AT_ENTRY(9)=entrypoint
Push32_32(emu, from_ulong(real_getauxval(11))); Push32_32(emu, 11); //AT_UID(11)
Push32_32(emu, from_ulong(real_getauxval(12))); Push32_32(emu, 12); //AT_EUID(12)
Push32_32(emu, from_ulong(real_getauxval(13))); Push32_32(emu, 13); //AT_GID(13)
Push32_32(emu, from_ulong(real_getauxval(14))); Push32_32(emu, 14); //AT_EGID(14)
Push32_32(emu, p_i686); Push32_32(emu, 15); //AT_PLATFORM(15)=&"i686"
// Push HWCAP:
// FPU: 1<<0 ; VME: 1<<1 ; DE : 1<<2 ; PSE: 1<<3 ; TSC: 1<<4 ; MSR: 1<<5 ; PAE: 1<<6 ; MCE: 1<<7
// CX8: 1<<8 ; APIC:1<<9 ; SEP: 1<<11; MTRR:1<<12; PGE: 1<<13; MCA: 1<<14; CMOV:1<<15
// FCMOV:1<<16; MMX: 1<<23
// OSFXR:1<<24; XMM: 1<<25;XMM2: 1<<26; AMD3D:1<<31
Push32_32(emu, (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<8) | (1<<15) | (1<<16) | (1<<23) | (1<<25) | (1<<26));
Push32_32(emu, 16); //AT_HWCAP(16)=...
//Push32_32(emu, sysconf(_SC_CLK_TCK)); Push32_32(emu, 17); //AT_CLKTCK(17)=times() frequency
Push32_32(emu, from_ulong(real_getauxval(23))); Push32_32(emu, 23); //AT_SECURE(23)
Push32_32(emu, p_random); Push32_32(emu, 25); //AT_RANDOM(25)=p_random
Push32_32(emu, 0); Push32_32(emu, 26); //AT_HWCAP2(26)=0
Push32_32(emu, p_arg0); Push32_32(emu, 31); //AT_EXECFN(31)=p_arg0
Push32_32(emu, emu->context->vsyscall); Push32_32(emu, 32); //AT_SYSINFO(32)=vsyscall
//Push32_32(emu, ); Push32_32(emu, 33); //AT_SYSINFO_EHDR(33)=address of vDSO
if(!emu->context->auxval_start) // store auxval start if needed
emu->context->auxval_start = (uintptr_t*)from_ptr(R_ESP);
// push nil / envs / nil / args / argc
Push32_32(emu, 0);
for (int i=emu->context->envc-1; i>=0; --i)
Push32_32(emu, to_ptr(p_envv[i]));
emu->context->envv32 = R_ESP;
Push32_32(emu, 0);
for (int i=emu->context->argc-1; i>=0; --i)
Push32_32(emu, to_ptr(p_argv[i]));
emu->context->argv32 = R_ESP;
Push32_32(emu, emu->context->argc);
}
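// Resulting guest stack, from the final ESP upward (classic i386 SysV process-entry layout):
//   argc
//   argv[0..argc-1]  (32bits pointers)  <- context->argv32
//   NULL
//   envp[0..envc-1]  (32bits pointers)  <- context->envv32
//   NULL
//   auxv[]           (pairs of 32bits {type, value}, terminated by AT_NULL)
//   ...padding, AT_RANDOM bytes, "i686", then the argument/environment string copies and the argv[0] copy...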

@ -42,9 +42,19 @@ void PushString(x64emu_t *emu, const char* s)
memcpy((void*)R_RSP, s, sz);
}
void SetupInitialStack32(x64emu_t *emu)
#ifndef BOX32
{ }
#else
;
#endif
EXPORTDYN
void SetupInitialStack(x64emu_t *emu)
{
if(box64_is32bits) {
SetupInitialStack32(emu);
return;
}
// start with 0
Push64(emu, 0);
// push program executed

@ -12,43 +12,74 @@
#include "box64context.h"
#include "box64stack.h"
#include "dynarec.h"
#ifdef BOX32
#include "box32.h"
#endif
EXPORTDYN
uint64_t RunFunction(uintptr_t fnc, int nargs, ...)
{
x64emu_t *emu = thread_get_emu();
int align = (nargs>6)?(((nargs-6)&1)):0;
int stackn = align + ((nargs>6)?(nargs-6):0);
#ifdef BOX32
if(box64_is32bits) {
Push_32(emu, R_RBP); // push ebp
R_RBP = R_ESP; // mov ebp, esp
Push64(emu, R_RBP); // push rbp
R_RBP = R_RSP; // mov rbp, rsp
R_ESP -= nargs*4; // need to push in reverse order
R_RSP -= stackn*sizeof(void*); // need to push in reverse order
ptr_t *p = (ptr_t*)from_ptrv(R_ESP);
uint64_t *p = (uint64_t*)R_RSP;
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
if(i<6) {
int nn[] = {_DI, _SI, _DX, _CX, _R8, _R9};
emu->regs[nn[i]].q[0] = va_arg(va, uint64_t);
} else {
*p = va_arg(va, uint64_t);
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
*p = va_arg(va, uint32_t);
p++;
}
va_end (va);
} else
#endif
{
int align = (nargs>6)?(((nargs-6)&1)):0;
int stackn = align + ((nargs>6)?(nargs-6):0);
Push64(emu, R_RBP); // push rbp
R_RBP = R_RSP; // mov rbp, rsp
R_RSP -= stackn*sizeof(void*); // need to push in reverse order
uint64_t *p = (uint64_t*)R_RSP;
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
if(i<6) {
int nn[] = {_DI, _SI, _DX, _CX, _R8, _R9};
emu->regs[nn[i]].q[0] = va_arg(va, uint64_t);
} else {
*p = va_arg(va, uint64_t);
p++;
}
}
va_end (va);
}
va_end (va);
uintptr_t oldip = R_RIP;
DynaCall(emu, fnc);
if(oldip==R_RIP) {
R_RSP = R_RBP; // mov rsp, rbp
R_RBP = Pop64(emu); // pop rbp
#ifdef BOX32
if(box64_is32bits) {
R_RSP = R_EBP; // mov esp, ebp
R_EBP = Pop_32(emu); // pop ebp
}
#endif
{
R_RSP = R_RBP; // mov rsp, rbp
R_RBP = Pop64(emu); // pop rbp
}
}
uint64_t ret = R_RAX;
uint64_t ret = box64_is32bits?((uint64_t)R_EAX | ((uint64_t)R_EDX)<<32):R_RAX;
return ret;
}
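// Note: in 32bits mode RunFunction uses the i386 cdecl convention (every argument pushed as a 32bits slot
// on the guest stack, no register passing, result read back from EAX, or EDX:EAX for 64bits values),
// while the 64bits path keeps the SysV x86_64 convention (first 6 integer args in RDI/RSI/RDX/RCX/R8/R9,
// the rest on the stack, result in RAX).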
@ -61,6 +92,27 @@ uint64_t RunFunctionFmt(uintptr_t fnc, const char* fmt, ...)
int ni = 0;
int ndf = 0;
for (int i=0; fmt[i]; ++i) {
#ifdef BOX32
if(box64_is32bits)
switch(fmt[i]) {
case 'd':
case 'I':
case 'U': nargs+=2; break;
case 'p':
case 'L':
case 'l':
case 'f':
case 'i':
case 'u':
case 'w':
case 'W':
case 'c':
case 'C': ++nargs; break;
default:
++nargs; break;
}
else
#endif
switch(fmt[i]) {
case 'f':
case 'd': if(ndf<8) ++ndf; else ++nargs; break;
@ -83,63 +135,116 @@ uint64_t RunFunctionFmt(uintptr_t fnc, const char* fmt, ...)
ndf = 0;
int align = nargs&1;
int stackn = align + nargs;
Push64(emu, R_RBP); // push rbp
R_RBP = R_RSP; // mov rbp, rsp
R_RSP -= stackn*sizeof(void*); // need to push in reverse order
uint64_t *p = (uint64_t*)R_RSP;
static const int nn[] = {_DI, _SI, _DX, _CX, _R8, _R9};
#define GO(c, A, B, B2, C) case c: if(ni<6) emu->regs[nn[ni++]].A[0] = C va_arg(va, B2); else {*p = 0; *((B*)p) = va_arg(va, B2); ++p;}; break;
va_list va;
va_start (va, fmt);
for (int i=0; fmt[i]; ++i) {
switch(fmt[i]) {
case 'f': if(ndf<8)
emu->xmm[ndf++].f[0] = va_arg(va, double); // float are promoted to double in ...
else {
*p = 0;
*((float*)p) = va_arg(va, double);
++p;
}
break;
case 'd': if(ndf<8)
emu->xmm[ndf++].d[0] = va_arg(va, double);
else {
*((double*)p) = va_arg(va, double);
++p;
}
break;
GO('p', q, void*, void*, (uintptr_t))
GO('i', sdword, int, int, )
GO('u', dword, uint32_t, uint32_t, )
GO('I', sq, int64_t, int64_t, )
GO('U', q, uint64_t, uint64_t, )
GO('L', q, uint64_t, uint64_t, )
GO('l', sq, int64_t, int64_t, )
GO('w', sword, int16_t, int, )
GO('W', word, uint16_t, int, )
GO('c', sbyte, int8_t, int, )
GO('C', byte, uint8_t, int, )
default:
printf_log(LOG_NONE, "Error, unhandled arg %d: '%c' in RunFunctionFmt\n", i, fmt[i]);
if(ni<6) emu->regs[nn[ni++]].q[0] = va_arg(va, uint64_t); else {*p = va_arg(va, uint64_t); ++p;};
break;
}
int sizeof_ptr = sizeof(void*);
#ifdef BOX32
if(box64_is32bits) {
Push_32(emu, R_EBP); // push ebp
R_RBP = R_ESP; // mov ebp, esp
sizeof_ptr = sizeof(ptr_t);
} else
#endif
{
Push64(emu, R_RBP); // push rbp
R_RBP = R_RSP; // mov rbp, rsp
}
R_RSP -= stackn*sizeof_ptr; // need to push in reverse order
#ifdef BOX32
if(box64_is32bits) {
ptr_t *p = (ptr_t*)from_ptrv(R_ESP);
#define GO(c, B, B2, N) case c: *((B*)p) = va_arg(va, B2); p+=N; break
va_list va;
va_start (va, fmt);
for (int i=0; fmt[i]; ++i) {
switch(fmt[i]) {
GO('f', float, double, 1);
GO('d', double, double, 2);
case 'p': *((ptr_t*)p) = to_ptrv(va_arg(va, void*)); p+=1; break;
GO('i', int, int, 1);
GO('u', uint32_t, uint32_t, 1);
GO('I', int64_t, int64_t, 2);
GO('U', uint64_t, uint64_t, 2);
GO('L', uint32_t, uint64_t, 1); // long are 64bits on 64bits system
GO('l', int32_t, int64_t, 1); // but 32bits on 32bits system
GO('w', int16_t, int, 1);
GO('W', uint16_t, int, 1);
GO('c', int8_t, int, 1);
GO('C', uint8_t, int, 1);
default:
printf_log(LOG_NONE, "Error, unhandled arg %d: '%c' in RunFunctionFmt\n", i, fmt[i]);
*p = va_arg(va, uint32_t);
++p;
break;
}
}
#undef GO
va_end (va);
} else
#endif
{
uint64_t *p = (uint64_t*)R_RSP;
static const int nn[] = {_DI, _SI, _DX, _CX, _R8, _R9};
#define GO(c, A, B, B2, C) case c: if(ni<6) emu->regs[nn[ni++]].A[0] = C va_arg(va, B2); else {*p = 0; *((B*)p) = va_arg(va, B2); ++p;}; break;
va_list va;
va_start (va, fmt);
for (int i=0; fmt[i]; ++i) {
switch(fmt[i]) {
case 'f': if(ndf<8)
emu->xmm[ndf++].f[0] = va_arg(va, double); // float are promoted to double in ...
else {
*p = 0;
*((float*)p) = va_arg(va, double);
++p;
}
break;
case 'd': if(ndf<8)
emu->xmm[ndf++].d[0] = va_arg(va, double);
else {
*((double*)p) = va_arg(va, double);
++p;
}
break;
GO('p', q, void*, void*, (uintptr_t))
GO('i', sdword, int, int, )
GO('u', dword, uint32_t, uint32_t, )
GO('I', sq, int64_t, int64_t, )
GO('U', q, uint64_t, uint64_t, )
GO('L', q, uint64_t, uint64_t, )
GO('l', sq, int64_t, int64_t, )
GO('w', sword, int16_t, int, )
GO('W', word, uint16_t, int, )
GO('c', sbyte, int8_t, int, )
GO('C', byte, uint8_t, int, )
default:
printf_log(LOG_NONE, "Error, unhandled arg %d: '%c' in RunFunctionFmt\n", i, fmt[i]);
if(ni<6) emu->regs[nn[ni++]].q[0] = va_arg(va, uint64_t); else {*p = va_arg(va, uint64_t); ++p;};
break;
}
}
#undef GO
va_end (va);
}
va_end (va);
uintptr_t oldip = R_RIP;
DynaCall(emu, fnc);
if(oldip==R_RIP) {
R_RSP = R_RBP; // mov rsp, rbp
R_RBP = Pop64(emu); // pop rbp
#ifdef BOX32
if(box64_is32bits) {
R_RSP = R_EBP; // mov esp, ebp
R_RBP = Pop_32(emu); // pop ebp
}
#endif
{
R_RSP = R_RBP; // mov rsp, rbp
R_RBP = Pop64(emu); // pop rbp
}
}
uint64_t ret = R_RAX;
uint64_t ret = box64_is32bits?((uint64_t)R_EAX | ((uint64_t)R_EDX)<<32):R_RAX;
return ret;
}
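// Illustration (hypothetical call): RunFunctionFmt(fnc, "pUi", some_ptr, some_u64, some_int) packs,
// on a 32bits guest, 4 bytes for 'p', 8 bytes for 'U' and 4 bytes for 'i' on the guest stack in that
// order, whereas the 64bits path would pass all three in RDI/RSI/RDX.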
@ -148,6 +253,10 @@ EXPORTDYN
uint64_t RunSafeFunction(uintptr_t fnc, int nargs, ...)
{
x64emu_t * emu = thread_get_emu();
if(box64_is32bits) {
printf_log(LOG_NONE, "Calling RunSafeFunction in 32bits\n");
abort();
}
int align = (nargs>6)?(((nargs-6)&1)):0;
int stackn = align + ((nargs>6)?(nargs-6):0);
@ -222,28 +331,48 @@ uint64_t RunSafeFunction(uintptr_t fnc, int nargs, ...)
EXPORTDYN
uint64_t RunFunctionWithEmu(x64emu_t *emu, int QuitOnLongJump, uintptr_t fnc, int nargs, ...)
{
int align = (nargs>6)?(((nargs-6)&1)):0;
int stackn = align + ((nargs>6)?(nargs-6):0);
#ifdef BOX32
if(box64_is32bits) {
Push_32(emu, R_RBP); // push ebp
R_RBP = R_ESP; // mov ebp, esp
Push64(emu, R_RBP); // push rbp
R_RBP = R_RSP; // mov rbp, rsp
R_ESP -= nargs*4; // need to push in reverse order
R_RSP -= stackn*sizeof(void*); // need to push in reverse order
ptr_t *p = (ptr_t*)from_ptrv(R_ESP);
uint64_t *p = (uint64_t*)R_RSP;
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
if(i<6) {
int nn[] = {_DI, _SI, _DX, _CX, _R8, _R9};
emu->regs[nn[i]].q[0] = va_arg(va, uint64_t);
} else {
*p = va_arg(va, uint64_t);
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
*p = va_arg(va, uint32_t);
p++;
}
va_end (va);
} else
#endif
{
int align = (nargs>6)?(((nargs-6)&1)):0;
int stackn = align + ((nargs>6)?(nargs-6):0);
Push64(emu, R_RBP); // push rbp
R_RBP = R_RSP; // mov rbp, rsp
R_RSP -= stackn*sizeof(void*); // need to push in reverse order
uint64_t *p = (uint64_t*)R_RSP;
va_list va;
va_start (va, nargs);
for (int i=0; i<nargs; ++i) {
if(i<6) {
int nn[] = {_DI, _SI, _DX, _CX, _R8, _R9};
emu->regs[nn[i]].q[0] = va_arg(va, uint64_t);
} else {
*p = va_arg(va, uint64_t);
p++;
}
}
va_end (va);
}
va_end (va);
uintptr_t oldip = R_RIP;
int old_quit = emu->quit;
@ -256,20 +385,29 @@ uint64_t RunFunctionWithEmu(x64emu_t *emu, int QuitOnLongJump, uintptr_t fnc, int nargs, ...)
if(oldip==R_RIP) {
R_RSP = R_RBP; // restore stack only if EIP is the one expected (else, it means return value is not the one expected)
R_RBP = Pop64(emu); //Pop EBP
#ifdef BOX32
if(box64_is32bits)
R_RBP = Pop_32(emu); //Pop EBP
else
#endif
R_RBP = Pop64(emu); //Pop EBP
}
emu->quit = old_quit;
emu->flags.quitonlongjmp = oldlong;
return R_RAX;
return box64_is32bits?((uint64_t)R_EAX | ((uint64_t)R_EDX)<<32):R_RAX;
}
EXPORTDYN
uint64_t RunFunctionWindows(uintptr_t fnc, int nargs, ...)
{
x64emu_t *emu = thread_get_emu();
if(box64_is32bits) {
printf_log(LOG_NONE, "Calling RunFunctionWindows in 32bits\n");
abort();
}
int align = (nargs>4)?(((nargs-4)&1)):0;
int stackn = align + ((nargs>4)?(nargs-4):0);

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedalureDEFS_H_
#define __wrappedalureDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedalureTYPES_H_
#define __wrappedalureTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedalureUNDEFS_H_
#define __wrappedalureUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedalutDEFS_H_
#define __wrappedalutDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedalutTYPES_H_
#define __wrappedalutTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedalutUNDEFS_H_
#define __wrappedalutUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedanlDEFS_H_
#define __wrappedanlDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedanlTYPES_H_
#define __wrappedanlTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedanlUNDEFS_H_
#define __wrappedanlUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatkbridgeDEFS_H_
#define __wrappedatkbridgeDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatkbridgeTYPES_H_
#define __wrappedatkbridgeTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatkbridgeUNDEFS_H_
#define __wrappedatkbridgeUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatkDEFS_H_
#define __wrappedatkDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatkTYPES_H_
#define __wrappedatkTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatkUNDEFS_H_
#define __wrappedatkUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatomicDEFS_H_
#define __wrappedatomicDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatomicTYPES_H_
#define __wrappedatomicTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatomicUNDEFS_H_
#define __wrappedatomicUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatspiDEFS_H_
#define __wrappedatspiDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatspiTYPES_H_
#define __wrappedatspiTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedatspiUNDEFS_H_
#define __wrappedatspiUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedayatanaappindicator3DEFS_H_
#define __wrappedayatanaappindicator3DEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedayatanaappindicator3TYPES_H_
#define __wrappedayatanaappindicator3TYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedayatanaappindicator3UNDEFS_H_
#define __wrappedayatanaappindicator3UNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedbz2DEFS_H_
#define __wrappedbz2DEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedbz2TYPES_H_
#define __wrappedbz2TYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedbz2UNDEFS_H_
#define __wrappedbz2UNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcairoDEFS_H_
#define __wrappedcairoDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcairogobjectDEFS_H_
#define __wrappedcairogobjectDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcairogobjectTYPES_H_
#define __wrappedcairogobjectTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcairogobjectUNDEFS_H_
#define __wrappedcairogobjectUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcairoTYPES_H_
#define __wrappedcairoTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcairoUNDEFS_H_
#define __wrappedcairoUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcapDEFS_H_
#define __wrappedcapDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcapTYPES_H_
#define __wrappedcapTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcapUNDEFS_H_
#define __wrappedcapUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcrashhandlerDEFS_H_
#define __wrappedcrashhandlerDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcrashhandlerTYPES_H_
#define __wrappedcrashhandlerTYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcrashhandlerUNDEFS_H_
#define __wrappedcrashhandlerUNDEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcrypto3DEFS_H_
#define __wrappedcrypto3DEFS_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcrypto3TYPES_H_
#define __wrappedcrypto3TYPES_H_

@ -1,5 +1,5 @@
/*******************************************************************
* File automatically generated by rebuild_wrappers.py (v2.4.0.21) *
* File automatically generated by rebuild_wrappers.py (v2.4.0.23) *
*******************************************************************/
#ifndef __wrappedcrypto3UNDEFS_H_
#define __wrappedcrypto3UNDEFS_H_

Some files were not shown because too many files have changed in this diff.