mirror of
https://github.com/libretro/scummvm.git
synced 2025-04-02 23:01:42 +00:00
BUILD: Replace _need_memalign runtime test by hardcoded list
According to a discussion on -devel, this test cannot work reliably in general: it cannot determine whether unaligned access really works reliably in all situations, nor on all implementations of the target CPU architecture; nor can it determine whether unaligned access is supported efficiently (as opposed to, say, being supported only via a very slow fault-handler mechanism).
This commit is contained in:
parent
4b7f6dfa3c
commit
d2e778bf0b
54
configure
vendored
54
configure
vendored
@ -2061,48 +2061,34 @@ fi
|
||||
# alignment can be a lot slower than regular access, so we don't want
|
||||
# to use it if we don't have to.
|
||||
#
|
||||
# So we do the following: First, for CPU families where we know whether
|
||||
# unaligned access is safe & fast, we enable / disable unaligned access
|
||||
# accordingly.
|
||||
# Otherwise, for cross compiled builds we just disable memory alignment.
|
||||
# For native builds, we run some test code that detects whether unaligned
|
||||
# access is supported (and is supported without an exception handler).
|
||||
# So we do the following: For CPU families where we know whether unaligned
|
||||
# access is safe & fast, we enable / disable unaligned access accordingly.
|
||||
# Otherwise, we just disable memory alignment.
|
||||
#
|
||||
# NOTE: The only kinds of unaligned access we allow are for 2 byte and
|
||||
# 4 byte loads / stores. No promises are made for bigger sizes, such as
|
||||
# 8 or 16 byte loads, for which various architectures (e.g. x86 and PowerPC)
|
||||
# behave differently than for the smaller sizes.
|
||||
# NOTE: In the past, for non-cross compiled builds, we would also run some code
|
||||
# which would try to test whether unaligned access worked or not. But this test
|
||||
# could not reliably determine whether unaligned access really worked in all
|
||||
# situations (and across different implementations of the target CPU arch), nor
|
||||
# whether it was fast (as opposed to slowly emulated by fault handlers). Hence,
|
||||
# we do not use this approach anymore.
|
||||
#
|
||||
# NOTE: The only kinds of unaligned access we allow are for 2 byte and 4
|
||||
# byte loads / stores. No promises are made for bigger sizes, such as 8
|
||||
# or 16 byte loads, for which architectures may behave differently than
|
||||
# for the smaller sizes.
|
||||
echo_n "Alignment required... "
|
||||
case $_host_cpu in
|
||||
i[3-6]86 | x86_64 | ppc*)
|
||||
# Unaligned access should work
|
||||
_need_memalign=no
|
||||
;;
|
||||
alpha* | arm* | bfin* | hp* | mips* | sh* | sparc* | ia64 | nv1*)
|
||||
# Unaligned access is not supported or extremely slow.
|
||||
_need_memalign=yes
|
||||
;;
|
||||
i[3-6]86 | x86_64 | ppc*)
|
||||
# Unaligned access should work reasonably well
|
||||
_need_memalign=no
|
||||
;;
|
||||
*)
|
||||
if test -z "$_host"; then
|
||||
# NOT in cross-compiling mode:
|
||||
# Try to auto-detect....
|
||||
cat > $TMPC << EOF
|
||||
#include <stdlib.h>
|
||||
#include <signal.h>
|
||||
int main(int argc, char **argv) {
|
||||
unsigned char test[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
|
||||
signal(SIGBUS, exit);
|
||||
signal(SIGABRT, exit);
|
||||
signal(SIGSEGV, exit);
|
||||
if (*((unsigned int *)(test + 1)) != 0x55443322 && *((unsigned int *)(test + 1)) != 0x22334455) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EOF
|
||||
cc_check_no_clean && $TMPO$HOSTEXEEXT && _need_memalign=no
|
||||
cc_check_clean
|
||||
fi
|
||||
# Status of unaligned access is unknown, so assume the worst.
|
||||
_need_memalign=yes
|
||||
;;
|
||||
esac
|
||||
echo "$_need_memalign"
|
||||
|
Loading…
x
Reference in New Issue
Block a user