Compatibility with GCC/ARMv7

This commit is contained in:
Yann Collet 2015-08-19 15:11:24 +01:00
parent b605d6e2ff
commit 9ffb7e28c6
2 changed files with 45 additions and 13 deletions

View File

@ -35,15 +35,24 @@ You can contact the author at :
/**************************************
* Tuning parameters
**************************************/
/* XXH_FORCE_DIRECT_MEMORY_ACCESS
* Unaligned memory access is automatically enabled for "common" CPU, such as x86/x64.
* For others CPU, the compiler will be more cautious, and insert extra code to ensure proper working with unaligned memory accesses.
* If you know your target CPU efficiently supports unaligned memory accesses, you can force this option manually.
* If your CPU efficiently supports unaligned memory accesses and the compiler did not automatically detected it, you will witness large performance improvement.
* You can also enable this switch from compilation command line / Makefile.
/* XXH_FORCE_MEMORY_ACCESS
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The below switch allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violates the C standard.
* It can generate buggy code on targets which generate assembly depending on alignment.
* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
* See http://stackoverflow.com/a/32095106/646947 for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#if !defined(XXH_FORCE_DIRECT_MEMORY_ACCESS) && ( defined(__ARM_FEATURE_UNALIGNED) )
# define XXH_FORCE_DIRECT_MEMORY_ACCESS 1
#if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define XXH_FORCE_MEMORY_ACCESS 2
#elif defined(__INTEL_COMPILER) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define XXH_FORCE_MEMORY_ACCESS 1
#endif
/* XXH_ACCEPT_NULL_INPUT_POINTER :
@ -57,8 +66,8 @@ You can contact the author at :
* By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
* Results are therefore identical for little-endian and big-endian CPU.
* This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
* Should endian-independance be of no importance for your application, you may set the #define below to 1.
* It will improve speed for Big-endian CPU.
* Should endian-independence be of no importance for your application, you may set the #define below to 1,
* to improve speed for Big-endian CPU.
* This option has no impact on little-endian CPUs.
*/
#define XXH_FORCE_NATIVE_FORMAT 0
@ -68,7 +77,7 @@ You can contact the author at :
* It means : don't make a test between aligned/unaligned, because performance will be the same.
* It saves one initial branch per hash.
*/
#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) || (defined(XXH_FORCE_DIRECT_MEMORY_ACCESS) && (XXH_FORCE_DIRECT_MEMORY_ACCESS==1))
#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
# define XXH_USELESS_ALIGN_BRANCH 1
#endif
@ -125,13 +134,28 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
#endif
#if defined(XXH_FORCE_DIRECT_MEMORY_ACCESS) && (XXH_FORCE_DIRECT_MEMORY_ACCESS==1)
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
/* Plain dereference: fastest where the hardware supports unaligned loads,
 * but the pointer cast violates C strict-aliasing/alignment rules — a known
 * trade-off, only selected when XXH_FORCE_MEMORY_ACCESS==2 (see #if above). */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/* __packed instructions are safer, but compiler specific, hence potentially problematic */
/* currently only defined for gcc and icc */
/* Read through a one-member packed struct: the packed attribute tells the
 * compiler the pointer may be unaligned, so it emits a safe access sequence
 * (often as fast as memcpy, sometimes faster — see comment block above).
 * Relies on the __attribute__((__packed__)) extension (gcc/icc per the
 * surrounding comments), hence gated on XXH_FORCE_MEMORY_ACCESS==1. */
typedef struct __attribute__ ((__packed__)) { U32 v; } uu32;
typedef struct __attribute__ ((__packed__)) { U64 v; } uu64;
static U32 XXH_read32(const void* ptr) { return ((const uu32*)ptr)->v; }
static U64 XXH_read64(const void* ptr) { return ((const uu64*)ptr)->v; }
#else
/* portable and safe solution. Generally efficient.
* see : http://stackoverflow.com/a/32095106/646947
*/
static U32 XXH_read32(const void* memPtr)
{
U32 val;

View File

@ -106,9 +106,13 @@ static unsigned BMK_isLittleEndian(void)
**************************************/
#define PROGRAM_NAME exename
#define PROGRAM_VERSION ""
static const int g_nbBits = (int)(sizeof(void*)*8);
static const char g_lename[] = "little endian";
static const char g_bename[] = "big endian";
#define ENDIAN_NAME (BMK_isLittleEndian() ? g_lename : g_bename)
#define COMPILED __DATE__
static const char author[] = "Yann Collet";
#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s (%s) ***\n", PROGRAM_NAME, (int)(sizeof(void*)*8), PROGRAM_VERSION, author, COMPILED
#define WELCOME_MESSAGE "%s %s (%i-bits %s), by %s (%s) \n", PROGRAM_NAME, PROGRAM_VERSION, g_nbBits, ENDIAN_NAME, author, COMPILED
#define NBLOOPS 3 /* Default number of benchmark iterations */
#define TIMELOOP 2500 /* Minimum timing per iteration */
@ -649,6 +653,10 @@ int main(int argc, const char** argv)
{
switch(*argument)
{
/* Display version */
case 'V':
DISPLAY(WELCOME_MESSAGE); return 0;
/* Display help on usage */
case 'h':
return usage(exename);