Mirror of https://github.com/FEX-Emu/xxHash.git (synced 2024-11-28 01:00:56 +00:00)

Merge pull request #271 from easyaspi314/typedefs
Improve typedefs, fix 16-bit int/seed type bug

This commit is contained in: commit 8dab0315ac

xxh3.h (334 changed lines)
@@ -148,12 +148,12 @@
 # endif
 #endif

-/* U64 XXH_mult32to64(U32 a, U64 b) { return (U64)a * (U64)b; } */
+/* xxh_u64 XXH_mult32to64(xxh_u32 a, xxh_u64 b) { return (xxh_u64)a * (xxh_u64)b; } */
 #if defined(_MSC_VER) && defined(_M_IX86)
 # include <intrin.h>
 # define XXH_mult32to64(x, y) __emulu(x, y)
 #else
-# define XXH_mult32to64(x, y) ((U64)((x) & 0xFFFFFFFF) * (U64)((y) & 0xFFFFFFFF))
+# define XXH_mult32to64(x, y) ((xxh_u64)((x) & 0xFFFFFFFF) * (xxh_u64)((y) & 0xFFFFFFFF))
 #endif

 /* VSX stuff. It's a lot because VSX support is mediocre across compilers and
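The masked macro above is the portable fallback for a 32x32->64 multiply. As a quick illustration (not part of xxHash; the macro and main below are made up for the demo and use uint64_t in place of xxh_u64), the masks tell the compiler that only the low 32 bits of each operand matter, so it can emit a single widening multiply (MUL on x86, UMULL on ARM) instead of a full 64x64 multiply:

/* Standalone sanity check for the masked 32x32->64 multiply idiom above. */
#include <stdint.h>
#include <stdio.h>

#define MULT32TO64(x, y) ((uint64_t)((x) & 0xFFFFFFFF) * (uint64_t)((y) & 0xFFFFFFFF))

int main(void)
{
    uint64_t a = 0xFFFFFFFFu, b = 0x9E3779B1u;
    /* Both operands are 64-bit, but the masks promise the compiler that
     * only their low 32 bits are significant. */
    uint64_t wide = MULT32TO64(a, b);
    printf("0x%016llx\n", (unsigned long long)wide);
    return 0;
}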
@@ -248,7 +248,7 @@ XXH_FORCE_INLINE U64x2 XXH_vec_mule(U32x4 a, U32x4 b) {
 # error "default keyset is not large enough"
 #endif

-XXH_ALIGN(64) static const BYTE kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+XXH_ALIGN(64) static const xxh_u8 kSecret[XXH_SECRET_DEFAULT_SIZE] = {
 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
@@ -276,7 +276,7 @@ XXH_ALIGN(64) static const BYTE kSecret[XXH_SECRET_DEFAULT_SIZE] = {
 __attribute__((__target__("no-sse")))
 #endif
 static XXH128_hash_t
-XXH_mult64to128(U64 lhs, U64 rhs)
+XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
 {
 /*
 * GCC/Clang __uint128_t method.
@@ -298,13 +298,13 @@ XXH_mult64to128(U64 lhs, U64 rhs)
 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

 __uint128_t product = (__uint128_t)lhs * (__uint128_t)rhs;
-XXH128_hash_t const r128 = { (U64)(product), (U64)(product >> 64) };
+XXH128_hash_t const r128 = { (xxh_u64)(product), (xxh_u64)(product >> 64) };
 return r128;

 /*
 * MSVC for x64's _umul128 method.
 *
-* U64 _umul128(U64 Multiplier, U64 Multiplicand, U64 *HighProduct);
+* xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
 *
 * This compiles to single operand MUL on x64.
 */
@@ -313,8 +313,8 @@ XXH_mult64to128(U64 lhs, U64 rhs)
 #ifndef _MSC_VER
 # pragma intrinsic(_umul128)
 #endif
-U64 product_high;
-U64 const product_low = _umul128(lhs, rhs, &product_high);
+xxh_u64 product_high;
+xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
 XXH128_hash_t const r128 = { product_low, product_high };
 return r128;

@@ -346,11 +346,11 @@ XXH_mult64to128(U64 lhs, U64 rhs)
 * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
 * instruction available in ARMv6+ A32/T32, which is shown below:
 *
-* void UMAAL(U32 *RdLo, U32 *RdHi, U32 Rn, U32 Rm)
+* void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
 * {
-* U64 product = (U64)*RdLo * (U64)*RdHi + Rn + Rm;
-* *RdLo = (U32)(product & 0xFFFFFFFF);
-* *RdHi = (U32)(product >> 32);
+* xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
+* *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
+* *RdHi = (xxh_u32)(product >> 32);
 * }
 *
 * This instruction was designed for efficient long multiplication,
@@ -362,15 +362,15 @@ XXH_mult64to128(U64 lhs, U64 rhs)
 */

 /* First calculate all of the cross products. */
-U64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
-U64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
-U64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
-U64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
+xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
+xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
+xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
+xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);

 /* Now add the products together. These will never overflow. */
-U64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
-U64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
-U64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

 XXH128_hash_t r128 = { lower, upper };
 return r128;
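The scalar path above builds a 128-bit product from four 32x32->64 partial products. Below is a self-contained sketch of that schoolbook scheme, with a cross-check against the compiler's __uint128_t where available; the struct and function names are invented for the demo and use uint64_t rather than xxh_u64:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical struct mirroring XXH128_hash_t's low64/high64 layout. */
typedef struct { uint64_t low64; uint64_t high64; } u128;

static uint64_t mult32to64(uint64_t x, uint64_t y)
{
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}

static u128 mult64to128_portable(uint64_t lhs, uint64_t rhs)
{
    /* Four 32x32->64 partial products, as in the scalar branch above. */
    uint64_t const lo_lo = mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    uint64_t const hi_lo = mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    uint64_t const lo_hi = mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    uint64_t const hi_hi = mult32to64(lhs >> 32,        rhs >> 32);

    /* cross fits in 64 bits: (2^32-1)^2 + 2*(2^32-1) = 2^64-1. */
    uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    uint64_t const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
    uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    u128 r = { lower, upper };
    return r;
}

int main(void)
{
    u128 r = mult64to128_portable(0x9E3779B185EBCA87ULL, 0xC2B2AE3D27D4EB4FULL);
#ifdef __SIZEOF_INT128__
    /* Cross-check against the native 128-bit multiply when the compiler has one. */
    __uint128_t p = (__uint128_t)0x9E3779B185EBCA87ULL * 0xC2B2AE3D27D4EB4FULL;
    printf("match: %d\n", r.low64 == (uint64_t)p && r.high64 == (uint64_t)(p >> 64));
#else
    printf("low=0x%016llx high=0x%016llx\n",
           (unsigned long long)r.low64, (unsigned long long)r.high64);
#endif
    return 0;
}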
@@ -389,15 +389,15 @@ XXH_mult64to128(U64 lhs, U64 rhs)
 #if defined(__GNUC__) && !defined(__clang__) && defined(__i386__)
 __attribute__((__target__("no-sse")))
 #endif
-static U64
-XXH3_mul128_fold64(U64 lhs, U64 rhs)
+static xxh_u64
+XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
 {
 XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
 return product.low64 ^ product.high64;
 }


-static XXH64_hash_t XXH3_avalanche(U64 h64)
+static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
 {
 h64 ^= h64 >> 37;
 h64 *= PRIME64_3;
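XXH3_avalanche is a short xorshift-multiply finalizer. A hedged standalone sketch follows; the PRIME64_3 constant is taken from the xxhash.c section of this diff, while the final ">> 32" xorshift is an assumption, since the hunk above stops after the multiply:

#include <stdint.h>
#include <stdio.h>

/* Sketch of an xorshift-multiply-xorshift finalizer in the style of
 * XXH3_avalanche above. */
static uint64_t avalanche64(uint64_t h64)
{
    h64 ^= h64 >> 37;                 /* spread high bits downward */
    h64 *= 0x165667B19E3779F9ULL;     /* PRIME64_3: mix via multiplication */
    h64 ^= h64 >> 32;                 /* assumed final xorshift */
    return h64;
}

int main(void)
{
    /* Nearby inputs should produce wildly different outputs. */
    printf("%016llx\n", (unsigned long long)avalanche64(1));
    printf("%016llx\n", (unsigned long long)avalanche64(2));
    return 0;
}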
@ -411,51 +411,51 @@ static XXH64_hash_t XXH3_avalanche(U64 h64)
|
||||
* ========================================== */
|
||||
|
||||
XXH_FORCE_INLINE XXH64_hash_t
|
||||
XXH3_len_1to3_64b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(input != NULL);
|
||||
XXH_ASSERT(1 <= len && len <= 3);
|
||||
XXH_ASSERT(secret != NULL);
|
||||
{ BYTE const c1 = input[0];
|
||||
BYTE const c2 = input[len >> 1];
|
||||
BYTE const c3 = input[len - 1];
|
||||
U32 const combined = ((U32)c1) | (((U32)c2) << 8) | (((U32)c3) << 16) | (((U32)len) << 24);
|
||||
U64 const keyed = (U64)combined ^ (XXH_readLE32(secret) + seed);
|
||||
U64 const mixed = keyed * PRIME64_1;
|
||||
{ xxh_u8 const c1 = input[0];
|
||||
xxh_u8 const c2 = input[len >> 1];
|
||||
xxh_u8 const c3 = input[len - 1];
|
||||
xxh_u32 const combined = ((xxh_u32)c1) | (((xxh_u32)c2) << 8) | (((xxh_u32)c3) << 16) | (((xxh_u32)len) << 24);
|
||||
xxh_u64 const keyed = (xxh_u64)combined ^ (XXH_readLE32(secret) + seed);
|
||||
xxh_u64 const mixed = keyed * PRIME64_1;
|
||||
return XXH3_avalanche(mixed);
|
||||
}
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE XXH64_hash_t
|
||||
XXH3_len_4to8_64b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(input != NULL);
|
||||
XXH_ASSERT(secret != NULL);
|
||||
XXH_ASSERT(4 <= len && len <= 8);
|
||||
{ U32 const input_lo = XXH_readLE32(input);
|
||||
U32 const input_hi = XXH_readLE32(input + len - 4);
|
||||
U64 const input_64 = input_lo | ((U64)input_hi << 32);
|
||||
U64 const keyed = input_64 ^ (XXH_readLE64(secret) + seed);
|
||||
U64 const mix64 = len + ((keyed ^ (keyed >> 51)) * PRIME32_1);
|
||||
{ xxh_u32 const input_lo = XXH_readLE32(input);
|
||||
xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
|
||||
xxh_u64 const input_64 = input_lo | ((xxh_u64)input_hi << 32);
|
||||
xxh_u64 const keyed = input_64 ^ (XXH_readLE64(secret) + seed);
|
||||
xxh_u64 const mix64 = len + ((keyed ^ (keyed >> 51)) * PRIME32_1);
|
||||
return XXH3_avalanche((mix64 ^ (mix64 >> 47)) * PRIME64_2);
|
||||
}
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE XXH64_hash_t
|
||||
XXH3_len_9to16_64b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(input != NULL);
|
||||
XXH_ASSERT(secret != NULL);
|
||||
XXH_ASSERT(9 <= len && len <= 16);
|
||||
{ U64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed);
|
||||
U64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret + 8) - seed);
|
||||
U64 const acc = len + (input_lo + input_hi) + XXH3_mul128_fold64(input_lo, input_hi);
|
||||
{ xxh_u64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed);
|
||||
xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret + 8) - seed);
|
||||
xxh_u64 const acc = len + (input_lo + input_hi) + XXH3_mul128_fold64(input_lo, input_hi);
|
||||
return XXH3_avalanche(acc);
|
||||
}
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE XXH64_hash_t
|
||||
XXH3_len_0to16_64b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(len <= 16);
|
||||
{ if (len > 8) return XXH3_len_9to16_64b(input, len, secret, seed);
|
||||
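For 1-3 byte inputs, XXH3 packs the first, middle and last byte plus the length into one 32-bit word before keying it with the secret and avalanching, so every short input starts from a distinct value. A minimal demo of just that packing step (helper names invented for the demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack bytes exactly as XXH3_len_1to3_64b does above: first byte, middle
 * byte, last byte, and the length. */
static uint32_t pack_1to3(const uint8_t* input, size_t len)
{
    uint8_t const c1 = input[0];
    uint8_t const c2 = input[len >> 1];
    uint8_t const c3 = input[len - 1];
    return ((uint32_t)c1) | (((uint32_t)c2) << 8)
         | (((uint32_t)c3) << 16) | (((uint32_t)len) << 24);
}

int main(void)
{
    const char* samples[] = { "a", "ab", "abc" };
    for (int i = 0; i < 3; i++) {
        printf("%-3s -> 0x%08x\n", samples[i],
               pack_1to3((const uint8_t*)samples[i], strlen(samples[i])));
    }
    return 0;
}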
@@ -470,7 +470,7 @@ XXH3_len_0to16_64b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash

 #define STRIPE_LEN 64
 #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
-#define ACC_NB (STRIPE_LEN / sizeof(U64))
+#define ACC_NB (STRIPE_LEN / sizeof(xxh_u64))

 typedef enum { XXH3_acc_64bits, XXH3_acc_128bits } XXH3_accWidth_e;

@@ -647,14 +647,14 @@ XXH3_accumulate_512( void* XXH_RESTRICT acc,

 #else /* scalar variant of Accumulator - universal */

-XXH_ALIGN(XXH_ACC_ALIGN) U64* const xacc = (U64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */
-const BYTE* const xinput = (const BYTE*) input; /* no alignment restriction */
-const BYTE* const xsecret = (const BYTE*) secret; /* no alignment restriction */
+XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */
+const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */
+const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
 size_t i;
 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
 for (i=0; i < ACC_NB; i++) {
-U64 const data_val = XXH_readLE64(xinput + 8*i);
-U64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
+xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
+xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);

 if (accWidth == XXH3_acc_64bits) {
 xacc[i] += data_val;
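The scalar accumulator processes one 64-byte stripe as eight 64-bit lanes: each lane reads a little-endian word, XORs it with the matching secret word, and folds the result into the accumulator. A hedged sketch of a single stripe follows; the hunk above is cut off after "xacc[i] += data_val;", so the keyed 32x32 multiply in the sketch is an assumption about the reference implementation, not a quote from this diff:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define STRIPE_LEN 64
#define ACC_NB (STRIPE_LEN / sizeof(uint64_t))   /* 8 lanes */

static uint64_t read_le64(const uint8_t* p)
{
    uint64_t v; memcpy(&v, p, sizeof(v));   /* little-endian host assumed for the demo */
    return v;
}

/* One scalar accumulation step over a single 64-byte stripe, following the
 * structure of the scalar XXH3_accumulate_512 branch above (64-bit mode). */
static void accumulate_stripe(uint64_t acc[ACC_NB],
                              const uint8_t* input, const uint8_t* secret)
{
    for (size_t i = 0; i < ACC_NB; i++) {
        uint64_t const data_val = read_le64(input  + 8*i);
        uint64_t const data_key = data_val ^ read_le64(secret + 8*i);
        acc[i] += data_val;                                   /* keep the raw input entropy */
        acc[i] += (data_key & 0xFFFFFFFF) * (data_key >> 32); /* assumed keyed mixing step */
    }
}

int main(void)
{
    uint64_t acc[ACC_NB] = {0};
    uint8_t stripe[STRIPE_LEN], secret[STRIPE_LEN];
    for (int i = 0; i < STRIPE_LEN; i++) { stripe[i] = (uint8_t)i; secret[i] = (uint8_t)(255 - i); }
    accumulate_stripe(acc, stripe, secret);
    printf("acc[0] = 0x%016llx\n", (unsigned long long)acc[0]);
    return 0;
}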
@ -789,13 +789,13 @@ XXH3_scrambleAcc(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
|
||||
|
||||
#else /* scalar variant of Scrambler - universal */
|
||||
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) U64* const xacc = (U64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */
|
||||
const BYTE* const xsecret = (const BYTE*) secret; /* no alignment restriction */
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned on 32-bytes boundaries, little hint for the auto-vectorizer */
|
||||
const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
|
||||
size_t i;
|
||||
XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
|
||||
for (i=0; i < ACC_NB; i++) {
|
||||
U64 const key64 = XXH_readLE64(xsecret + 8*i);
|
||||
U64 acc64 = xacc[i];
|
||||
xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
|
||||
xxh_u64 acc64 = xacc[i];
|
||||
acc64 ^= acc64 >> 47;
|
||||
acc64 ^= key64;
|
||||
acc64 *= PRIME32_1;
|
||||
@ -807,9 +807,9 @@ XXH3_scrambleAcc(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
|
||||
|
||||
/* assumption : nbStripes will not overflow secret size */
|
||||
XXH_FORCE_INLINE void
|
||||
XXH3_accumulate( U64* XXH_RESTRICT acc,
|
||||
const BYTE* XXH_RESTRICT input,
|
||||
const BYTE* XXH_RESTRICT secret,
|
||||
XXH3_accumulate( xxh_u64* XXH_RESTRICT acc,
|
||||
const xxh_u8* XXH_RESTRICT input,
|
||||
const xxh_u8* XXH_RESTRICT secret,
|
||||
size_t nbStripes,
|
||||
XXH3_accWidth_e accWidth)
|
||||
{
|
||||
@ -833,9 +833,9 @@ static void
|
||||
#else
|
||||
XXH_FORCE_INLINE void
|
||||
#endif
|
||||
XXH3_hashLong_internal_loop( U64* XXH_RESTRICT acc,
|
||||
const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH3_hashLong_internal_loop( xxh_u64* XXH_RESTRICT acc,
|
||||
const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH3_accWidth_e accWidth)
|
||||
{
|
||||
size_t const nb_rounds = (secretSize - STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
|
||||
@ -859,14 +859,14 @@ XXH3_hashLong_internal_loop( U64* XXH_RESTRICT acc,
|
||||
|
||||
/* last stripe */
|
||||
if (len & (STRIPE_LEN - 1)) {
|
||||
const BYTE* const p = input + len - STRIPE_LEN;
|
||||
const xxh_u8* const p = input + len - STRIPE_LEN;
|
||||
#define XXH_SECRET_LASTACC_START 7 /* do not align on 8, so that secret is different from scrambler */
|
||||
XXH3_accumulate_512(acc, p, secret + secretSize - STRIPE_LEN - XXH_SECRET_LASTACC_START, accWidth);
|
||||
} }
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE U64
|
||||
XXH3_mix2Accs(const U64* XXH_RESTRICT acc, const BYTE* XXH_RESTRICT secret)
|
||||
XXH_FORCE_INLINE xxh_u64
|
||||
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
|
||||
{
|
||||
return XXH3_mul128_fold64(
|
||||
acc[0] ^ XXH_readLE64(secret),
|
||||
@ -874,9 +874,9 @@ XXH3_mix2Accs(const U64* XXH_RESTRICT acc, const BYTE* XXH_RESTRICT secret)
|
||||
}
|
||||
|
||||
static XXH64_hash_t
|
||||
XXH3_mergeAccs(const U64* XXH_RESTRICT acc, const BYTE* XXH_RESTRICT secret, U64 start)
|
||||
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
|
||||
{
|
||||
U64 result64 = start;
|
||||
xxh_u64 result64 = start;
|
||||
|
||||
result64 += XXH3_mix2Accs(acc+0, secret + 0);
|
||||
result64 += XXH3_mix2Accs(acc+2, secret + 16);
|
||||
@ -890,10 +890,10 @@ XXH3_mergeAccs(const U64* XXH_RESTRICT acc, const BYTE* XXH_RESTRICT secret, U64
|
||||
PRIME64_4, PRIME32_2, PRIME64_5, PRIME32_1 };
|
||||
|
||||
XXH_FORCE_INLINE XXH64_hash_t
|
||||
XXH3_hashLong_internal(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize)
|
||||
XXH3_hashLong_internal(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize)
|
||||
{
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) U64 acc[ACC_NB] = XXH3_INIT_ACC;
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[ACC_NB] = XXH3_INIT_ACC;
|
||||
|
||||
XXH3_hashLong_internal_loop(acc, input, len, secret, secretSize, XXH3_acc_64bits);
|
||||
|
||||
@ -901,25 +901,25 @@ XXH3_hashLong_internal(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
XXH_STATIC_ASSERT(sizeof(acc) == 64);
|
||||
#define XXH_SECRET_MERGEACCS_START 11 /* do not align on 8, so that secret is different from accumulator */
|
||||
XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
|
||||
return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (U64)len * PRIME64_1);
|
||||
return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * PRIME64_1);
|
||||
}
|
||||
|
||||
|
||||
XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
|
||||
XXH3_hashLong_64b_defaultSecret(const BYTE* XXH_RESTRICT input, size_t len)
|
||||
XXH3_hashLong_64b_defaultSecret(const xxh_u8* XXH_RESTRICT input, size_t len)
|
||||
{
|
||||
return XXH3_hashLong_internal(input, len, kSecret, sizeof(kSecret));
|
||||
}
|
||||
|
||||
XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
|
||||
XXH3_hashLong_64b_withSecret(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize)
|
||||
XXH3_hashLong_64b_withSecret(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize)
|
||||
{
|
||||
return XXH3_hashLong_internal(input, len, secret, secretSize);
|
||||
}
|
||||
|
||||
|
||||
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, U64 v64)
|
||||
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
|
||||
{
|
||||
if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
|
||||
memcpy(dst, &v64, sizeof(v64));
|
||||
@ -928,7 +928,7 @@ XXH_FORCE_INLINE void XXH_writeLE64(void* dst, U64 v64)
|
||||
/* XXH3_initCustomSecret() :
|
||||
* destination `customSecret` is presumed allocated and same size as `kSecret`.
|
||||
*/
|
||||
XXH_FORCE_INLINE void XXH3_initCustomSecret(BYTE* customSecret, U64 seed64)
|
||||
XXH_FORCE_INLINE void XXH3_initCustomSecret(xxh_u8* customSecret, xxh_u64 seed64)
|
||||
{
|
||||
int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
|
||||
int i;
|
||||
@ -950,20 +950,20 @@ XXH_FORCE_INLINE void XXH3_initCustomSecret(BYTE* customSecret, U64 seed64)
|
||||
* Try to avoid it whenever possible (typically when seed==0).
|
||||
*/
|
||||
XXH_NO_INLINE XXH64_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
|
||||
XXH3_hashLong_64b_withSeed(const BYTE* input, size_t len, XXH64_hash_t seed)
|
||||
XXH3_hashLong_64b_withSeed(const xxh_u8* input, size_t len, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ALIGN(8) BYTE secret[XXH_SECRET_DEFAULT_SIZE];
|
||||
XXH_ALIGN(8) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
|
||||
if (seed==0) return XXH3_hashLong_64b_defaultSecret(input, len);
|
||||
XXH3_initCustomSecret(secret, seed);
|
||||
return XXH3_hashLong_internal(input, len, secret, sizeof(secret));
|
||||
}
|
||||
|
||||
|
||||
XXH_FORCE_INLINE U64 XXH3_mix16B(const BYTE* XXH_RESTRICT input,
|
||||
const BYTE* XXH_RESTRICT secret, U64 seed64)
|
||||
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
|
||||
const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
|
||||
{
|
||||
U64 const input_lo = XXH_readLE64(input);
|
||||
U64 const input_hi = XXH_readLE64(input+8);
|
||||
xxh_u64 const input_lo = XXH_readLE64(input);
|
||||
xxh_u64 const input_hi = XXH_readLE64(input+8);
|
||||
return XXH3_mul128_fold64(
|
||||
input_lo ^ (XXH_readLE64(secret) + seed64),
|
||||
input_hi ^ (XXH_readLE64(secret+8) - seed64) );
|
||||
@ -971,14 +971,14 @@ XXH_FORCE_INLINE U64 XXH3_mix16B(const BYTE* XXH_RESTRICT input,
|
||||
|
||||
|
||||
XXH_FORCE_INLINE XXH64_hash_t
|
||||
XXH3_len_17to128_64b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
|
||||
XXH_ASSERT(16 < len && len <= 128);
|
||||
|
||||
{ U64 acc = len * PRIME64_1;
|
||||
{ xxh_u64 acc = len * PRIME64_1;
|
||||
if (len > 32) {
|
||||
if (len > 64) {
|
||||
if (len > 96) {
|
||||
@ -1001,8 +1001,8 @@ XXH3_len_17to128_64b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
#define XXH3_MIDSIZE_MAX 240
|
||||
|
||||
XXH_NO_INLINE XXH64_hash_t
|
||||
XXH3_len_129to240_64b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
|
||||
@ -1011,7 +1011,7 @@ XXH3_len_129to240_64b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
#define XXH3_MIDSIZE_STARTOFFSET 3
|
||||
#define XXH3_MIDSIZE_LASTOFFSET 17
|
||||
|
||||
{ U64 acc = len * PRIME64_1;
|
||||
{ xxh_u64 acc = len * PRIME64_1;
|
||||
int const nbRounds = (int)len / 16;
|
||||
int i;
|
||||
for (i=0; i<8; i++) {
|
||||
@@ -1032,10 +1032,10 @@ XXH3_len_129to240_64b(const BYTE* XXH_RESTRICT input, size_t len,

 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
 {
-if (len <= 16) return XXH3_len_0to16_64b((const BYTE*)input, len, kSecret, 0);
-if (len <= 128) return XXH3_len_17to128_64b((const BYTE*)input, len, kSecret, sizeof(kSecret), 0);
-if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const BYTE*)input, len, kSecret, sizeof(kSecret), 0);
-return XXH3_hashLong_64b_defaultSecret((const BYTE*)input, len);
+if (len <= 16) return XXH3_len_0to16_64b((const xxh_u8*)input, len, kSecret, 0);
+if (len <= 128) return XXH3_len_17to128_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0);
+if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0);
+return XXH3_hashLong_64b_defaultSecret((const xxh_u8*)input, len);
 }

 XXH_PUBLIC_API XXH64_hash_t
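As a usage sketch for the one-shot 64-bit entry points dispatched above (assuming the then-experimental xxh3.h header is on the include path and the program is linked against xxhash.c; the 192-byte secret size below is only a guess at a reasonable buffer):

#include <stdio.h>
#include <string.h>
#include "xxh3.h"

int main(void)
{
    const char* msg = "xxHash is an extremely fast hash algorithm";
    size_t len = strlen(msg);

    XXH64_hash_t h_plain  = XXH3_64bits(msg, len);
    XXH64_hash_t h_seeded = XXH3_64bits_withSeed(msg, len, 20141025);

    /* A custom secret must be at least XXH3_SECRET_SIZE_MIN bytes; the
     * content here is arbitrary demo data. */
    static const unsigned char secret[192] = { 1, 2, 3 };
    XXH64_hash_t h_secret = XXH3_64bits_withSecret(msg, len, secret, sizeof(secret));

    printf("plain : %016llx\n", (unsigned long long)h_plain);
    printf("seeded: %016llx\n", (unsigned long long)h_seeded);
    printf("secret: %016llx\n", (unsigned long long)h_secret);
    return 0;
}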
@ -1046,19 +1046,19 @@ XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t
|
||||
* it should be done here.
|
||||
* For now, it's a contract pre-condition.
|
||||
* Adding a check and a branch here would cost performance at every hash */
|
||||
if (len <= 16) return XXH3_len_0to16_64b((const BYTE*)input, len, (const BYTE*)secret, 0);
|
||||
if (len <= 128) return XXH3_len_17to128_64b((const BYTE*)input, len, (const BYTE*)secret, secretSize, 0);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const BYTE*)input, len, (const BYTE*)secret, secretSize, 0);
|
||||
return XXH3_hashLong_64b_withSecret((const BYTE*)input, len, (const BYTE*)secret, secretSize);
|
||||
if (len <= 16) return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, 0);
|
||||
if (len <= 128) return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0);
|
||||
return XXH3_hashLong_64b_withSecret((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize);
|
||||
}
|
||||
|
||||
XXH_PUBLIC_API XXH64_hash_t
|
||||
XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
|
||||
{
|
||||
if (len <= 16) return XXH3_len_0to16_64b((const BYTE*)input, len, kSecret, seed);
|
||||
if (len <= 128) return XXH3_len_17to128_64b((const BYTE*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const BYTE*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
return XXH3_hashLong_64b_withSeed((const BYTE*)input, len, seed);
|
||||
if (len <= 16) return XXH3_len_0to16_64b((const xxh_u8*)input, len, kSecret, seed);
|
||||
if (len <= 128) return XXH3_len_17to128_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_64b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
return XXH3_hashLong_64b_withSeed((const xxh_u8*)input, len, seed);
|
||||
}
|
||||
|
||||
/* === XXH3 streaming === */
|
||||
@ -1083,7 +1083,7 @@ XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
|
||||
static void
|
||||
XXH3_64bits_reset_internal(XXH3_state_t* statePtr,
|
||||
XXH64_hash_t seed,
|
||||
const BYTE* secret, size_t secretSize)
|
||||
const xxh_u8* secret, size_t secretSize)
|
||||
{
|
||||
XXH_ASSERT(statePtr != NULL);
|
||||
memset(statePtr, 0, sizeof(*statePtr));
|
||||
@ -1115,7 +1115,7 @@ XXH_PUBLIC_API XXH_errorcode
|
||||
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
|
||||
{
|
||||
if (statePtr == NULL) return XXH_ERROR;
|
||||
XXH3_64bits_reset_internal(statePtr, 0, (const BYTE*)secret, secretSize);
|
||||
XXH3_64bits_reset_internal(statePtr, 0, (const xxh_u8*)secret, secretSize);
|
||||
if (secret == NULL) return XXH_ERROR;
|
||||
if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
|
||||
return XXH_OK;
|
||||
@ -1132,10 +1132,10 @@ XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE void
|
||||
XXH3_consumeStripes( U64* acc,
|
||||
XXH3_consumeStripes( xxh_u64* acc,
|
||||
XXH32_hash_t* nbStripesSoFarPtr, XXH32_hash_t nbStripesPerBlock,
|
||||
const BYTE* input, size_t totalStripes,
|
||||
const BYTE* secret, size_t secretLimit,
|
||||
const xxh_u8* input, size_t totalStripes,
|
||||
const xxh_u8* secret, size_t secretLimit,
|
||||
XXH3_accWidth_e accWidth)
|
||||
{
|
||||
XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
|
||||
@ -1153,7 +1153,7 @@ XXH3_consumeStripes( U64* acc,
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE XXH_errorcode
|
||||
XXH3_update(XXH3_state_t* state, const BYTE* input, size_t len, XXH3_accWidth_e accWidth)
|
||||
XXH3_update(XXH3_state_t* state, const xxh_u8* input, size_t len, XXH3_accWidth_e accWidth)
|
||||
{
|
||||
if (input==NULL)
|
||||
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
|
||||
@ -1162,7 +1162,7 @@ XXH3_update(XXH3_state_t* state, const BYTE* input, size_t len, XXH3_accWidth_e
|
||||
return XXH_ERROR;
|
||||
#endif
|
||||
|
||||
{ const BYTE* const bEnd = input + len;
|
||||
{ const xxh_u8* const bEnd = input + len;
|
||||
|
||||
state->totalLen += len;
|
||||
|
||||
@ -1190,7 +1190,7 @@ XXH3_update(XXH3_state_t* state, const BYTE* input, size_t len, XXH3_accWidth_e
|
||||
|
||||
/* consume input by full buffer quantities */
|
||||
if (input+XXH3_INTERNALBUFFER_SIZE <= bEnd) {
|
||||
const BYTE* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
|
||||
const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
|
||||
do {
|
||||
XXH3_consumeStripes(state->acc,
|
||||
&state->nbStripesSoFar, state->nbStripesPerBlock,
|
||||
@@ -1213,7 +1213,7 @@ XXH3_update(XXH3_state_t* state, const BYTE* input, size_t len, XXH3_accWidth_e
 XXH_PUBLIC_API XXH_errorcode
 XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
 {
-return XXH3_update(state, (const BYTE*)input, len, XXH3_acc_64bits);
+return XXH3_update(state, (const xxh_u8*)input, len, XXH3_acc_64bits);
 }


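A hedged sketch of the streaming flow that feeds the update path above. The reset/update/digest names all appear in this diff; XXH3_createState() and XXH3_freeState() are assumed to exist in the public API for state allocation:

#include <stdio.h>
#include <string.h>
#include "xxh3.h"

int main(void)
{
    XXH3_state_t* state = XXH3_createState();   /* assumed allocation helper */
    if (state == NULL) return 1;

    if (XXH3_64bits_reset_withSeed(state, 42) != XXH_OK) return 1;

    /* Feed the input in arbitrary-sized chunks; the state buffers partial
     * stripes internally (see XXH3_update above). */
    const char* chunks[] = { "split ", "into ", "several ", "updates" };
    for (int i = 0; i < 4; i++) {
        if (XXH3_64bits_update(state, chunks[i], strlen(chunks[i])) != XXH_OK) return 1;
    }

    XXH64_hash_t h = XXH3_64bits_digest(state);
    printf("streamed: %016llx\n", (unsigned long long)h);

    XXH3_freeState(state);                       /* assumed allocation helper */
    return 0;
}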
@ -1237,7 +1237,7 @@ XXH3_digest_long (XXH64_hash_t* acc, const XXH3_state_t* state, XXH3_accWidth_e
|
||||
}
|
||||
} else { /* bufferedSize < STRIPE_LEN */
|
||||
if (state->bufferedSize) { /* one last stripe */
|
||||
BYTE lastStripe[STRIPE_LEN];
|
||||
xxh_u8 lastStripe[STRIPE_LEN];
|
||||
size_t const catchupSize = STRIPE_LEN - state->bufferedSize;
|
||||
memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
|
||||
memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
|
||||
@ -1253,7 +1253,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
|
||||
if (state->totalLen > XXH3_MIDSIZE_MAX) {
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[ACC_NB];
|
||||
XXH3_digest_long(acc, state, XXH3_acc_64bits);
|
||||
return XXH3_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (U64)state->totalLen * PRIME64_1);
|
||||
return XXH3_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)state->totalLen * PRIME64_1);
|
||||
}
|
||||
/* len <= XXH3_MIDSIZE_MAX : short code */
|
||||
if (state->seed)
|
||||
@ -1266,20 +1266,20 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
|
||||
* ========================================== */
|
||||
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH3_len_1to3_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(input != NULL);
|
||||
XXH_ASSERT(1 <= len && len <= 3);
|
||||
XXH_ASSERT(secret != NULL);
|
||||
{ BYTE const c1 = input[0];
|
||||
BYTE const c2 = input[len >> 1];
|
||||
BYTE const c3 = input[len - 1];
|
||||
U32 const combinedl = ((U32)c1) + (((U32)c2) << 8) + (((U32)c3) << 16) + (((U32)len) << 24);
|
||||
U32 const combinedh = XXH_swap32(combinedl);
|
||||
U64 const keyed_lo = (U64)combinedl ^ (XXH_readLE32(secret) + seed);
|
||||
U64 const keyed_hi = (U64)combinedh ^ (XXH_readLE32(secret+4) - seed);
|
||||
U64 const mixedl = keyed_lo * PRIME64_1;
|
||||
U64 const mixedh = keyed_hi * PRIME64_5;
|
||||
{ xxh_u8 const c1 = input[0];
|
||||
xxh_u8 const c2 = input[len >> 1];
|
||||
xxh_u8 const c3 = input[len - 1];
|
||||
xxh_u32 const combinedl = ((xxh_u32)c1) + (((xxh_u32)c2) << 8) + (((xxh_u32)c3) << 16) + (((xxh_u32)len) << 24);
|
||||
xxh_u32 const combinedh = XXH_swap32(combinedl);
|
||||
xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ (XXH_readLE32(secret) + seed);
|
||||
xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ (XXH_readLE32(secret+4) - seed);
|
||||
xxh_u64 const mixedl = keyed_lo * PRIME64_1;
|
||||
xxh_u64 const mixedh = keyed_hi * PRIME64_5;
|
||||
XXH128_hash_t const h128 = { XXH3_avalanche(mixedl) /*low64*/, XXH3_avalanche(mixedh) /*high64*/ };
|
||||
return h128;
|
||||
}
|
||||
@ -1287,36 +1287,36 @@ XXH3_len_1to3_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash
|
||||
|
||||
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH3_len_4to8_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(input != NULL);
|
||||
XXH_ASSERT(secret != NULL);
|
||||
XXH_ASSERT(4 <= len && len <= 8);
|
||||
{ U32 const input_lo = XXH_readLE32(input);
|
||||
U32 const input_hi = XXH_readLE32(input + len - 4);
|
||||
U64 const input_64_lo = input_lo + ((U64)input_hi << 32);
|
||||
U64 const input_64_hi = XXH_swap64(input_64_lo);
|
||||
U64 const keyed_lo = input_64_lo ^ (XXH_readLE64(secret) + seed);
|
||||
U64 const keyed_hi = input_64_hi ^ (XXH_readLE64(secret + 8) - seed);
|
||||
U64 const mix64l1 = len + ((keyed_lo ^ (keyed_lo >> 51)) * PRIME32_1);
|
||||
U64 const mix64l2 = (mix64l1 ^ (mix64l1 >> 47)) * PRIME64_2;
|
||||
U64 const mix64h1 = ((keyed_hi ^ (keyed_hi >> 47)) * PRIME64_1) - len;
|
||||
U64 const mix64h2 = (mix64h1 ^ (mix64h1 >> 43)) * PRIME64_4;
|
||||
{ xxh_u32 const input_lo = XXH_readLE32(input);
|
||||
xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
|
||||
xxh_u64 const input_64_lo = input_lo + ((xxh_u64)input_hi << 32);
|
||||
xxh_u64 const input_64_hi = XXH_swap64(input_64_lo);
|
||||
xxh_u64 const keyed_lo = input_64_lo ^ (XXH_readLE64(secret) + seed);
|
||||
xxh_u64 const keyed_hi = input_64_hi ^ (XXH_readLE64(secret + 8) - seed);
|
||||
xxh_u64 const mix64l1 = len + ((keyed_lo ^ (keyed_lo >> 51)) * PRIME32_1);
|
||||
xxh_u64 const mix64l2 = (mix64l1 ^ (mix64l1 >> 47)) * PRIME64_2;
|
||||
xxh_u64 const mix64h1 = ((keyed_hi ^ (keyed_hi >> 47)) * PRIME64_1) - len;
|
||||
xxh_u64 const mix64h2 = (mix64h1 ^ (mix64h1 >> 43)) * PRIME64_4;
|
||||
{ XXH128_hash_t const h128 = { XXH3_avalanche(mix64l2) /*low64*/, XXH3_avalanche(mix64h2) /*high64*/ };
|
||||
return h128;
|
||||
} }
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH3_len_9to16_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(input != NULL);
|
||||
XXH_ASSERT(secret != NULL);
|
||||
XXH_ASSERT(9 <= len && len <= 16);
|
||||
{ U64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed);
|
||||
U64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret+8) - seed);
|
||||
{ xxh_u64 const input_lo = XXH_readLE64(input) ^ (XXH_readLE64(secret) + seed);
|
||||
xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ (XXH_readLE64(secret+8) - seed);
|
||||
XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi, PRIME64_1);
|
||||
U64 const lenContrib = XXH_mult32to64(len, PRIME32_5);
|
||||
xxh_u64 const lenContrib = XXH_mult32to64(len, PRIME32_5);
|
||||
m128.low64 += lenContrib;
|
||||
m128.high64 += input_hi * PRIME64_1;
|
||||
m128.low64 ^= (m128.high64 >> 32);
|
||||
@ -1331,7 +1331,7 @@ XXH3_len_9to16_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_has
|
||||
/* Assumption : `secret` size is >= 16
|
||||
* Note : it should be >= XXH3_SECRET_SIZE_MIN anyway */
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH3_len_0to16_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(len <= 16);
|
||||
{ if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
|
||||
@ -1343,40 +1343,40 @@ XXH3_len_0to16_128b(const BYTE* input, size_t len, const BYTE* secret, XXH64_has
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH3_hashLong_128b_internal(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize)
|
||||
XXH3_hashLong_128b_internal(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize)
|
||||
{
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) U64 acc[ACC_NB] = XXH3_INIT_ACC;
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[ACC_NB] = XXH3_INIT_ACC;
|
||||
|
||||
XXH3_hashLong_internal_loop(acc, input, len, secret, secretSize, XXH3_acc_128bits);
|
||||
|
||||
/* converge into final hash */
|
||||
XXH_STATIC_ASSERT(sizeof(acc) == 64);
|
||||
XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
|
||||
{ U64 const low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (U64)len * PRIME64_1);
|
||||
U64 const high64 = XXH3_mergeAccs(acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((U64)len * PRIME64_2));
|
||||
{ xxh_u64 const low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * PRIME64_1);
|
||||
xxh_u64 const high64 = XXH3_mergeAccs(acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((xxh_u64)len * PRIME64_2));
|
||||
XXH128_hash_t const h128 = { low64, high64 };
|
||||
return h128;
|
||||
}
|
||||
}
|
||||
|
||||
XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
|
||||
XXH3_hashLong_128b_defaultSecret(const BYTE* input, size_t len)
|
||||
XXH3_hashLong_128b_defaultSecret(const xxh_u8* input, size_t len)
|
||||
{
|
||||
return XXH3_hashLong_128b_internal(input, len, kSecret, sizeof(kSecret));
|
||||
}
|
||||
|
||||
XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
|
||||
XXH3_hashLong_128b_withSecret(const BYTE* input, size_t len,
|
||||
const BYTE* secret, size_t secretSize)
|
||||
XXH3_hashLong_128b_withSecret(const xxh_u8* input, size_t len,
|
||||
const xxh_u8* secret, size_t secretSize)
|
||||
{
|
||||
return XXH3_hashLong_128b_internal(input, len, secret, secretSize);
|
||||
}
|
||||
|
||||
XXH_NO_INLINE XXH128_hash_t /* It's important for performance that XXH3_hashLong is not inlined. Not sure why (uop cache maybe ?), but difference is large and easily measurable */
|
||||
XXH3_hashLong_128b_withSeed(const BYTE* input, size_t len, XXH64_hash_t seed)
|
||||
XXH3_hashLong_128b_withSeed(const xxh_u8* input, size_t len, XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ALIGN(8) BYTE secret[XXH_SECRET_DEFAULT_SIZE];
|
||||
XXH_ALIGN(8) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
|
||||
if (seed == 0) return XXH3_hashLong_128b_defaultSecret(input, len);
|
||||
XXH3_initCustomSecret(secret, seed);
|
||||
return XXH3_hashLong_128b_internal(input, len, secret, sizeof(secret));
|
||||
@ -1384,7 +1384,7 @@ XXH3_hashLong_128b_withSeed(const BYTE* input, size_t len, XXH64_hash_t seed)
|
||||
|
||||
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH128_mix32B(XXH128_hash_t acc, const BYTE* input_1, const BYTE* input_2, const BYTE* secret, XXH64_hash_t seed)
|
||||
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, const xxh_u8* secret, XXH64_hash_t seed)
|
||||
{
|
||||
acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
|
||||
acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
|
||||
@ -1394,8 +1394,8 @@ XXH128_mix32B(XXH128_hash_t acc, const BYTE* input_1, const BYTE* input_2, const
|
||||
}
|
||||
|
||||
XXH_NO_INLINE XXH128_hash_t
|
||||
XXH3_len_129to240_128b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
|
||||
@ -1418,8 +1418,8 @@ XXH3_len_129to240_128b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
/* last bytes */
|
||||
acc = XXH128_mix32B(acc, input + len - 16, input + len - 32, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, 0ULL - seed);
|
||||
|
||||
{ U64 const low64 = acc.low64 + acc.high64;
|
||||
U64 const high64 = (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2);
|
||||
{ xxh_u64 const low64 = acc.low64 + acc.high64;
|
||||
xxh_u64 const high64 = (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2);
|
||||
XXH128_hash_t const h128 = { XXH3_avalanche(low64), (XXH64_hash_t)0 - XXH3_avalanche(high64) };
|
||||
return h128;
|
||||
}
|
||||
@ -1428,8 +1428,8 @@ XXH3_len_129to240_128b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
|
||||
|
||||
XXH_FORCE_INLINE XXH128_hash_t
|
||||
XXH3_len_17to128_128b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
const BYTE* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
|
||||
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
|
||||
XXH64_hash_t seed)
|
||||
{
|
||||
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
|
||||
@ -1448,8 +1448,8 @@ XXH3_len_17to128_128b(const BYTE* XXH_RESTRICT input, size_t len,
|
||||
acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
|
||||
}
|
||||
acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
|
||||
{ U64 const low64 = acc.low64 + acc.high64;
|
||||
U64 const high64 = (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2);
|
||||
{ xxh_u64 const low64 = acc.low64 + acc.high64;
|
||||
xxh_u64 const high64 = (acc.low64 * PRIME64_1) + (acc.high64 * PRIME64_4) + ((len - seed) * PRIME64_2);
|
||||
XXH128_hash_t const h128 = { XXH3_avalanche(low64), (XXH64_hash_t)0 - XXH3_avalanche(high64) };
|
||||
return h128;
|
||||
}
|
||||
@@ -1458,10 +1458,10 @@ XXH3_len_17to128_128b(const BYTE* XXH_RESTRICT input, size_t len,

 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
 {
-if (len <= 16) return XXH3_len_0to16_128b((const BYTE*)input, len, kSecret, 0);
-if (len <= 128) return XXH3_len_17to128_128b((const BYTE*)input, len, kSecret, sizeof(kSecret), 0);
-if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const BYTE*)input, len, kSecret, sizeof(kSecret), 0);
-return XXH3_hashLong_128b_defaultSecret((const BYTE*)input, len);
+if (len <= 16) return XXH3_len_0to16_128b((const xxh_u8*)input, len, kSecret, 0);
+if (len <= 128) return XXH3_len_17to128_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0);
+if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), 0);
+return XXH3_hashLong_128b_defaultSecret((const xxh_u8*)input, len);
 }

 XXH_PUBLIC_API XXH128_hash_t
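A usage sketch for the 128-bit one-shot functions above (same assumptions as the 64-bit example: experimental xxh3.h header, linked against xxhash.c). XXH128() is the seeded convenience wrapper named later in this diff:

#include <stdio.h>
#include <string.h>
#include "xxh3.h"

int main(void)
{
    const char* msg = "xxh3 also provides a 128-bit variant";
    size_t len = strlen(msg);

    XXH128_hash_t h  = XXH3_128bits(msg, len);
    XXH128_hash_t hs = XXH128(msg, len, 42);   /* seeded convenience wrapper */

    printf("unseeded: %016llx%016llx\n",
           (unsigned long long)h.high64, (unsigned long long)h.low64);
    printf("seeded  : %016llx%016llx\n",
           (unsigned long long)hs.high64, (unsigned long long)hs.low64);
    return 0;
}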
@ -1472,19 +1472,19 @@ XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_
|
||||
* it should be done here.
|
||||
* For now, it's a contract pre-condition.
|
||||
* Adding a check and a branch here would cost performance at every hash */
|
||||
if (len <= 16) return XXH3_len_0to16_128b((const BYTE*)input, len, (const BYTE*)secret, 0);
|
||||
if (len <= 128) return XXH3_len_17to128_128b((const BYTE*)input, len, (const BYTE*)secret, secretSize, 0);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const BYTE*)input, len, (const BYTE*)secret, secretSize, 0);
|
||||
return XXH3_hashLong_128b_withSecret((const BYTE*)input, len, (const BYTE*)secret, secretSize);
|
||||
if (len <= 16) return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, 0);
|
||||
if (len <= 128) return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, 0);
|
||||
return XXH3_hashLong_128b_withSecret((const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize);
|
||||
}
|
||||
|
||||
XXH_PUBLIC_API XXH128_hash_t
|
||||
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
|
||||
{
|
||||
if (len <= 16) return XXH3_len_0to16_128b((const BYTE*)input, len, kSecret, seed);
|
||||
if (len <= 128) return XXH3_len_17to128_128b((const BYTE*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const BYTE*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
return XXH3_hashLong_128b_withSeed((const BYTE*)input, len, seed);
|
||||
if (len <= 16) return XXH3_len_0to16_128b((const xxh_u8*)input, len, kSecret, seed);
|
||||
if (len <= 128) return XXH3_len_17to128_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
if (len <= XXH3_MIDSIZE_MAX) return XXH3_len_129to240_128b((const xxh_u8*)input, len, kSecret, sizeof(kSecret), seed);
|
||||
return XXH3_hashLong_128b_withSeed((const xxh_u8*)input, len, seed);
|
||||
}
|
||||
|
||||
XXH_PUBLIC_API XXH128_hash_t
|
||||
@ -1503,7 +1503,7 @@ XXH128(const void* input, size_t len, XXH64_hash_t seed)
|
||||
static void
|
||||
XXH3_128bits_reset_internal(XXH3_state_t* statePtr,
|
||||
XXH64_hash_t seed,
|
||||
const BYTE* secret, size_t secretSize)
|
||||
const xxh_u8* secret, size_t secretSize)
|
||||
{
|
||||
XXH3_64bits_reset_internal(statePtr, seed, secret, secretSize);
|
||||
}
|
||||
@ -1520,7 +1520,7 @@ XXH_PUBLIC_API XXH_errorcode
|
||||
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
|
||||
{
|
||||
if (statePtr == NULL) return XXH_ERROR;
|
||||
XXH3_128bits_reset_internal(statePtr, 0, (const BYTE*)secret, secretSize);
|
||||
XXH3_128bits_reset_internal(statePtr, 0, (const xxh_u8*)secret, secretSize);
|
||||
if (secret == NULL) return XXH_ERROR;
|
||||
if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
|
||||
return XXH_OK;
|
||||
@ -1539,7 +1539,7 @@ XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
|
||||
XXH_PUBLIC_API XXH_errorcode
|
||||
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
|
||||
{
|
||||
return XXH3_update(state, (const BYTE*)input, len, XXH3_acc_128bits);
|
||||
return XXH3_update(state, (const xxh_u8*)input, len, XXH3_acc_128bits);
|
||||
}
|
||||
|
||||
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
|
||||
@ -1548,8 +1548,8 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
|
||||
XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[ACC_NB];
|
||||
XXH3_digest_long(acc, state, XXH3_acc_128bits);
|
||||
XXH_ASSERT(state->secretLimit + STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
|
||||
{ U64 const low64 = XXH3_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (U64)state->totalLen * PRIME64_1);
|
||||
U64 const high64 = XXH3_mergeAccs(acc, state->secret + state->secretLimit + STRIPE_LEN - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((U64)state->totalLen * PRIME64_2));
|
||||
{ xxh_u64 const low64 = XXH3_mergeAccs(acc, state->secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)state->totalLen * PRIME64_1);
|
||||
xxh_u64 const high64 = XXH3_mergeAccs(acc, state->secret + state->secretLimit + STRIPE_LEN - sizeof(acc) - XXH_SECRET_MERGEACCS_START, ~((xxh_u64)state->totalLen * PRIME64_2));
|
||||
XXH128_hash_t const h128 = { low64, high64 };
|
||||
return h128;
|
||||
}
|
||||
|
xxhash.c (227 changed lines)
@@ -166,13 +166,18 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
 # include <stdint.h>
-typedef uint8_t BYTE;
-typedef uint16_t U16;
-typedef uint32_t U32;
+typedef uint8_t xxh_u8;
+typedef uint16_t xxh_u16;
+typedef uint32_t xxh_u32;
 # else
-typedef unsigned char BYTE;
-typedef unsigned short U16;
-typedef unsigned int U32;
+# include <limits.h>
+typedef unsigned char xxh_u8;
+typedef unsigned short xxh_u16;
+# if (UINT_MAX == 0xFFFFFFFFUL)
+typedef unsigned int xxh_u32;
+# else
+typedef unsigned long xxh_u32;
+# endif
 # endif
 #endif

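This hunk is the 16-bit int fix named in the PR title: on a C89 target where unsigned int is 16 bits wide, the old "typedef unsigned int U32;" silently produced a 16-bit "U32", so the new code consults limits.h before choosing a type. A compile-time sketch of the same idea (names invented for the demo):

/* Minimal sketch of the same width-selection idea, checkable at compile time. */
#include <limits.h>

#if (UINT_MAX == 0xFFFFFFFFUL)
  typedef unsigned int  my_u32;
#elif (ULONG_MAX == 0xFFFFFFFFUL)
  typedef unsigned long my_u32;
#else
# error "no 32-bit unsigned type found"
#endif

/* C89-compatible static assertion: the array size becomes negative
 * (and the build fails) if my_u32 is not exactly 4 bytes. */
typedef char assert_u32_is_32_bits[(sizeof(my_u32) == 4) ? 1 : -1];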
@@ -182,23 +187,23 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

 /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
-static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
 /* currently only defined for gcc and icc */
-typedef union { U32 u32; } __attribute__((packed)) unalign;
-static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }

 #else

 /* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */
-static U32 XXH_read32(const void* memPtr)
+static xxh_u32 XXH_read32(const void* memPtr)
 {
-U32 val;
+xxh_u32 val;
 memcpy(&val, memPtr, sizeof(val));
 return val;
 }
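The portable branch above reads unaligned data through memcpy into a local, which is well-defined C and is optimized by mainstream compilers into a single load. A standalone sketch:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Same idiom as the portable XXH_read32 above: memcpy avoids undefined
 * behaviour from misaligned or type-punned pointer dereferences. */
static uint32_t read32(const void* memPtr)
{
    uint32_t val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

int main(void)
{
    unsigned char buf[8] = { 0x78, 0x56, 0x34, 0x12, 0xEF, 0xBE, 0xAD, 0xDE };
    /* Reading at buf+1 is deliberately misaligned; this is still well-defined.
     * The printed values depend on host endianness. */
    printf("aligned   : 0x%08x\n", read32(buf));
    printf("unaligned : 0x%08x\n", read32(buf + 1));
    return 0;
}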
@ -221,7 +226,7 @@ typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
|
||||
# else
|
||||
static int XXH_isLittleEndian(void)
|
||||
{
|
||||
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
|
||||
const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; /* don't use static : performance detrimental */
|
||||
return one.c[0];
|
||||
}
|
||||
# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
|
||||
@ -257,7 +262,7 @@ static int XXH_isLittleEndian(void)
|
||||
#elif XXH_GCC_VERSION >= 403
|
||||
# define XXH_swap32 __builtin_bswap32
|
||||
#else
|
||||
static U32 XXH_swap32 (U32 x)
|
||||
static xxh_u32 XXH_swap32 (xxh_u32 x)
|
||||
{
|
||||
return ((x << 24) & 0xff000000 ) |
|
||||
((x << 8) & 0x00ff0000 ) |
|
||||
@ -272,23 +277,23 @@ static U32 XXH_swap32 (U32 x)
|
||||
*****************************/
|
||||
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
|
||||
|
||||
XXH_FORCE_INLINE U32 XXH_readLE32(const void* ptr)
|
||||
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
|
||||
{
|
||||
return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
|
||||
}
|
||||
|
||||
static U32 XXH_readBE32(const void* ptr)
|
||||
static xxh_u32 XXH_readBE32(const void* ptr)
|
||||
{
|
||||
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE U32
|
||||
XXH_FORCE_INLINE xxh_u32
|
||||
XXH_readLE32_align(const void* ptr, XXH_alignment align)
|
||||
{
|
||||
if (align==XXH_unaligned) {
|
||||
return XXH_readLE32(ptr);
|
||||
} else {
|
||||
return XXH_CPU_LITTLE_ENDIAN ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
|
||||
return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -302,13 +307,13 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
|
||||
/* *******************************************************************
|
||||
* 32-bit hash functions
|
||||
*********************************************************************/
|
||||
static const U32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */
|
||||
static const U32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */
|
||||
static const U32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */
|
||||
static const U32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */
|
||||
static const U32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */
|
||||
static const xxh_u32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */
|
||||
static const xxh_u32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */
|
||||
static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */
|
||||
static const xxh_u32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */
|
||||
static const xxh_u32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */
|
||||
|
||||
static U32 XXH32_round(U32 acc, U32 input)
|
||||
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
|
||||
{
|
||||
acc += input * PRIME32_2;
|
||||
acc = XXH_rotl32(acc, 13);
|
||||
@ -361,7 +366,7 @@ static U32 XXH32_round(U32 acc, U32 input)
|
||||
}
|
||||
|
||||
/* mix all bits */
|
||||
static U32 XXH32_avalanche(U32 h32)
|
||||
static xxh_u32 XXH32_avalanche(xxh_u32 h32)
|
||||
{
|
||||
h32 ^= h32 >> 15;
|
||||
h32 *= PRIME32_2;
|
||||
@ -373,8 +378,8 @@ static U32 XXH32_avalanche(U32 h32)
|
||||
|
||||
#define XXH_get32bits(p) XXH_readLE32_align(p, align)
|
||||
|
||||
static U32
|
||||
XXH32_finalize(U32 h32, const BYTE* ptr, size_t len, XXH_alignment align)
|
||||
static xxh_u32
|
||||
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
|
||||
{
|
||||
#define PROCESS1 \
|
||||
h32 += (*ptr++) * PRIME32_5; \
|
||||
@ -442,25 +447,25 @@ XXH32_finalize(U32 h32, const BYTE* ptr, size_t len, XXH_alignment align)
|
||||
}
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE U32
|
||||
XXH32_endian_align(const BYTE* input, size_t len, U32 seed, XXH_alignment align)
|
||||
XXH_FORCE_INLINE xxh_u32
|
||||
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
|
||||
{
|
||||
const BYTE* bEnd = input + len;
|
||||
U32 h32;
|
||||
const xxh_u8* bEnd = input + len;
|
||||
xxh_u32 h32;
|
||||
|
||||
#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
|
||||
if (input==NULL) {
|
||||
len=0;
|
||||
bEnd=input=(const BYTE*)(size_t)16;
|
||||
bEnd=input=(const xxh_u8*)(size_t)16;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (len>=16) {
|
||||
const BYTE* const limit = bEnd - 15;
|
||||
U32 v1 = seed + PRIME32_1 + PRIME32_2;
|
||||
U32 v2 = seed + PRIME32_2;
|
||||
U32 v3 = seed + 0;
|
||||
U32 v4 = seed - PRIME32_1;
|
||||
const xxh_u8* const limit = bEnd - 15;
|
||||
xxh_u32 v1 = seed + PRIME32_1 + PRIME32_2;
|
||||
xxh_u32 v2 = seed + PRIME32_2;
|
||||
xxh_u32 v3 = seed + 0;
|
||||
xxh_u32 v4 = seed - PRIME32_1;
|
||||
|
||||
do {
|
||||
v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
|
||||
@ -475,29 +480,29 @@ XXH32_endian_align(const BYTE* input, size_t len, U32 seed, XXH_alignment align)
|
||||
h32 = seed + PRIME32_5;
|
||||
}
|
||||
|
||||
h32 += (U32)len;
|
||||
h32 += (xxh_u32)len;
|
||||
|
||||
return XXH32_finalize(h32, input, len&15, align);
|
||||
}
|
||||
|
||||
|
||||
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, unsigned int seed)
|
||||
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
|
||||
{
|
||||
#if 0
|
||||
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
|
||||
XXH32_state_t state;
|
||||
XXH32_reset(&state, seed);
|
||||
XXH32_update(&state, (const BYTE*)input, len);
|
||||
XXH32_update(&state, (const xxh_u8*)input, len);
|
||||
return XXH32_digest(&state);
|
||||
|
||||
#else
|
||||
|
||||
if (XXH_FORCE_ALIGN_CHECK) {
|
||||
if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
|
||||
return XXH32_endian_align((const BYTE*)input, len, seed, XXH_aligned);
|
||||
return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
|
||||
} }
|
||||
|
||||
return XXH32_endian_align((const BYTE*)input, len, seed, XXH_unaligned);
|
||||
return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
|
||||
#endif
|
||||
}
|
||||
|
||||
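A usage sketch for the classic 32-bit one-shot function whose signature is updated above; only xxhash.h is required for XXH32:

#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void)
{
    const char* msg = "Nobody inspects the spammish repetition";
    XXH32_hash_t h0 = XXH32(msg, strlen(msg), 0);          /* unseeded */
    XXH32_hash_t h1 = XXH32(msg, strlen(msg), 0x9E3779B1); /* seeded */
    printf("seed 0        : %08x\n", (unsigned)h0);
    printf("seed 9E3779B1 : %08x\n", (unsigned)h1);
    return 0;
}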
@ -520,7 +525,7 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t
|
||||
memcpy(dstState, srcState, sizeof(*dstState));
|
||||
}
|
||||
|
||||
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
|
||||
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
|
||||
{
|
||||
XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
|
||||
memset(&state, 0, sizeof(state));
|
||||
@ -544,21 +549,21 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len)
|
||||
return XXH_ERROR;
|
||||
#endif
|
||||
|
||||
{ const BYTE* p = (const BYTE*)input;
|
||||
const BYTE* const bEnd = p + len;
|
||||
{ const xxh_u8* p = (const xxh_u8*)input;
|
||||
const xxh_u8* const bEnd = p + len;
|
||||
|
||||
state->total_len_32 += (XXH32_hash_t)len;
|
||||
state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
|
||||
|
||||
if (state->memsize + len < 16) { /* fill in tmp buffer */
|
||||
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
|
||||
XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
|
||||
state->memsize += (XXH32_hash_t)len;
|
||||
return XXH_OK;
|
||||
}
|
||||
|
||||
if (state->memsize) { /* some data left from previous update */
|
||||
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
|
||||
{ const U32* p32 = state->mem32;
|
||||
XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
|
||||
{ const xxh_u32* p32 = state->mem32;
|
||||
state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
|
||||
state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
|
||||
state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
|
||||
@ -569,11 +574,11 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len)
|
||||
}
|
||||
|
||||
if (p <= bEnd-16) {
|
||||
const BYTE* const limit = bEnd - 16;
|
||||
U32 v1 = state->v1;
|
||||
U32 v2 = state->v2;
|
||||
U32 v3 = state->v3;
|
||||
U32 v4 = state->v4;
|
||||
const xxh_u8* const limit = bEnd - 16;
|
||||
xxh_u32 v1 = state->v1;
|
||||
xxh_u32 v2 = state->v2;
|
||||
xxh_u32 v3 = state->v3;
|
||||
xxh_u32 v4 = state->v4;
|
||||
|
||||
do {
|
||||
v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
|
||||
@ -600,7 +605,7 @@ XXH32_update(XXH32_state_t* state, const void* input, size_t len)
|
||||
|
||||
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
|
||||
{
|
||||
U32 h32;
|
||||
xxh_u32 h32;
|
||||
|
||||
if (state->large_len) {
|
||||
h32 = XXH_rotl32(state->v1, 1)
|
||||
@ -613,7 +618,7 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
|
||||
|
||||
h32 += state->total_len_32;
|
||||
|
||||
return XXH32_finalize(h32, (const BYTE*)state->mem32, state->memsize, XXH_aligned);
|
||||
return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
|
||||
}
|
||||
|
||||
|
||||
@ -652,10 +657,10 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
|
||||
&& (defined (__cplusplus) \
|
||||
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
|
||||
# include <stdint.h>
|
||||
typedef uint64_t U64;
|
||||
typedef uint64_t xxh_u64;
|
||||
# else
|
||||
/* if compiler doesn't support unsigned long long, replace by another 64-bit type */
|
||||
typedef unsigned long long U64;
|
||||
typedef unsigned long long xxh_u64;
|
||||
# endif
|
||||
#endif
|
||||
|
||||
@ -688,14 +693,14 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
|
||||
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
|
||||
|
||||
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
|
||||
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
|
||||
static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; }
|
||||
|
||||
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
|
||||
|
||||
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
|
||||
/* currently only defined for gcc and icc */
|
||||
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
|
||||
static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
|
||||
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
|
||||
static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
|
||||
|
||||
#else
|
||||
|
||||
@ -703,9 +708,9 @@ static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
|
||||
* see : http://stackoverflow.com/a/32095106/646947
|
||||
*/
|
||||
|
||||
static U64 XXH_read64(const void* memPtr)
|
||||
static xxh_u64 XXH_read64(const void* memPtr)
|
||||
{
|
||||
U64 val;
|
||||
xxh_u64 val;
|
||||
memcpy(&val, memPtr, sizeof(val));
|
||||
return val;
|
||||
}
|
||||
@ -717,7 +722,7 @@ static U64 XXH_read64(const void* memPtr)
|
||||
#elif XXH_GCC_VERSION >= 403
|
||||
# define XXH_swap64 __builtin_bswap64
|
||||
#else
|
||||
static U64 XXH_swap64 (U64 x)
|
||||
static xxh_u64 XXH_swap64 (xxh_u64 x)
|
||||
{
|
||||
return ((x << 56) & 0xff00000000000000ULL) |
|
||||
((x << 40) & 0x00ff000000000000ULL) |
|
||||
@ -730,35 +735,35 @@ static U64 XXH_swap64 (U64 x)
|
||||
}
|
||||
#endif
|
||||
|
||||
XXH_FORCE_INLINE U64 XXH_readLE64(const void* ptr)
|
||||
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
|
||||
{
|
||||
return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
|
||||
}
|
||||
|
||||
static U64 XXH_readBE64(const void* ptr)
|
||||
static xxh_u64 XXH_readBE64(const void* ptr)
|
||||
{
|
||||
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
|
||||
}
|
||||
|
||||
XXH_FORCE_INLINE U64
|
||||
XXH_FORCE_INLINE xxh_u64
|
||||
XXH_readLE64_align(const void* ptr, XXH_alignment align)
|
||||
{
|
||||
if (align==XXH_unaligned)
|
||||
return XXH_readLE64(ptr);
|
||||
else
|
||||
return XXH_CPU_LITTLE_ENDIAN ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
|
||||
return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
|
||||
}
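The three XXH_read64() variants above trade portability for speed; the memcpy() path is the portable default and normally compiles down to a single load on targets that tolerate unaligned access. A minimal standalone sketch of the same pattern (the name load64 is illustrative, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* Portable unaligned 64-bit load: copy the bytes into a local variable
     * and let the compiler pick the best instruction. Same idea as the
     * memcpy()-based XXH_read64() above. */
    static uint64_t load64(const void* p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));   /* defined behavior even if p is misaligned */
        return v;
    }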


/*====== xxh64 ======*/

static const U64 PRIME64_1 = 0x9E3779B185EBCA87ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
static const U64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
static const U64 PRIME64_3 = 0x165667B19E3779F9ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
static const U64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
static const U64 PRIME64_5 = 0x27D4EB2F165667C5ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL;   /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL;   /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL;   /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL;   /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL;   /* 0b0010011111010100111010110010111100010110010101100110011111000101 */

static U64 XXH64_round(U64 acc, U64 input)
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
@ -766,7 +771,7 @@ static U64 XXH64_round(U64 acc, U64 input)
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
@ -774,7 +779,7 @@ static U64 XXH64_mergeRound(U64 acc, U64 val)
    return acc;
}
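XXH64_round() absorbs one 8-byte lane and XXH64_mergeRound() folds a finished lane back into the running hash. The self-contained sketch below mirrors those two helpers and the lane-merge step used for long inputs; the names round64, merge64, combine4 and the standalone rotl64 are illustrative only, while the constants and rotation amounts are the ones shown in this diff:

    #include <stdint.h>

    #define PRIME64_1 0x9E3779B185EBCA87ULL
    #define PRIME64_2 0xC2B2AE3D27D4EB4FULL
    #define PRIME64_4 0x85EBCA77C2B2AE63ULL

    static uint64_t rotl64(uint64_t x, int r) { return (x << r) | (x >> (64 - r)); }

    /* One lane step: absorb 8 bytes of input into an accumulator. */
    static uint64_t round64(uint64_t acc, uint64_t input)
    {
        acc += input * PRIME64_2;
        acc  = rotl64(acc, 31);
        acc *= PRIME64_1;
        return acc;
    }

    /* Fold one finished lane into the running hash. */
    static uint64_t merge64(uint64_t h, uint64_t lane)
    {
        h ^= round64(0, lane);
        return h * PRIME64_1 + PRIME64_4;
    }

    /* Lane-merge step, as performed for inputs of 32 bytes or more
     * (length addition and final avalanche not shown). */
    static uint64_t combine4(uint64_t v1, uint64_t v2, uint64_t v3, uint64_t v4)
    {
        uint64_t h = rotl64(v1, 1) + rotl64(v2, 7) + rotl64(v3, 12) + rotl64(v4, 18);
        h = merge64(h, v1);
        h = merge64(h, v2);
        h = merge64(h, v3);
        h = merge64(h, v4);
        return h;
    }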

static U64 XXH64_avalanche(U64 h64)
static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
@ -787,20 +792,20 @@ static U64 XXH64_avalanche(U64 h64)

#define XXH_get64bits(p) XXH_readLE64_align(p, align)

static U64
XXH64_finalize(U64 h64, const BYTE* ptr, size_t len, XXH_alignment align)
static xxh_u64
XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define PROCESS1_64          \
    h64 ^= (*ptr++) * PRIME64_5; \
    h64 = XXH_rotl64(h64, 11) * PRIME64_1;

#define PROCESS4_64          \
    h64 ^= (U64)(XXH_get32bits(ptr)) * PRIME64_1; \
    h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \
    ptr+=4;                  \
    h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;

#define PROCESS8_64 {        \
    U64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
    xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
    ptr+=8;                  \
    h64 ^= k1;               \
    h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
@ -910,25 +915,25 @@ XXH64_finalize(U64 h64, const BYTE* ptr, size_t len, XXH_alignment align)
    return 0;  /* unreachable, but some compilers complain without it */
}

XXH_FORCE_INLINE U64
XXH64_endian_align(const BYTE* input, size_t len, U64 seed, XXH_alignment align)
XXH_FORCE_INLINE xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    const BYTE* bEnd = input + len;
    U64 h64;
    const xxh_u8* bEnd = input + len;
    xxh_u64 h64;

#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
    if (input==NULL) {
        len=0;
        bEnd=input=(const BYTE*)(size_t)32;
        bEnd=input=(const xxh_u8*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;
        const xxh_u8* const limit = bEnd - 32;
        xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2;
        xxh_u64 v2 = seed + PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
@ -947,29 +952,29 @@ XXH64_endian_align(const BYTE* input, size_t len, U64 seed, XXH_alignment align)
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;
    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}


XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, unsigned long long seed)
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const BYTE*)input, len);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);

#else

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const BYTE*)input, len, seed, XXH_aligned);
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const BYTE*)input, len, seed, XXH_unaligned);
    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);

#endif
}
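After this change the seed parameter of the one-shot entry point is XXH64_hash_t rather than unsigned long long; existing callers keep working on platforms where both are 64-bit. A minimal usage sketch of the public API, assuming xxhash.h is on the include path:

    #include <stdio.h>
    #include <string.h>
    #include "xxhash.h"

    int main(void)
    {
        const char* msg = "Nobody inspects the spammish repetition";
        XXH64_hash_t const seed = 20190608;                 /* any 64-bit value */
        XXH64_hash_t const h = XXH64(msg, strlen(msg), seed);
        printf("XXH64 = %016llx\n", (unsigned long long)h);
        return 0;
    }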
@ -991,7 +996,7 @@ XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
@ -1014,19 +1019,19 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)
        return XXH_ERROR;
#endif

    {   const BYTE* p = (const BYTE*)input;
        const BYTE* const bEnd = p + len;
    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {  /* fill in tmp buffer */
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
            state->memsize += (U32)len;
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
            state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
            state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
@ -1036,11 +1041,11 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)
        }

        if (p+32 <= bEnd) {
            const BYTE* const limit = bEnd - 32;
            U64 v1 = state->v1;
            U64 v2 = state->v2;
            U64 v3 = state->v3;
            U64 v4 = state->v4;
            const xxh_u8* const limit = bEnd - 32;
            xxh_u64 v1 = state->v1;
            xxh_u64 v2 = state->v2;
            xxh_u64 v3 = state->v3;
            xxh_u64 v4 = state->v4;

            do {
                v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
@ -1067,13 +1072,13 @@ XXH64_update (XXH64_state_t* state, const void* input, size_t len)

XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
{
    U64 h64;
    xxh_u64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;
        xxh_u64 const v1 = state->v1;
        xxh_u64 const v2 = state->v2;
        xxh_u64 const v3 = state->v3;
        xxh_u64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
@ -1084,9 +1089,9 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
        h64  = state->v3 /*seed*/ + PRIME64_5;
    }

    h64 += (U64) state->total_len;
    h64 += (xxh_u64) state->total_len;

    return XXH64_finalize(h64, (const BYTE*)state->mem64, (size_t)state->total_len, XXH_aligned);
    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}
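Because XXH64_update() buffers partial 32-byte blocks in state->mem64, feeding the same bytes in arbitrary chunk sizes should yield exactly the one-shot result. A small self-check along those lines (a sketch, not part of this patch; check_streaming_matches_oneshot is an illustrative name):

    #include <assert.h>
    #include <string.h>
    #include "xxhash.h"

    static void check_streaming_matches_oneshot(const void* data, size_t len, XXH64_hash_t seed)
    {
        XXH64_hash_t const ref = XXH64(data, len, seed);

        XXH64_state_t* const st = XXH64_createState();
        assert(st != NULL);
        XXH64_reset(st, seed);

        /* feed the input in small, uneven chunks */
        {   const char* p = (const char*)data;
            size_t remaining = len;
            size_t chunk = 1;
            while (remaining > 0) {
                size_t const n = (chunk < remaining) ? chunk : remaining;
                XXH64_update(st, p, n);
                p += n; remaining -= n;
                chunk = chunk * 2 + 1;   /* vary the chunk size */
            }
        }
        assert(XXH64_digest(st) == ref);
        XXH64_freeState(st);
    }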


15
xxhash.h
@ -178,7 +178,12 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void);
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;
#else
    typedef unsigned int XXH32_hash_t;
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
      typedef unsigned long XXH32_hash_t;
#   endif
#endif
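This hunk is the 16-bit int/seed fix from the pull-request title: when unsigned int is only 16 bits wide, UINT_MAX falls below 0xFFFFFFFFUL and the typedef falls back to unsigned long, so XXH32_hash_t (and the seed that now uses it) is always at least 32 bits. A compile-time check a downstream project could add to make that assumption explicit (illustrative only, assuming xxhash.h is on the include path):

    #include <limits.h>
    #include "xxhash.h"

    /* Fails to compile if XXH32_hash_t is narrower than 32 bits. */
    typedef char xxh32_hash_t_is_wide_enough
        [(sizeof(XXH32_hash_t) * CHAR_BIT >= 32) ? 1 : -1];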

/*! XXH32() :
@ -186,7 +191,7 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void);
    The memory between input & input+length must be valid (allocated and read-accessible).
    "seed" can be used to alter the result predictably.
    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);

/*====== Streaming ======*/

@ -216,7 +221,7 @@ XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
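A short usage note for the streaming declarations above, combined with the canonical form already present in the header: XXH32_canonicalFromHash() stores the digest with a fixed byte order, so a value written on one machine compares equal on another. A sketch, assuming xxhash.h is on the include path:

    #include <assert.h>
    #include <string.h>
    #include "xxhash.h"

    static void hash_in_two_parts(void)
    {
        const char part1[] = "hello, ";
        const char part2[] = "world";
        XXH32_hash_t const seed = 0;

        XXH32_state_t* const st = XXH32_createState();
        XXH32_reset(st, seed);
        XXH32_update(st, part1, strlen(part1));
        XXH32_update(st, part2, strlen(part2));
        {   XXH32_hash_t const h = XXH32_digest(st);
            /* canonical form: fixed byte order, safe to store or transmit */
            XXH32_canonical_t canon;
            XXH32_canonicalFromHash(&canon, h);
            assert(XXH32_hashFromCanonical(&canon) == h);
        }
        XXH32_freeState(st);
    }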

@ -260,7 +265,7 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src
    "seed" can be used to alter the result predictably.
    This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed);

/*====== Streaming ======*/
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
@ -268,7 +273,7 @@ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);


156
xxhsum.c
@ -136,23 +136,29 @@ static __inline int IS_CONSOLE(FILE* stdStream) {
#  define MEM_MODULE
#  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    include <stdint.h>
     typedef uint8_t  BYTE;
     typedef uint16_t U16;
     typedef uint32_t U32;
     typedef  int32_t S32;
     typedef uint64_t U64;
     typedef uint8_t  xxh_u8;
     typedef uint16_t xxh_u16;
     typedef uint32_t xxh_u32;
     typedef  int32_t xxh_s32;
     typedef uint64_t xxh_u64;
#  else
     typedef unsigned char      BYTE;
     typedef unsigned short     U16;
     typedef unsigned int       U32;
     typedef   signed int       S32;
     typedef unsigned long long U64;
#    include <limits.h>
     typedef unsigned char      xxh_u8;
     typedef unsigned short     xxh_u16;
#    if UINT_MAX == 0xFFFFFFFFUL
       typedef unsigned int     xxh_u32;
       typedef   signed int     xxh_s32;
#    else
       typedef unsigned long    xxh_u32;
       typedef   signed long    xxh_s32;
#    endif
     typedef unsigned long long xxh_u64;
#  endif
#endif

static unsigned BMK_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
    return one.c[0];
}

@ -288,7 +294,7 @@ static int g_displayLevel = 2;
/* ************************************
 *  Local variables
 **************************************/
static U32 g_nbIterations = NBLOOPS;
static xxh_u32 g_nbIterations = NBLOOPS;


/* ************************************
@ -300,7 +306,7 @@ static clock_t BMK_clockSpan( clock_t start )
}


static size_t BMK_findMaxMem(U64 requiredMem)
static size_t BMK_findMaxMem(xxh_u64 requiredMem)
{
    size_t const step = 64 MB;
    void* testmem = NULL;
@ -324,7 +330,7 @@ static size_t BMK_findMaxMem(U64 requiredMem)
}


static U64 BMK_GetFileSize(const char* infilename)
static xxh_u64 BMK_GetFileSize(const char* infilename)
{
    int r;
#if defined(_MSC_VER)
@ -335,39 +341,39 @@ static U64 BMK_GetFileSize(const char* infilename)
    r = stat(infilename, &statbuf);
#endif
    if (r || !S_ISREG(statbuf.st_mode)) return 0;   /* No good... */
    return (U64)statbuf.st_size;
    return (xxh_u64)statbuf.st_size;
}

typedef U32 (*hashFunction)(const void* buffer, size_t bufferSize, U32 seed);
typedef xxh_u32 (*hashFunction)(const void* buffer, size_t bufferSize, xxh_u32 seed);

static U32 localXXH32(const void* buffer, size_t bufferSize, U32 seed) { return XXH32(buffer, bufferSize, seed); }
static xxh_u32 localXXH32(const void* buffer, size_t bufferSize, xxh_u32 seed) { return XXH32(buffer, bufferSize, seed); }

static U32 localXXH64(const void* buffer, size_t bufferSize, U32 seed) { return (U32)XXH64(buffer, bufferSize, seed); }
static xxh_u32 localXXH64(const void* buffer, size_t bufferSize, xxh_u32 seed) { return (xxh_u32)XXH64(buffer, bufferSize, seed); }

static U32 localXXH3_64b(const void* buffer, size_t bufferSize, U32 seed) { (void)seed; return (U32)XXH3_64bits(buffer, bufferSize); }
static U32 localXXH3_64b_seeded(const void* buffer, size_t bufferSize, U32 seed) { return (U32)XXH3_64bits_withSeed(buffer, bufferSize, seed); }
static xxh_u32 localXXH3_64b(const void* buffer, size_t bufferSize, xxh_u32 seed) { (void)seed; return (xxh_u32)XXH3_64bits(buffer, bufferSize); }
static xxh_u32 localXXH3_64b_seeded(const void* buffer, size_t bufferSize, xxh_u32 seed) { return (xxh_u32)XXH3_64bits_withSeed(buffer, bufferSize, seed); }

static U32 localXXH3_128b(const void* buffer, size_t bufferSize, U32 seed) { (void)seed; return (U32)(XXH3_128bits(buffer, bufferSize).low64); }
static U32 localXXH3_128b_seeded(const void* buffer, size_t bufferSize, U32 seed) { return (U32)(XXH3_128bits_withSeed(buffer, bufferSize, seed).low64); }
static xxh_u32 localXXH3_128b(const void* buffer, size_t bufferSize, xxh_u32 seed) { (void)seed; return (xxh_u32)(XXH3_128bits(buffer, bufferSize).low64); }
static xxh_u32 localXXH3_128b_seeded(const void* buffer, size_t bufferSize, xxh_u32 seed) { return (xxh_u32)(XXH3_128bits_withSeed(buffer, bufferSize, seed).low64); }
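Every benchmarked variant is funnelled through the common hashFunction signature by a tiny wrapper that narrows wider results to xxh_u32. A new entry would follow the same pattern; for example, a hypothetical wrapper over the high 64 bits of the 128-bit hash (localXXH3_128b_high is not part of this patch):

    static xxh_u32 localXXH3_128b_high(const void* buffer, size_t bufferSize, xxh_u32 seed)
    {
        /* keep only the top word so the benchmark driver can compare 32-bit results */
        return (xxh_u32)(XXH3_128bits_withSeed(buffer, bufferSize, seed).high64);
    }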
|
||||
|
||||
static void BMK_benchHash(hashFunction h, const char* hName, const void* buffer, size_t bufferSize)
|
||||
{
|
||||
U32 nbh_perIteration = (U32)((300 MB) / (bufferSize+1)) + 1; /* first loop conservatively aims for 300 MB/s */
|
||||
U32 iterationNb;
|
||||
xxh_u32 nbh_perIteration = (xxh_u32)((300 MB) / (bufferSize+1)) + 1; /* first loop conservatively aims for 300 MB/s */
|
||||
xxh_u32 iterationNb;
|
||||
double fastestH = 100000000.;
|
||||
|
||||
DISPLAYLEVEL(2, "\r%70s\r", ""); /* Clean display line */
|
||||
if (g_nbIterations<1) g_nbIterations=1;
|
||||
for (iterationNb = 1; iterationNb <= g_nbIterations; iterationNb++) {
|
||||
U32 r=0;
|
||||
xxh_u32 r=0;
|
||||
clock_t cStart;
|
||||
|
||||
DISPLAYLEVEL(2, "%1u-%-22.22s : %10u ->\r", iterationNb, hName, (U32)bufferSize);
|
||||
DISPLAYLEVEL(2, "%1u-%-22.22s : %10u ->\r", iterationNb, hName, (xxh_u32)bufferSize);
|
||||
cStart = clock();
|
||||
while (clock() == cStart); /* starts clock() at its exact beginning */
|
||||
cStart = clock();
|
||||
|
||||
{ U32 u;
|
||||
{ xxh_u32 u;
|
||||
for (u=0; u<nbh_perIteration; u++)
|
||||
r += h(buffer, bufferSize, u);
|
||||
}
|
||||
@ -382,20 +388,20 @@ static void BMK_benchHash(hashFunction h, const char* hName, const void* buffer,
|
||||
}
|
||||
if (timeS < fastestH) fastestH = timeS;
|
||||
DISPLAYLEVEL(2, "%1u-%-22.22s : %10u -> %8.0f it/s (%7.1f MB/s) \r",
|
||||
iterationNb, hName, (U32)bufferSize,
|
||||
iterationNb, hName, (xxh_u32)bufferSize,
|
||||
(double)1 / fastestH,
|
||||
((double)bufferSize / (1<<20)) / fastestH );
|
||||
}
|
||||
{ double nbh_perSecond = (1 / fastestH) + 1;
|
||||
if (nbh_perSecond > (double)(4000U<<20)) nbh_perSecond = (double)(4000U<<20);
|
||||
nbh_perIteration = (U32)nbh_perSecond;
|
||||
nbh_perIteration = (xxh_u32)nbh_perSecond;
|
||||
}
|
||||
}
|
||||
DISPLAYLEVEL(1, "%-24.24s : %10u -> %8.0f it/s (%7.1f MB/s) \n", hName, (U32)bufferSize,
|
||||
DISPLAYLEVEL(1, "%-24.24s : %10u -> %8.0f it/s (%7.1f MB/s) \n", hName, (xxh_u32)bufferSize,
|
||||
(double)1 / fastestH,
|
||||
((double)bufferSize / (1<<20)) / fastestH);
|
||||
if (g_displayLevel<1)
|
||||
DISPLAYLEVEL(0, "%u, ", (U32)((double)1 / fastestH));
|
||||
DISPLAYLEVEL(0, "%u, ", (xxh_u32)((double)1 / fastestH));
|
||||
}
|
||||
|
||||
|
||||
@ -404,7 +410,7 @@ static void BMK_benchHash(hashFunction h, const char* hName, const void* buffer,
|
||||
* buffer : is supposed 8-bytes aligned (if malloc'ed, it should be)
|
||||
* the real allocated size of buffer is supposed to be >= (bufferSize+3).
|
||||
* @return : 0 on success, 1 if error (invalid mode selected) */
|
||||
static int BMK_benchMem(const void* buffer, size_t bufferSize, U32 specificTest)
|
||||
static int BMK_benchMem(const void* buffer, size_t bufferSize, xxh_u32 specificTest)
|
||||
{
|
||||
assert((((size_t)buffer) & 8) == 0); /* ensure alignment */
|
||||
|
||||
@ -465,9 +471,9 @@ static int BMK_benchMem(const void* buffer, size_t bufferSize, U32 specificTest)
|
||||
|
||||
|
||||
static size_t BMK_selectBenchedSize(const char* fileName)
|
||||
{ U64 const inFileSize = BMK_GetFileSize(fileName);
|
||||
{ xxh_u64 const inFileSize = BMK_GetFileSize(fileName);
|
||||
size_t benchedSize = (size_t) BMK_findMaxMem(inFileSize);
|
||||
if ((U64)benchedSize > inFileSize) benchedSize = (size_t)inFileSize;
|
||||
if ((xxh_u64)benchedSize > inFileSize) benchedSize = (size_t)inFileSize;
|
||||
if (benchedSize < inFileSize) {
|
||||
DISPLAY("Not enough memory for '%s' full size; testing %i MB only...\n", fileName, (int)(benchedSize>>20));
|
||||
}
|
||||
@ -475,7 +481,7 @@ static size_t BMK_selectBenchedSize(const char* fileName)
|
||||
}
|
||||
|
||||
|
||||
static int BMK_benchFiles(const char** fileNamesTable, int nbFiles, U32 specificTest)
|
||||
static int BMK_benchFiles(const char** fileNamesTable, int nbFiles, xxh_u32 specificTest)
|
||||
{
|
||||
int result = 0;
|
||||
int fileIdx;
|
||||
@ -522,7 +528,7 @@ static int BMK_benchFiles(const char** fileNamesTable, int nbFiles, U32 specific
|
||||
}
|
||||
|
||||
|
||||
static int BMK_benchInternal(size_t keySize, U32 specificTest)
|
||||
static int BMK_benchInternal(size_t keySize, xxh_u32 specificTest)
|
||||
{
|
||||
void* const buffer = calloc(keySize+16+3, 1);
|
||||
if (!buffer) {
|
||||
@ -535,9 +541,9 @@ static int BMK_benchInternal(size_t keySize, U32 specificTest)
|
||||
/* bench */
|
||||
DISPLAYLEVEL(1, "Sample of ");
|
||||
if (keySize > 10 KB) {
|
||||
DISPLAYLEVEL(1, "%u KB", (U32)(keySize >> 10));
|
||||
DISPLAYLEVEL(1, "%u KB", (xxh_u32)(keySize >> 10));
|
||||
} else {
|
||||
DISPLAYLEVEL(1, "%u bytes", (U32)keySize);
|
||||
DISPLAYLEVEL(1, "%u bytes", (xxh_u32)keySize);
|
||||
}
|
||||
DISPLAYLEVEL(1, "... \n");
|
||||
|
||||
@ -572,7 +578,7 @@ static void BMK_checkResult64(XXH64_hash_t r1, XXH64_hash_t r2)
|
||||
static int nbTests = 1;
|
||||
if (r1!=r2) {
|
||||
DISPLAY("\rError: 64-bit hash test %i: Internal sanity check failed!\n", nbTests);
|
||||
DISPLAY("\rGot 0x%08X%08XULL, expected 0x%08X%08XULL.\n", (U32)(r1>>32), (U32)r1, (U32)(r2>>32), (U32)r2);
|
||||
DISPLAY("\rGot 0x%08X%08XULL, expected 0x%08X%08XULL.\n", (xxh_u32)(r1>>32), (xxh_u32)r1, (xxh_u32)(r2>>32), (xxh_u32)r2);
|
||||
DISPLAY("\rNote: If you modified the hash functions, make sure to either update the values\n"
|
||||
"or temporarily comment out the tests in BMK_sanityCheck.\n");
|
||||
exit(1);
|
||||
@ -586,8 +592,8 @@ static void BMK_checkResult128(XXH128_hash_t r1, XXH128_hash_t r2)
|
||||
if ((r1.low64 != r2.low64) || (r1.high64 != r2.high64)) {
|
||||
DISPLAY("\rError: 128-bit hash test %i: Internal sanity check failed.\n", nbTests);
|
||||
DISPLAY("\rGot { 0x%08X%08XULL, 0x%08X%08XULL }, expected { 0x%08X%08XULL, %08X%08XULL } \n",
|
||||
(U32)(r1.low64>>32), (U32)r1.low64, (U32)(r1.high64>>32), (U32)r1.high64,
|
||||
(U32)(r2.low64>>32), (U32)r2.low64, (U32)(r2.high64>>32), (U32)r2.high64 );
|
||||
(xxh_u32)(r1.low64>>32), (xxh_u32)r1.low64, (xxh_u32)(r1.high64>>32), (xxh_u32)r1.high64,
|
||||
(xxh_u32)(r2.low64>>32), (xxh_u32)r2.low64, (xxh_u32)(r2.high64>>32), (xxh_u32)r2.high64 );
|
||||
DISPLAY("\rNote: If you modified the hash functions, make sure to either update the values\n"
|
||||
"or temporarily comment out the tests in BMK_sanityCheck.\n");
|
||||
exit(1);
|
||||
@ -596,7 +602,7 @@ static void BMK_checkResult128(XXH128_hash_t r1, XXH128_hash_t r2)
|
||||
}
|
||||
|
||||
|
||||
static void BMK_testXXH32(const void* sequence, size_t len, U32 seed, U32 Nresult)
|
||||
static void BMK_testXXH32(const void* sequence, size_t len, xxh_u32 seed, xxh_u32 Nresult)
|
||||
{
|
||||
XXH32_state_t state;
|
||||
size_t pos;
|
||||
@ -613,7 +619,7 @@ static void BMK_testXXH32(const void* sequence, size_t len, U32 seed, U32 Nresul
|
||||
BMK_checkResult32(XXH32_digest(&state), Nresult);
|
||||
}
|
||||
|
||||
static void BMK_testXXH64(const void* data, size_t len, U64 seed, U64 Nresult)
|
||||
static void BMK_testXXH64(const void* data, size_t len, xxh_u64 seed, xxh_u64 Nresult)
|
||||
{
|
||||
XXH64_state_t state;
|
||||
size_t pos;
|
||||
@ -630,15 +636,15 @@ static void BMK_testXXH64(const void* data, size_t len, U64 seed, U64 Nresult)
|
||||
BMK_checkResult64(XXH64_digest(&state), Nresult);
|
||||
}
|
||||
|
||||
static void BMK_testXXH3(const void* data, size_t len, U64 seed, U64 Nresult)
|
||||
static void BMK_testXXH3(const void* data, size_t len, xxh_u64 seed, xxh_u64 Nresult)
|
||||
{
|
||||
{ U64 const Dresult = XXH3_64bits_withSeed(data, len, seed);
|
||||
{ xxh_u64 const Dresult = XXH3_64bits_withSeed(data, len, seed);
|
||||
BMK_checkResult64(Dresult, Nresult);
|
||||
}
|
||||
|
||||
/* check that the no-seed variant produces same result as seed==0 */
|
||||
if (seed == 0) {
|
||||
U64 const Dresult = XXH3_64bits(data, len);
|
||||
xxh_u64 const Dresult = XXH3_64bits(data, len);
|
||||
BMK_checkResult64(Dresult, Nresult);
|
||||
}
|
||||
|
||||
@ -667,9 +673,9 @@ static void BMK_testXXH3(const void* data, size_t len, U64 seed, U64 Nresult)
|
||||
} }
|
||||
}
|
||||
|
||||
static void BMK_testXXH3_withSecret(const void* data, size_t len, const void* secret, size_t secretSize, U64 Nresult)
|
||||
static void BMK_testXXH3_withSecret(const void* data, size_t len, const void* secret, size_t secretSize, xxh_u64 Nresult)
|
||||
{
|
||||
{ U64 const Dresult = XXH3_64bits_withSecret(data, len, secret, secretSize);
|
||||
{ xxh_u64 const Dresult = XXH3_64bits_withSecret(data, len, secret, secretSize);
|
||||
BMK_checkResult64(Dresult, Nresult);
|
||||
}
|
||||
|
||||
@ -688,7 +694,7 @@ static void BMK_testXXH3_withSecret(const void* data, size_t len, const void* se
|
||||
} }
|
||||
}
|
||||
|
||||
void BMK_testXXH128(const void* data, size_t len, U64 seed, XXH128_hash_t Nresult)
|
||||
void BMK_testXXH128(const void* data, size_t len, xxh_u64 seed, XXH128_hash_t Nresult)
|
||||
{
|
||||
{ XXH128_hash_t const Dresult = XXH3_128bits_withSeed(data, len, seed);
|
||||
BMK_checkResult128(Dresult, Nresult);
|
||||
@ -734,14 +740,14 @@ void BMK_testXXH128(const void* data, size_t len, U64 seed, XXH128_hash_t Nresul
|
||||
#define SANITY_BUFFER_SIZE 2243
static void BMK_sanityCheck(void)
{
    const U32 prime = 2654435761U;
    const U64 prime64 = 11400714785074694797ULL;
    BYTE sanityBuffer[SANITY_BUFFER_SIZE];
    U64 byteGen = prime;
    const xxh_u32 prime = 2654435761U;
    const xxh_u64 prime64 = 11400714785074694797ULL;
    xxh_u8 sanityBuffer[SANITY_BUFFER_SIZE];
    xxh_u64 byteGen = prime;

    int i;
    for (i=0; i<SANITY_BUFFER_SIZE; i++) {
        sanityBuffer[i] = (BYTE)(byteGen>>56);
        sanityBuffer[i] = (xxh_u8)(byteGen>>56);
        byteGen *= prime64;
    }
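The sanity buffer is generated deterministically: the state starts from the 32-bit prime, is repeatedly multiplied by the 64-bit prime, and only the top byte of each step is kept, so every platform hashes the identical byte sequence. The same generator as a standalone helper, for illustration (fill_sanity_buffer is not part of the patch):

    static void fill_sanity_buffer(xxh_u8* buf, size_t size)
    {
        const xxh_u64 prime64 = 11400714785074694797ULL;
        xxh_u64 byteGen = 2654435761U;   /* 32-bit prime used as the starting state */
        size_t i;
        for (i = 0; i < size; i++) {
            buf[i] = (xxh_u8)(byteGen >> 56);   /* keep only the most significant byte */
            byteGen *= prime64;
        }
    }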
|
||||
|
||||
@ -916,7 +922,7 @@ static void BMK_sanityCheck(void)
|
||||
|
||||
static void BMK_display_LittleEndian(const void* ptr, size_t length)
|
||||
{
|
||||
const BYTE* p = (const BYTE*)ptr;
|
||||
const xxh_u8* p = (const xxh_u8*)ptr;
|
||||
size_t idx;
|
||||
for (idx=length-1; idx<length; idx--) /* intentional underflow to negative to detect end */
|
||||
DISPLAYRESULT("%02x", p[idx]);
|
||||
@ -924,7 +930,7 @@ static void BMK_display_LittleEndian(const void* ptr, size_t length)
|
||||
|
||||
static void BMK_display_BigEndian(const void* ptr, size_t length)
|
||||
{
|
||||
const BYTE* p = (const BYTE*)ptr;
|
||||
const xxh_u8* p = (const xxh_u8*)ptr;
|
||||
size_t idx;
|
||||
for (idx=0; idx<length; idx++)
|
||||
DISPLAYRESULT("%02x", p[idx]);
|
||||
@ -1149,10 +1155,10 @@ typedef struct {
|
||||
char* lineBuf;
|
||||
size_t blockSize;
|
||||
char* blockBuf;
|
||||
U32 strictMode;
|
||||
U32 statusOnly;
|
||||
U32 warn;
|
||||
U32 quiet;
|
||||
xxh_u32 strictMode;
|
||||
xxh_u32 statusOnly;
|
||||
xxh_u32 warn;
|
||||
xxh_u32 quiet;
|
||||
ParseFileReport report;
|
||||
} ParseFileArg;
|
||||
|
||||
@ -1486,10 +1492,10 @@ static void parseFile1(ParseFileArg* parseFileArg)
|
||||
*/
|
||||
static int checkFile(const char* inFileName,
|
||||
const endianess displayEndianess,
|
||||
U32 strictMode,
|
||||
U32 statusOnly,
|
||||
U32 warn,
|
||||
U32 quiet)
|
||||
xxh_u32 strictMode,
|
||||
xxh_u32 statusOnly,
|
||||
xxh_u32 warn,
|
||||
xxh_u32 quiet)
|
||||
{
|
||||
int result = 0;
|
||||
FILE* inFile = NULL;
|
||||
@ -1570,10 +1576,10 @@ static int checkFile(const char* inFileName,
|
||||
|
||||
static int checkFiles(const char** fnList, int fnTotal,
|
||||
const endianess displayEndianess,
|
||||
U32 strictMode,
|
||||
U32 statusOnly,
|
||||
U32 warn,
|
||||
U32 quiet)
|
||||
xxh_u32 strictMode,
|
||||
xxh_u32 statusOnly,
|
||||
xxh_u32 warn,
|
||||
xxh_u32 quiet)
|
||||
{
|
||||
int ok = 1;
|
||||
|
||||
@ -1687,13 +1693,13 @@ int main(int argc, const char** argv)
|
||||
{
|
||||
int i, filenamesStart = 0;
|
||||
const char* const exename = argv[0];
|
||||
U32 benchmarkMode = 0;
|
||||
U32 fileCheckMode = 0;
|
||||
U32 strictMode = 0;
|
||||
U32 statusOnly = 0;
|
||||
U32 warn = 0;
|
||||
U32 quiet = 0;
|
||||
U32 specificTest = 0;
|
||||
xxh_u32 benchmarkMode = 0;
|
||||
xxh_u32 fileCheckMode = 0;
|
||||
xxh_u32 strictMode = 0;
|
||||
xxh_u32 statusOnly = 0;
|
||||
xxh_u32 warn = 0;
|
||||
xxh_u32 quiet = 0;
|
||||
xxh_u32 specificTest = 0;
|
||||
size_t keySize = XXH_DEFAULT_SAMPLE_SIZE;
|
||||
algoType algo = g_defaultAlgo;
|
||||
endianess displayEndianess = big_endian;
|
||||
|