diff --git a/adv-simd.h b/adv-simd.h
index 80f49bd3..df109ebe 100644
--- a/adv-simd.h
+++ b/adv-simd.h
@@ -112,7 +112,7 @@ inline size_t AdvancedProcessBlocks64_6x2_NEON(F2 func2, F6 func6,
 CRYPTOPP_ASSERT(outBlocks);
 CRYPTOPP_ASSERT(length >= 8);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint32x4_t s_one = {0, 0, 0, 1<<24};
 const uint32x4_t s_two = {0, 2<<24, 0, 2<<24};
 #else
@@ -357,7 +357,7 @@ inline size_t AdvancedProcessBlocks128_6x1_NEON(F1 func1, F6 func6,
 CRYPTOPP_ASSERT(outBlocks);
 CRYPTOPP_ASSERT(length >= 16);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint32x4_t s_one = {0, 0, 0, 1<<24};
 const uint32x4_t s_two = {0, 2<<24, 0, 2<<24};
 #else
@@ -520,7 +520,7 @@ inline size_t AdvancedProcessBlocks128_4x1_NEON(F1 func1, F4 func4,
 CRYPTOPP_ASSERT(length >= 16);
 CRYPTOPP_UNUSED(unused);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint32x4_t s_one = {0, 0, 0, 1<<24};
 const uint32x4_t s_two = {0, 2<<24, 0, 2<<24};
 #else
@@ -660,7 +660,7 @@ inline size_t AdvancedProcessBlocks128_6x2_NEON(F2 func2, F6 func6,
 CRYPTOPP_ASSERT(outBlocks);
 CRYPTOPP_ASSERT(length >= 16);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint32x4_t s_one = {0, 0, 0, 1<<24};
 const uint32x4_t s_two = {0, 2<<24, 0, 2<<24};
 #else
@@ -1806,7 +1806,7 @@ inline size_t AdvancedProcessBlocks64_6x2_ALTIVEC(F2 func2, F6 func6,
 CRYPTOPP_ASSERT(outBlocks);
 CRYPTOPP_ASSERT(length >= 8);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 enum {LowOffset=8, HighOffset=0};
 const uint32x4_p s_one = {1,0,0,0};
 const uint32x4_p s_two = {2,0,2,0};
@@ -2077,7 +2077,7 @@ inline size_t AdvancedProcessBlocks128_4x1_ALTIVEC(F1 func1, F4 func4,
 CRYPTOPP_ASSERT(outBlocks);
 CRYPTOPP_ASSERT(length >= 16);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint32x4_p s_one = {1,0,0,0};
 #else
 const uint32x4_p s_one = {0,0,0,1};
@@ -2222,7 +2222,7 @@ inline size_t AdvancedProcessBlocks128_6x1_ALTIVEC(F1 func1, F6 func6,
 CRYPTOPP_ASSERT(outBlocks);
 CRYPTOPP_ASSERT(length >= 16);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint32x4_p s_one = {1,0,0,0};
 #else
 const uint32x4_p s_one = {0,0,0,1};
diff --git a/aria.cpp b/aria.cpp
index 6e59d0ff..3b848109 100644
--- a/aria.cpp
+++ b/aria.cpp
@@ -291,7 +291,7 @@ void ARIA::Base::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, b
 else
 #endif  // CRYPTOPP_ENABLE_ARIA_SSSE3_INTRINSICS
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 {
 outBlock[ 0] = (byte)(X1[ARIA_BRF(t[0],3)]   ) ^ rk[ 3];
 outBlock[ 1] = (byte)(X2[ARIA_BRF(t[0],2)]>>8) ^ rk[ 2];
diff --git a/camellia.cpp b/camellia.cpp
index 07bd38da..30b8bc99 100644
--- a/camellia.cpp
+++ b/camellia.cpp
@@ -60,7 +60,7 @@ NAMESPACE_BEGIN(CryptoPP)
 ROUND(lh, ll, rh, rl, k0, k1) \
 ROUND(rh, rl, lh, ll, k2, k3)
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 #define EFI(i) (1-(i))
 #else
 #define EFI(i) (i)
diff --git a/config.h b/config.h
index 75bbdc72..65131315 100644
--- a/config.h
+++ b/config.h
@@ -9,23 +9,24 @@
 // ***************** Important Settings ********************
 // define this if running on a big-endian CPU
+// big endian will be assumed if CRYPTOPP_LITTLE_ENDIAN is not non-0
 #if !defined(CRYPTOPP_LITTLE_ENDIAN) && !defined(CRYPTOPP_BIG_ENDIAN) && (defined(__BIG_ENDIAN__) || (defined(__s390__) || defined(__s390x__) || defined(__zarch__)) || (defined(__m68k__) || defined(__MC68K__)) || defined(__sparc) || defined(__sparc__) || defined(__hppa__) || defined(__MIPSEB__) || defined(__ARMEB__) || (defined(__MWERKS__) && !defined(__INTEL__)))
 # define CRYPTOPP_BIG_ENDIAN 1
 #endif
 // define this if running on a little-endian CPU
-// big endian will be assumed if CRYPTOPP_LITTLE_ENDIAN is not defined
+// big endian will be assumed if CRYPTOPP_LITTLE_ENDIAN is not non-0
 #if !defined(CRYPTOPP_BIG_ENDIAN) && !defined(CRYPTOPP_LITTLE_ENDIAN)
 # define CRYPTOPP_LITTLE_ENDIAN 1
 #endif
 // Sanity checks. Some processors have more than big, little and bi-endian modes. PDP mode, where order results in "4312", should
 // raise red flags immediately. Additionally, mis-classified machines, like (previosuly) S/390, should raise red flags immediately.
-#if defined(CRYPTOPP_BIG_ENDIAN) && defined(__GNUC__) && defined(__BYTE_ORDER__) && (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__)
-# error "CRYPTOPP_BIG_ENDIAN is set, but __BYTE_ORDER__ is not __ORDER_BIG_ENDIAN__"
+#if (CRYPTOPP_BIG_ENDIAN) && defined(__GNUC__) && defined(__BYTE_ORDER__) && (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__)
+# error "(CRYPTOPP_BIG_ENDIAN) is set, but __BYTE_ORDER__ is not __ORDER_BIG_ENDIAN__"
 #endif
-#if defined(CRYPTOPP_LITTLE_ENDIAN) && defined(__GNUC__) && defined(__BYTE_ORDER__) && (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
-# error "CRYPTOPP_LITTLE_ENDIAN is set, but __BYTE_ORDER__ is not __ORDER_LITTLE_ENDIAN__"
+#if (CRYPTOPP_LITTLE_ENDIAN) && defined(__GNUC__) && defined(__BYTE_ORDER__) && (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
+# error "(CRYPTOPP_LITTLE_ENDIAN) is set, but __BYTE_ORDER__ is not __ORDER_LITTLE_ENDIAN__"
 #endif
 // Define this if you want to disable all OS-dependent features,
@@ -606,7 +607,7 @@ NAMESPACE_END
 // We don't have an ARM big endian test rig. Disable
 // ARM-BE ASM and instrinsics until we can test it.
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 # define CRYPTOPP_DISABLE_ASM 1
 #endif
diff --git a/crc.cpp b/crc.cpp
index 6922df45..854886c2 100644
--- a/crc.cpp
+++ b/crc.cpp
@@ -21,7 +21,7 @@ extern void CRC32C_Update_SSE42(const byte *s, size_t n, word32& c);
 /* Table of CRC-32's of all single byte values (made by makecrc.c) */
 const word32 CRC32::m_tab[] = {
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
@@ -189,7 +189,7 @@ void CRC32::TruncatedFinal(byte *hash, size_t size)
 // Castagnoli CRC32C (iSCSI)
 const word32 CRC32C::m_tab[] = {
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 0x00000000L, 0xf26b8303L, 0xe13b70f7L, 0x1350f3f4L, 0xc79a971fL,
 0x35f1141cL, 0x26a1e7e8L, 0xd4ca64ebL, 0x8ad958cfL, 0x78b2dbccL,
 0x6be22838L, 0x9989ab3bL, 0x4d43cfd0L, 0xbf284cd3L, 0xac78bf27L,
diff --git a/crc.h b/crc.h
index 006313e1..02332e4b 100644
--- a/crc.h
+++ b/crc.h
@@ -12,7 +12,7 @@ NAMESPACE_BEGIN(CryptoPP)
 const word32 CRC32_NEGL = 0xffffffffL;
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 #define CRC32_INDEX(c) (c & 0xff)
 #define CRC32_SHIFTED(c) (c >> 8)
 #else
diff --git a/gcm-simd.cpp b/gcm-simd.cpp
index 46afb17b..16ab1a43 100644
--- a/gcm-simd.cpp
+++ b/gcm-simd.cpp
@@ -191,7 +191,7 @@ using CryptoPP::VectorRotateLeft;
 inline uint64x2_p VMULL2LE(const uint64x2_p& val)
 {
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return VectorRotateLeft<8>(val);
 #else
 return val;
@@ -776,7 +776,7 @@ inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
 inline uint64x2_p LoadHashKey(const byte *hashKey)
 {
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint64x2_p key = (uint64x2_p)VectorLoad(hashKey);
 const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
 return vec_perm(key, key, mask);
@@ -825,7 +825,7 @@ inline T SwapWords(const T& data)
 inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
 {
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return (uint64x2_p)VectorLoad(dataBuffer);
 #else
 const uint64x2_p data = (uint64x2_p)VectorLoad(dataBuffer);
@@ -836,7 +836,7 @@ inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
 inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
 {
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return (uint64x2_p)SwapWords(VectorLoadBE(dataBuffer));
 #else
 return (uint64x2_p)VectorLoadBE(dataBuffer);
diff --git a/gcm.cpp b/gcm.cpp
index 84129b56..59236ac2 100644
--- a/gcm.cpp
+++ b/gcm.cpp
@@ -438,7 +438,7 @@ size_t GCM_Base::AuthenticateBlocks(const byte *data, size_t len)
 #define READ_TABLE_WORD64_COMMON(a, b, c, d) *(word64 *)(void *)(mulTable+(a*1024)+(b*256)+c+d*8)
- #ifdef CRYPTOPP_LITTLE_ENDIAN
+ #if (CRYPTOPP_LITTLE_ENDIAN)
 #if CRYPTOPP_BOOL_SLOW_WORD64
 word32 z0 = (word32)x0;
 word32 z1 = (word32)(x0>>32);
@@ -509,7 +509,7 @@ size_t GCM_Base::AuthenticateBlocks(const byte *data, size_t len)
 #define READ_TABLE_WORD64_COMMON(a, c, d) *(word64 *)(void *)(mulTable+(a)*256*16+(c)+(d)*8)
- #ifdef CRYPTOPP_LITTLE_ENDIAN
+ #if (CRYPTOPP_LITTLE_ENDIAN)
 #if CRYPTOPP_BOOL_SLOW_WORD64
 word32 z0 = (word32)x0;
 word32 z1 = (word32)(x0>>32);
diff --git a/integer.cpp b/integer.cpp
index ff807547..58665ddd 100644
--- a/integer.cpp
+++ b/integer.cpp
@@ -285,7 +285,7 @@ public:
 #endif
 {
 #if defined(CRYPTOPP_NATIVE_DWORD_AVAILABLE)
-# if defined(CRYPTOPP_LITTLE_ENDIAN)
+# if (CRYPTOPP_LITTLE_ENDIAN)
 const word t[2] = {low,high};
 memcpy(&m_whole, t, sizeof(m_whole));
 # else
@@ -390,7 +390,7 @@ private:
 // Thanks to Martin Bonner at http://stackoverflow.com/a/39507183
 struct half_words
 {
- #ifdef CRYPTOPP_LITTLE_ENDIAN
+ #if (CRYPTOPP_LITTLE_ENDIAN)
 word low;
 word high;
 #else
diff --git a/kalyna.cpp b/kalyna.cpp
index 18d6e2c0..6cff5cd0 100644
--- a/kalyna.cpp
+++ b/kalyna.cpp
@@ -43,7 +43,7 @@ using CryptoPP::KalynaTab::IS;
 template
 inline void MakeOddKey(const word64 evenkey[NB], word64 oddkey[NB])
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 if (NB == 2)
 {
 oddkey[0] = (evenkey[1] << 8) | (evenkey[0] >> 56);
diff --git a/lea-simd.cpp b/lea-simd.cpp
index c7eaa819..e8af1ded 100644
--- a/lea-simd.cpp
+++ b/lea-simd.cpp
@@ -107,7 +107,7 @@ inline uint32x4_t RotateRight(const uint32x4_t& val)
 template <>
 inline uint32x4_t RotateLeft<8>(const uint32x4_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 14,13,12,15, 10,9,8,11, 6,5,4,7, 2,1,0,3 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -122,7 +122,7 @@ inline uint32x4_t RotateLeft<8>(const uint32x4_t& val)
 template <>
 inline uint32x4_t RotateRight<8>(const uint32x4_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 12,15,14,13, 8,11,10,9, 4,7,6,5, 0,3,2,1 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
diff --git a/misc.h b/misc.h
index e6ef4d05..086f76ed 100644
--- a/misc.h
+++ b/misc.h
@@ -1094,9 +1094,9 @@ inline bool IsAligned(const void *ptr)
 return IsAlignedOn(ptr, GetAlignmentOf());
 }
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 typedef LittleEndian NativeByteOrder;
-#elif defined(CRYPTOPP_BIG_ENDIAN)
+#elif (CRYPTOPP_BIG_ENDIAN)
 typedef BigEndian NativeByteOrder;
 #else
 # error "Unable to determine endian-ness"
@@ -1107,7 +1107,7 @@ inline bool IsAligned(const void *ptr)
 /// native byte order is big-endian
 /// \details NativeByteOrder is a typedef depending on the platform. If CRYPTOPP_LITTLE_ENDIAN is
 /// set in config.h, then GetNativeByteOrder returns LittleEndian. If
-/// CRYPTOPP_BIG_ENDIAN is set, then GetNativeByteOrder returns BigEndian.
+/// (CRYPTOPP_BIG_ENDIAN) is set, then GetNativeByteOrder returns BigEndian.
 /// \note There are other byte orders besides little- and big-endian, and they include bi-endian
 /// and PDP-endian. If a system is neither little-endian nor big-endian, then a compile time
 /// error occurs.
diff --git a/ppc-simd.h b/ppc-simd.h
index b8213f89..7da9886c 100644
--- a/ppc-simd.h
+++ b/ppc-simd.h
@@ -218,7 +218,7 @@ inline T VectorShiftLeft(const T& vec)
 }
 else
 {
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)zero, C);
 #else
 return (T)vec_sld((uint8x16_p)zero, (uint8x16_p)vec, 16-C);
@@ -261,7 +261,7 @@ inline T VectorShiftRight(const T& vec)
 }
 else
 {
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return (T)vec_sld((uint8x16_p)zero, (uint8x16_p)vec, 16-C);
 #else
 return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)zero, C);
@@ -284,7 +284,7 @@ template
 inline T VectorRotateLeft(const T& vec)
 {
 enum { R = C&0xf };
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
 #else
 return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, 16-R);
@@ -306,7 +306,7 @@ template
 inline T VectorRotateRight(const T& vec)
 {
 enum { R = C&0xf };
-#if CRYPTOPP_BIG_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, 16-R);
 #else
 return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
@@ -399,7 +399,7 @@ inline uint32x4_p VectorLoadBE(const byte src[16])
 #if defined(CRYPTOPP_XLC_VERSION)
 return (uint32x4_p)vec_xl_be(0, (byte*)src);
 #else
-# if defined(CRYPTOPP_BIG_ENDIAN)
+# if (CRYPTOPP_BIG_ENDIAN)
 return (uint32x4_p)vec_vsx_ld(0, src);
 # else
 return (uint32x4_p)Reverse(vec_vsx_ld(0, src));
@@ -420,7 +420,7 @@ inline uint32x4_p VectorLoadBE(int off, const byte src[16])
 #if defined(CRYPTOPP_XLC_VERSION)
 return (uint32x4_p)vec_xl_be(off, (byte*)src);
 #else
-# if defined(CRYPTOPP_BIG_ENDIAN)
+# if (CRYPTOPP_BIG_ENDIAN)
 return (uint32x4_p)vec_vsx_ld(off, (byte*)src);
 # else
 return (uint32x4_p)Reverse(vec_vsx_ld(off, (byte*)src));
@@ -497,7 +497,7 @@ inline void VectorStoreBE(const T& src, byte dest[16])
 #if defined(CRYPTOPP_XLC_VERSION)
 vec_xst_be((uint8x16_p)src, 0, (byte*)dest);
 #else
-# if defined(CRYPTOPP_BIG_ENDIAN)
+# if (CRYPTOPP_BIG_ENDIAN)
 vec_vsx_st((uint8x16_p)src, 0, (byte*)dest);
 # else
 vec_vsx_st((uint8x16_p)Reverse(src), 0, (byte*)dest);
@@ -521,7 +521,7 @@ inline void VectorStoreBE(const T& src, int off, byte dest[16])
 #if defined(CRYPTOPP_XLC_VERSION)
 vec_xst_be((uint8x16_p)src, off, (byte*)dest);
 #else
-# if defined(CRYPTOPP_BIG_ENDIAN)
+# if (CRYPTOPP_BIG_ENDIAN)
 vec_vsx_st((uint8x16_p)src, off, (byte*)dest);
 # else
 vec_vsx_st((uint8x16_p)Reverse(src), off, (byte*)dest);
@@ -660,7 +660,7 @@ inline uint32x4_p VectorLoad(int off, const word32 src[4])
 /// \since Crypto++ 6.0
 inline uint32x4_p VectorLoadBE(const byte src[16])
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 return (uint32x4_p)VectorLoad(src);
 #else
 return (uint32x4_p)Reverse(VectorLoad(src));
@@ -701,7 +701,7 @@ inline void VectorStore(const T& data, byte dest[16])
 template
 inline void VectorStoreBE(const T& src, byte dest[16])
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 VectorStore(src, dest);
 #else
 VectorStore(Reverse(src), dest);
diff --git a/rijndael-simd.cpp b/rijndael-simd.cpp
index 26228f1d..b957d971 100644
--- a/rijndael-simd.cpp
+++ b/rijndael-simd.cpp
@@ -841,7 +841,7 @@ void Rijndael_UncheckedSetKey_POWER8(const byte* userKey, size_t keyLen, word32*
 rkey += keyLen/4;
 }
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 rkey = rk;
 const uint8x16_p mask = ((uint8x16_p){12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3});
 const uint8x16_p zero = {0};
diff --git a/rijndael.cpp b/rijndael.cpp
index f0e200fb..76d91e63 100644
--- a/rijndael.cpp
+++ b/rijndael.cpp
@@ -203,7 +203,7 @@ ANONYMOUS_NAMESPACE_END
 #define QUARTER_ROUND_E(t, a, b, c, d) QUARTER_ROUND(TL_M, Te, t, a, b, c, d)
 #define QUARTER_ROUND_D(t, a, b, c, d) QUARTER_ROUND(TL_M, Td, t, a, b, c, d)
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 #define QUARTER_ROUND_FE(t, a, b, c, d) QUARTER_ROUND(TL_F, Te, t, d, c, b, a)
 #define QUARTER_ROUND_FD(t, a, b, c, d) QUARTER_ROUND(TL_F, Td, t, d, c, b, a)
 #if defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS)
diff --git a/sha-simd.cpp b/sha-simd.cpp
index e699fbe4..687fe169 100644
--- a/sha-simd.cpp
+++ b/sha-simd.cpp
@@ -1115,7 +1115,7 @@ void VectorStore32x4u(const uint32x4_p8 val, T* data, int offset)
 template static inline
 uint32x4_p8 VectorLoadMsg32x4(const T* data, int offset)
 {
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint8x16_p8 mask = {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12};
 const uint32x4_p8 r = VectorLoad32x4u(data, offset);
 return (uint32x4_p8)vec_perm(r, r, mask);
@@ -1190,7 +1190,7 @@ uint32x4_p8 VectorPack(const uint32x4_p8 a, const uint32x4_p8 b,
 template static inline
 uint32x4_p8 VectorShiftLeft(const uint32x4_p8 val)
 {
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 return (uint32x4_p8)vec_sld((uint8x16_p8)val, (uint8x16_p8)val, (16-L)&0xf);
 #else
 return (uint32x4_p8)vec_sld((uint8x16_p8)val, (uint8x16_p8)val, L&0xf);
@@ -1409,7 +1409,7 @@ void VectorStore64x2u(const uint64x2_p8 val, T* data, int offset)
 template static inline
 uint64x2_p8 VectorLoadMsg64x2(const T* data, int offset)
 {
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 const uint8x16_p8 mask = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
 return VectorPermute64x2(VectorLoad64x2u(data, offset), mask);
 #else
@@ -1481,7 +1481,7 @@ uint64x2_p8 VectorPack(const uint64x2_p8 x, const uint64x2_p8 y)
 template static inline
 uint64x2_p8 VectorShiftLeft(const uint64x2_p8 val)
 {
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 return (uint64x2_p8)vec_sld((uint8x16_p8)val, (uint8x16_p8)val, (16-L)&0xf);
 #else
 return (uint64x2_p8)vec_sld((uint8x16_p8)val, (uint8x16_p8)val, L&0xf);
diff --git a/shark.cpp b/shark.cpp
index a8c304eb..6e3b8da3 100644
--- a/shark.cpp
+++ b/shark.cpp
@@ -67,7 +67,7 @@ void SHARK::Base::UncheckedSetKey(const byte *key, unsigned int keyLen, const Na
 m_roundKeys[i] = SHARKTransform(m_roundKeys[i]);
 }
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 m_roundKeys[0] = ByteReverse(m_roundKeys[0]);
 m_roundKeys[m_rounds] = ByteReverse(m_roundKeys[m_rounds]);
 #endif
@@ -84,7 +84,7 @@ void SHARK::Enc::InitForKeySetup()
 m_roundKeys[DEFAULT_ROUNDS] = SHARKTransform(cbox[0][DEFAULT_ROUNDS]);
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 m_roundKeys[0] = ByteReverse(m_roundKeys[0]);
 m_roundKeys[m_rounds] = ByteReverse(m_roundKeys[m_rounds]);
 #endif
diff --git a/simon128-simd.cpp b/simon128-simd.cpp
index 5d5af659..21af1d40 100644
--- a/simon128-simd.cpp
+++ b/simon128-simd.cpp
@@ -99,7 +99,7 @@ inline uint64x2_t RotateRight64(const uint64x2_t& val)
 template <>
 inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -115,7 +115,7 @@ inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 template <>
 inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -567,7 +567,7 @@ inline uint64x2_p SIMON128_f(const uint64x2_p val)
 inline void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -595,7 +595,7 @@ inline void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigne
 std::swap(x1, y1);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -609,7 +609,7 @@ inline void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigne
 inline void SIMON128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -638,7 +638,7 @@ inline void SIMON128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigne
 y1 = VectorXor(VectorXor(y1, SIMON128_f(x1)), rk2);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -654,7 +654,7 @@ inline void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -692,7 +692,7 @@ inline void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -713,7 +713,7 @@ inline void SIMON128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -752,7 +752,7 @@ inline void SIMON128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 y3 = VectorXor(VectorXor(y3, SIMON128_f(x3)), rk2);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
diff --git a/simon64-simd.cpp b/simon64-simd.cpp
index a103dddb..726d74a3 100644
--- a/simon64-simd.cpp
+++ b/simon64-simd.cpp
@@ -105,7 +105,7 @@ inline uint32x4_t RotateRight32(const uint32x4_t& val)
 template <>
 inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 14,13,12,15, 10,9,8,11, 6,5,4,7, 2,1,0,3 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -121,7 +121,7 @@ inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 template <>
 inline uint32x4_t RotateRight32<8>(const uint32x4_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 12,15,14,13, 8,11,10,9, 4,7,6,5, 0,3,2,1 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -559,7 +559,7 @@ inline uint32x4_p SIMON64_f(const uint32x4_p val)
 inline void SIMON64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -587,7 +587,7 @@ inline void SIMON64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 std::swap(x1, y1);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
@@ -603,7 +603,7 @@ inline void SIMON64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 inline void SIMON64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
 const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -632,7 +632,7 @@ inline void SIMON64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
 y1 = VectorXor(VectorXor(y1, SIMON64_f(x1)), rk2);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
@@ -649,7 +649,7 @@ inline void SIMON64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -687,7 +687,7 @@ inline void SIMON64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
@@ -708,7 +708,7 @@ inline void SIMON64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -747,7 +747,7 @@ inline void SIMON64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 y3 = VectorXor(VectorXor(y3, SIMON64_f(x3)), rk2);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
diff --git a/speck128-simd.cpp b/speck128-simd.cpp
index da7d949e..abb856a0 100644
--- a/speck128-simd.cpp
+++ b/speck128-simd.cpp
@@ -96,7 +96,7 @@ inline uint64x2_t RotateRight64(const uint64x2_t& val)
 template <>
 inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -112,7 +112,7 @@ inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 template <>
 inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -493,7 +493,7 @@ inline uint64x2_p RotateRight64(const uint64x2_p val)
 void SPECK128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -517,7 +517,7 @@ void SPECK128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int r
 y1 = VectorXor(y1, x1);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -531,7 +531,7 @@ void SPECK128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int r
 void SPECK128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -554,7 +554,7 @@ void SPECK128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int r
 x1 = RotateLeft64<8>(x1);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -570,7 +570,7 @@ void SPECK128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -608,7 +608,7 @@ void SPECK128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 y3 = VectorXor(y3, x3);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -629,7 +629,7 @@ void SPECK128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
@@ -667,7 +667,7 @@ void SPECK128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 x3 = RotateLeft64<8>(x3);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
 const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
 #else
diff --git a/speck64-simd.cpp b/speck64-simd.cpp
index e4ce8fa2..fe970679 100644
--- a/speck64-simd.cpp
+++ b/speck64-simd.cpp
@@ -102,7 +102,7 @@ inline uint32x4_t RotateRight32(const uint32x4_t& val)
 template <>
 inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 14,13,12,15, 10,9,8,11, 6,5,4,7, 2,1,0,3 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -118,7 +118,7 @@ inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 template <>
 inline uint32x4_t RotateRight32<8>(const uint32x4_t& val)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8_t maskb[16] = { 12,15,14,13, 8,11,10,9, 4,7,6,5, 0,3,2,1 };
 const uint8x16_t mask = vld1q_u8(maskb);
 #else
@@ -498,7 +498,7 @@ inline uint32x4_p RotateRight32(const uint32x4_p val)
 void SPECK64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -522,7 +522,7 @@ void SPECK64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 y1 = VectorXor(y1, x1);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
@@ -538,7 +538,7 @@ void SPECK64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 void SPECK64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
 const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -562,7 +562,7 @@ void SPECK64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
 x1 = RotateLeft32<8>(x1);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
@@ -579,7 +579,7 @@ void SPECK64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -620,7 +620,7 @@ void SPECK64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 y3 = VectorXor(y3, x3);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
@@ -641,7 +641,7 @@ void SPECK64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
 uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
 {
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m1 = {7,6,5,4, 15,14,13,12, 23,22,21,20, 31,30,29,28};
 const uint8x16_p m2 = {3,2,1,0, 11,10,9,8, 19,18,17,16, 27,26,25,24};
 #else
@@ -682,7 +682,7 @@ void SPECK64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 x3 = RotateLeft32<8>(x3);
 }
-#if defined(CRYPTOPP_BIG_ENDIAN)
+#if (CRYPTOPP_BIG_ENDIAN)
 const uint8x16_p m3 = {19,18,17,16, 3,2,1,0, 23,22,21,20, 7,6,5,4};
 const uint8x16_p m4 = {27,26,25,24, 11,10,9,8, 31,30,29,28, 15,14,13,12};
 #else
diff --git a/validat1.cpp b/validat1.cpp
index b6f2f65a..d7b4ae19 100644
--- a/validat1.cpp
+++ b/validat1.cpp
@@ -1121,7 +1121,7 @@ bool TestAltivecOps()
 pass1 = (0 == std::memcmp(st3, dest+3, 16)) && pass1;
 CRYPTOPP_ASSERT(pass1);
-#if defined(CRYPTOPP_LITTLE_ENDIAN)
+#if (CRYPTOPP_LITTLE_ENDIAN)
 VectorStore(VectorLoadBE(src), dest);
 pass1 = (0 != std::memcmp(src, dest, 16)) && pass1;
 CRYPTOPP_ASSERT(pass1);
diff --git a/validat3.cpp b/validat3.cpp
index 40facce2..b7cc2d34 100644
--- a/validat3.cpp
+++ b/validat3.cpp
@@ -214,7 +214,7 @@ bool TestSettings()
 if (w == 0x04030201L)
 {
-#ifdef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_LITTLE_ENDIAN)
 std::cout << "passed: ";
 #else
 std::cout << "FAILED: ";
@@ -224,7 +224,7 @@ bool TestSettings()
 }
 else if (w == 0x01020304L)
 {
-#ifndef CRYPTOPP_LITTLE_ENDIAN
+#if (CRYPTOPP_BIG_ENDIAN)
 std::cout << "passed: ";
 #else
 std::cout << "FAILED: ";
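The mechanical change repeated in every hunk above is the move from "#ifdef CRYPTOPP_LITTLE_ENDIAN" / "#if defined(CRYPTOPP_BIG_ENDIAN)" to "#if (CRYPTOPP_LITTLE_ENDIAN)" / "#if (CRYPTOPP_BIG_ENDIAN)": the endian macros are now tested by value rather than by mere definition, in line with the new config.h comment that the macro must be non-0. A minimal stand-alone sketch of the difference follows; it is a hypothetical demo, not part of the patch, and the default of defining CRYPTOPP_LITTLE_ENDIAN to 1 merely mirrors the config.h fallback shown above.

// demo_endian_macro.cpp -- hypothetical example, not from the Crypto++ sources.
// Shows why "#if (MACRO)" differs from "#ifdef MACRO": a macro explicitly
// defined to 0 (e.g. compiled with -DCRYPTOPP_LITTLE_ENDIAN=0) still satisfies
// #ifdef, but evaluates false under #if; an identifier that is never defined
// simply evaluates to 0 inside #if.
#include <iostream>

#ifndef CRYPTOPP_LITTLE_ENDIAN
# define CRYPTOPP_LITTLE_ENDIAN 1  // assumed default, as in config.h above
#endif

int main()
{
#ifdef CRYPTOPP_LITTLE_ENDIAN
    std::cout << "#ifdef: branch taken whenever the macro is defined, even as 0\n";
#endif

#if (CRYPTOPP_LITTLE_ENDIAN)
    std::cout << "#if (...): branch taken only when the macro expands to non-zero\n";
#endif

    return 0;
}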