From afffba7b7bc81e135ca0d10e3ee97774a6574361 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Fri, 7 Jun 2019 13:02:25 -0400 Subject: [PATCH 01/11] Add -mtune=native option to makefile Added for Solaris 11 .3 on SPARC64 --- GNUmakefile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/GNUmakefile b/GNUmakefile index 5e98825d..5f3c30c3 100755 --- a/GNUmakefile +++ b/GNUmakefile @@ -860,6 +860,15 @@ ifeq ($(findstring native,$(MAKECMDGOALS)),native) NATIVE_OPT = -march=native endif # NATIVE_OPT + # And tune + ifeq ($(NATIVE_OPT),) + TOPT = -mtune=native + HAVE_OPT = $(shell $(CXX) $(TCXXFLAGS) $(ZOPT) $(TOPT) $(TPROG) -o $(TOUT) 2>&1 | tr ' ' '\n' | wc -l) + ifeq ($(strip $(HAVE_OPT)),0) + NATIVE_OPT = -mtune=native + endif # NATIVE_OPT + endif + # Try SunCC next ifeq ($(NATIVE_OPT),) TOPT = -native From 43b01973b1dc0c50cd394502f7475e2c1039ad35 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sat, 8 Jun 2019 11:00:11 -0400 Subject: [PATCH 02/11] Clear lgtm findings We did some refactoring and added sse_simd.h. Over time more SSE functions will likely move into sse_simd.h --- Filelist.txt | 1 + asn.cpp | 16 ++++---- chacha_avx.cpp | 85 +++++++++++++++++++--------------------- cryptlib.vcxproj | 1 + cryptlib.vcxproj.filters | 3 ++ gf2n_simd.cpp | 26 ++++++------ sse_simd.h | 84 +++++++++++++++++++++++++++++++++++++++ 7 files changed, 149 insertions(+), 67 deletions(-) create mode 100644 sse_simd.h diff --git a/Filelist.txt b/Filelist.txt index 933d1226..141ec0e8 100644 --- a/Filelist.txt +++ b/Filelist.txt @@ -358,6 +358,7 @@ square.cpp square.h squaretb.cpp sse_simd.cpp +sse_simd.h stdcpp.h strciphr.cpp strciphr.h diff --git a/asn.cpp b/asn.cpp index ec5a097d..1631f52e 100644 --- a/asn.cpp +++ b/asn.cpp @@ -395,25 +395,25 @@ void EncodedObjectFilter::Put(const byte *inString, size_t length) } BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(DefaultTag); } BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue, byte asnTag) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(asnTag); } BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(DefaultTag); } BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue, byte asnTag) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(asnTag); } @@ -514,22 +514,22 @@ lword BERGeneralDecoder::ReduceLength(lword delta) } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue, byte asnTag) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue, byte asnTag) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) { 
} diff --git a/chacha_avx.cpp b/chacha_avx.cpp index 20693488..72dc42c2 100644 --- a/chacha_avx.cpp +++ b/chacha_avx.cpp @@ -24,6 +24,7 @@ # include # include # include +# include "sse_simd.h" #endif // Squash MS LNK4221 and libtool warnings @@ -91,14 +92,10 @@ NAMESPACE_BEGIN(CryptoPP) void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds) { - MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state); - MAYBE_CONST __m256i* input_mm = (MAYBE_CONST __m256i*)(input); - __m256i* output_mm = reinterpret_cast<__m256i*>(output); - - const __m256i state0 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 0)); - const __m256i state1 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 1)); - const __m256i state2 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 2)); - const __m256i state3 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 3)); + const __m256i state0 = _mm256_broadcastsi128_si256(load_m128i<0>(state)); + const __m256i state1 = _mm256_broadcastsi128_si256(load_m128i<1>(state)); + const __m256i state2 = _mm256_broadcastsi128_si256(load_m128i<2>(state)); + const __m256i state3 = _mm256_broadcastsi128_si256(load_m128i<3>(state)); const __m256i CTR0 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 4); const __m256i CTR1 = _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 5); @@ -304,80 +301,80 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * X3_3 = _mm256_add_epi32(X3_3, state3); X3_3 = _mm256_add_epi64(X3_3, CTR3); - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 0, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 0), + store_m256i<0>(output, _mm256_xor_si256(load_m256i<0>(input), _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 1, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 1), + store_m256i<1>(output, _mm256_xor_si256(load_m256i<1>(input), _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 2, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 2), + store_m256i<2>(output, _mm256_xor_si256(load_m256i<2>(input), _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 3, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 3), + store_m256i<3>(output, _mm256_xor_si256(load_m256i<3>(input), _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)))); } else { - _mm256_storeu_si256(output_mm + 0, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 1, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 2, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 3, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); + store_m256i<0>(output, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); + store_m256i<1>(output, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); + store_m256i<2>(output, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); + store_m256i<3>(output, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 4, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 4), + store_m256i<4>(output, _mm256_xor_si256(load_m256i<4>(input), _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 5, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 5), + store_m256i<5>(output, _mm256_xor_si256(load_m256i<5>(input), _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)))); - 
_mm256_storeu_si256(output_mm + 6, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 6), + store_m256i<6>(output, _mm256_xor_si256(load_m256i<6>(input), _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 7, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 7), + store_m256i<7>(output, _mm256_xor_si256(load_m256i<7>(input), _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)))); } else { - _mm256_storeu_si256(output_mm + 4, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 5, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 6, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 7, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); + store_m256i<4>(output, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); + store_m256i<5>(output, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); + store_m256i<6>(output, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); + store_m256i<7>(output, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 8, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 8), + store_m256i<8>(output, _mm256_xor_si256(load_m256i<8>(input), _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 9, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 9), + store_m256i<9>(output, _mm256_xor_si256(load_m256i<9>(input), _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 10, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 10), + store_m256i<10>(output, _mm256_xor_si256(load_m256i<10>(input), _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 11, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 11), + store_m256i<11>(output, _mm256_xor_si256(load_m256i<11>(input), _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)))); } else { - _mm256_storeu_si256(output_mm + 8, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 9, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 10, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 11, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); + store_m256i<8>(output, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); + store_m256i<9>(output, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); + store_m256i<10>(output, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); + store_m256i<11>(output, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 12, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 12), + store_m256i<12>(output, _mm256_xor_si256(load_m256i<12>(input), _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 13, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 13), + store_m256i<13>(output, _mm256_xor_si256(load_m256i<13>(input), _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 14, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 14), + store_m256i<14>(output, _mm256_xor_si256(load_m256i<14>(input), _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 15, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 15), + store_m256i<15>(output, _mm256_xor_si256(load_m256i<15>(input), _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)))); } else { - 
_mm256_storeu_si256(output_mm + 12, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 13, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 14, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 15, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); + store_m256i<12>(output, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); + store_m256i<13>(output, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); + store_m256i<14>(output, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); + store_m256i<15>(output, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); } // https://software.intel.com/en-us/articles/avoiding-avx-sse-transition-penalties diff --git a/cryptlib.vcxproj b/cryptlib.vcxproj index 4985c096..1f81c046 100644 --- a/cryptlib.vcxproj +++ b/cryptlib.vcxproj @@ -538,6 +538,7 @@ + diff --git a/cryptlib.vcxproj.filters b/cryptlib.vcxproj.filters index de140e33..7b91f87b 100644 --- a/cryptlib.vcxproj.filters +++ b/cryptlib.vcxproj.filters @@ -993,6 +993,9 @@ Header Files + + Header Files + Header Files diff --git a/gf2n_simd.cpp b/gf2n_simd.cpp index 1d4d933f..3bd3cd0e 100644 --- a/gf2n_simd.cpp +++ b/gf2n_simd.cpp @@ -28,6 +28,7 @@ #if (CRYPTOPP_CLMUL_AVAILABLE) # include # include +# include "sse_simd.h" #endif #if (CRYPTOPP_ARM_PMULL_AVAILABLE) @@ -465,36 +466,31 @@ NAMESPACE_BEGIN(CryptoPP) void GF2NT_233_Multiply_Reduce_CLMUL(const word* pA, const word* pB, word* pC) { - const __m128i* pAA = reinterpret_cast(pA); - const __m128i* pBB = reinterpret_cast(pB); - __m128i a0 = _mm_loadu_si128(pAA+0); - __m128i a1 = _mm_loadu_si128(pAA+1); - __m128i b0 = _mm_loadu_si128(pBB+0); - __m128i b1 = _mm_loadu_si128(pBB+1); + __m128i a0 = load_m128i<0>(pA); + __m128i a1 = load_m128i<1>(pA); + __m128i b0 = load_m128i<0>(pB); + __m128i b1 = load_m128i<1>(pB); __m128i c0, c1, c2, c3; F2N_Multiply_256x256_CLMUL(c3, c2, c1, c0, a1, a0, b1, b0); GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0); - __m128i* pCC = reinterpret_cast<__m128i*>(pC); - _mm_storeu_si128(pCC+0, c0); - _mm_storeu_si128(pCC+1, c1); + store_m128i<0>(pC, c0); + store_m128i<1>(pC, c1); } void GF2NT_233_Square_Reduce_CLMUL(const word* pA, word* pC) { - const __m128i* pAA = reinterpret_cast(pA); - __m128i a0 = _mm_loadu_si128(pAA+0); - __m128i a1 = _mm_loadu_si128(pAA+1); + __m128i a0 = load_m128i<0>(pA); + __m128i a1 = load_m128i<1>(pA); __m128i c0, c1, c2, c3; F2N_Square_256_CLMUL(c3, c2, c1, c0, a1, a0); GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0); - __m128i* pCC = reinterpret_cast<__m128i*>(pC); - _mm_storeu_si128(pCC+0, c0); - _mm_storeu_si128(pCC+1, c1); + store_m128i<0>(pC, c0); + store_m128i<1>(pC, c1); } #elif (CRYPTOPP_ARM_PMULL_AVAILABLE) diff --git a/sse_simd.h b/sse_simd.h new file mode 100644 index 00000000..c77cbd82 --- /dev/null +++ b/sse_simd.h @@ -0,0 +1,84 @@ +// sse_simd.h - written and placed in public domain by Jeffrey Walton +// Helper functions to work with SSE and above. The class file +// was added after a scan by lgtm.com. We caught some findings +// that were not problems, but we refactored to squash them. 
+ +#ifndef CRYPTOPP_SSE_CRYPTO_H +#define CRYPTOPP_SSE_CRYPTO_H + +#include "config.h" + +#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) +# include +#endif + +#if (CRYPTOPP_AVX2_AVAILABLE) +# include +#endif + +NAMESPACE_BEGIN(CryptoPP) + +#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) + +// N specifies the nth 128-bit element +template +inline __m128i load_m128i(T* ptr) +{ + enum { SCALE=sizeof(__m128i)/sizeof(T) }; + return _mm_loadu_si128( + reinterpret_cast<__m128i*>(ptr+SCALE*N)); +} + +// N specifies the nth 128-bit element +template +inline __m128i load_m128i(const T* ptr) +{ + enum { SCALE=sizeof(__m128i)/sizeof(T) }; + return _mm_loadu_si128( + reinterpret_cast(ptr+SCALE*N)); +} + +// N specifies the nth 128-bit element +template +inline void store_m128i(T* ptr, __m128i val) +{ + enum { SCALE=sizeof(__m128i)/sizeof(T) }; + return _mm_storeu_si128( + reinterpret_cast<__m128i*>(ptr+SCALE*N), val); +} +#endif + +#if (CRYPTOPP_AVX2_AVAILABLE) + +// N specifies the nth 256-bit element +template +inline __m256i load_m256i(T* ptr) +{ + enum { SCALE=sizeof(__m256i)/sizeof(T) }; + return _mm256_loadu_si256( + reinterpret_cast<__m256i*>(ptr+SCALE*N)); +} + +// N specifies the nth 256-bit element +template +inline __m256i load_m256i(const T* ptr) +{ + enum { SCALE=sizeof(__m256i)/sizeof(T) }; + return _mm256_loadu_si256( + reinterpret_cast(ptr+SCALE*N)); +} + +// N specifies the nth 256-bit element +template +inline void store_m256i(T* ptr, __m256i val) +{ + enum { SCALE=sizeof(__m256i)/sizeof(T) }; + return _mm256_storeu_si256( + reinterpret_cast<__m256i*>(ptr+SCALE*N), val); +} + +#endif + +NAMESPACE_END + +#endif // CRYPTOPP_SSE_CRYPTO_H \ No newline at end of file From 6a11f00768c969a10c0d0f2048da3f69a18c728c Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sat, 8 Jun 2019 12:59:14 -0400 Subject: [PATCH 03/11] Clear lgtm findings --- chacha_simd.cpp | 85 ++++++++++++++++++++++++------------------------- sse_simd.h | 45 +++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 45 deletions(-) diff --git a/chacha_simd.cpp b/chacha_simd.cpp index 9fd6b0f1..e225579d 100644 --- a/chacha_simd.cpp +++ b/chacha_simd.cpp @@ -38,6 +38,7 @@ #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) # include # include +# include "sse_simd.h" #endif #if defined(__SSSE3__) @@ -565,14 +566,10 @@ void ChaCha_OperateKeystream_NEON(const word32 *state, const byte* input, byte * void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *output, unsigned int rounds) { - const __m128i* state_mm = reinterpret_cast(state); - const __m128i* input_mm = reinterpret_cast(input); - __m128i* output_mm = reinterpret_cast<__m128i*>(output); - - const __m128i state0 = _mm_load_si128(state_mm + 0); - const __m128i state1 = _mm_load_si128(state_mm + 1); - const __m128i state2 = _mm_load_si128(state_mm + 2); - const __m128i state3 = _mm_load_si128(state_mm + 3); + const __m128i state0 = load_m128i<0>(state); + const __m128i state1 = load_m128i<1>(state); + const __m128i state2 = load_m128i<2>(state); + const __m128i state3 = load_m128i<3>(state); __m128i r0_0 = state0; __m128i r0_1 = state1; @@ -772,57 +769,57 @@ void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte * r3_3 = _mm_add_epi32(r3_3, state3); r3_3 = _mm_add_epi64(r3_3, _mm_set_epi32(0, 0, 0, 3)); - if (input_mm) + if (input) { - r0_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 0), r0_0); - r0_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 1), r0_1); - r0_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 2), r0_2); - r0_3 = 
_mm_xor_si128(_mm_loadu_si128(input_mm + 3), r0_3); + r0_0 = _mm_xor_si128(load_m128i<0>(input), r0_0); + r0_1 = _mm_xor_si128(load_m128i<1>(input), r0_1); + r0_2 = _mm_xor_si128(load_m128i<2>(input), r0_2); + r0_3 = _mm_xor_si128(load_m128i<3>(input), r0_3); } - _mm_storeu_si128(output_mm + 0, r0_0); - _mm_storeu_si128(output_mm + 1, r0_1); - _mm_storeu_si128(output_mm + 2, r0_2); - _mm_storeu_si128(output_mm + 3, r0_3); + store_m128i<0>(output, r0_0); + store_m128i<1>(output, r0_1); + store_m128i<2>(output, r0_2); + store_m128i<3>(output, r0_3); - if (input_mm) + if (input) { - r1_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 4), r1_0); - r1_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 5), r1_1); - r1_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 6), r1_2); - r1_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 7), r1_3); + r1_0 = _mm_xor_si128(load_m128i<4>(input), r1_0); + r1_1 = _mm_xor_si128(load_m128i<5>(input), r1_1); + r1_2 = _mm_xor_si128(load_m128i<6>(input), r1_2); + r1_3 = _mm_xor_si128(load_m128i<7>(input), r1_3); } - _mm_storeu_si128(output_mm + 4, r1_0); - _mm_storeu_si128(output_mm + 5, r1_1); - _mm_storeu_si128(output_mm + 6, r1_2); - _mm_storeu_si128(output_mm + 7, r1_3); + store_m128i<4>(output, r1_0); + store_m128i<5>(output, r1_1); + store_m128i<6>(output, r1_2); + store_m128i<7>(output, r1_3); - if (input_mm) + if (input) { - r2_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 8), r2_0); - r2_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 9), r2_1); - r2_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 10), r2_2); - r2_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 11), r2_3); + r2_0 = _mm_xor_si128(load_m128i< 8>(input), r2_0); + r2_1 = _mm_xor_si128(load_m128i< 9>(input), r2_1); + r2_2 = _mm_xor_si128(load_m128i<10>(input), r2_2); + r2_3 = _mm_xor_si128(load_m128i<11>(input), r2_3); } - _mm_storeu_si128(output_mm + 8, r2_0); - _mm_storeu_si128(output_mm + 9, r2_1); - _mm_storeu_si128(output_mm + 10, r2_2); - _mm_storeu_si128(output_mm + 11, r2_3); + store_m128i< 8>(output, r2_0); + store_m128i< 9>(output, r2_1); + store_m128i<10>(output, r2_2); + store_m128i<11>(output, r2_3); - if (input_mm) + if (input) { - r3_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 12), r3_0); - r3_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 13), r3_1); - r3_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 14), r3_2); - r3_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 15), r3_3); + r3_0 = _mm_xor_si128(load_m128i<12>(input), r3_0); + r3_1 = _mm_xor_si128(load_m128i<13>(input), r3_1); + r3_2 = _mm_xor_si128(load_m128i<14>(input), r3_2); + r3_3 = _mm_xor_si128(load_m128i<15>(input), r3_3); } - _mm_storeu_si128(output_mm + 12, r3_0); - _mm_storeu_si128(output_mm + 13, r3_1); - _mm_storeu_si128(output_mm + 14, r3_2); - _mm_storeu_si128(output_mm + 15, r3_3); + store_m128i<12>(output, r3_0); + store_m128i<13>(output, r3_1); + store_m128i<14>(output, r3_2); + store_m128i<15>(output, r3_3); } #endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE diff --git a/sse_simd.h b/sse_simd.h index c77cbd82..0effad47 100644 --- a/sse_simd.h +++ b/sse_simd.h @@ -20,6 +20,27 @@ NAMESPACE_BEGIN(CryptoPP) #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) +template +inline __m128i load_m128i(T* ptr) +{ + return _mm_loadu_si128( + reinterpret_cast<__m128i*>(ptr)); +} + +template +inline __m128i load_m128i(const T* ptr) +{ + return _mm_loadu_si128( + reinterpret_cast(ptr)); +} + +template +inline void store_m128i(T* ptr, __m128i val) +{ + return _mm_storeu_si128( + reinterpret_cast<__m128i*>(ptr), val); +} + // N specifies the nth 128-bit element template inline 
__m128i load_m128i(T* ptr) @@ -46,10 +67,32 @@ inline void store_m128i(T* ptr, __m128i val) return _mm_storeu_si128( reinterpret_cast<__m128i*>(ptr+SCALE*N), val); } + #endif #if (CRYPTOPP_AVX2_AVAILABLE) +template +inline __m256i load_m256i(T* ptr) +{ + return _mm256_loadu_si256( + reinterpret_cast<__m256i*>(ptr)); +} + +template +inline __m256i load_m256i(const T* ptr) +{ + return _mm256_loadu_si256( + reinterpret_cast(ptr)); +} + +template +inline void store_m256i(T* ptr, __m256i val) +{ + return _mm256_storeu_si256( + reinterpret_cast<__m256i*>(ptr), val); +} + // N specifies the nth 256-bit element template inline __m256i load_m256i(T* ptr) @@ -81,4 +124,4 @@ inline void store_m256i(T* ptr, __m256i val) NAMESPACE_END -#endif // CRYPTOPP_SSE_CRYPTO_H \ No newline at end of file +#endif // CRYPTOPP_SSE_CRYPTO_H From 3ce1823fd190a8518c99882d22ca86e22a642650 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 00:00:22 -0400 Subject: [PATCH 04/11] Fix SunCC compile Sun's compiler is mostly braindead. --- chacha_avx.cpp | 7 ---- sse_simd.h | 90 +++++++++++++++++++++++--------------------------- 2 files changed, 42 insertions(+), 55 deletions(-) diff --git a/chacha_avx.cpp b/chacha_avx.cpp index 72dc42c2..cdf50266 100644 --- a/chacha_avx.cpp +++ b/chacha_avx.cpp @@ -30,13 +30,6 @@ // Squash MS LNK4221 and libtool warnings extern const char CHACHA_AVX_FNAME[] = __FILE__; -// Sun Studio 12.4 OK, 12.5 and 12.6 compile error. -#if (__SUNPRO_CC >= 0x5140) && (__SUNPRO_CC <= 0x5150) -# define MAYBE_CONST -#else -# define MAYBE_CONST const -#endif - // VS2017 and global optimization bug. TODO, figure out when // we can re-enable full optimizations for VS2017. Also see // https://github.com/weidai11/cryptopp/issues/649 and diff --git a/sse_simd.h b/sse_simd.h index 0effad47..fe3a0332 100644 --- a/sse_simd.h +++ b/sse_simd.h @@ -20,43 +20,40 @@ NAMESPACE_BEGIN(CryptoPP) #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) -template -inline __m128i load_m128i(T* ptr) +template +inline __m128i load_m128i(const byte* ptr) { + enum { SCALE=sizeof(__m128i)/sizeof(byte) }; return _mm_loadu_si128( - reinterpret_cast<__m128i*>(ptr)); + const_cast<__m128i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } -template -inline __m128i load_m128i(const T* ptr) +template +inline __m128i load_m128i(const word16* ptr) { + enum { SCALE=sizeof(__m128i)/sizeof(word16) }; return _mm_loadu_si128( - reinterpret_cast(ptr)); + const_cast<__m128i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } -template -inline void store_m128i(T* ptr, __m128i val) +template +inline __m128i load_m128i(const word32* ptr) { - return _mm_storeu_si128( - reinterpret_cast<__m128i*>(ptr), val); + enum { SCALE=sizeof(__m128i)/sizeof(word32) }; + return _mm_loadu_si128( + const_cast<__m128i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } -// N specifies the nth 128-bit element -template -inline __m128i load_m128i(T* ptr) +template +inline __m128i load_m128i(const word64* ptr) { - enum { SCALE=sizeof(__m128i)/sizeof(T) }; + enum { SCALE=sizeof(__m128i)/sizeof(word64) }; return _mm_loadu_si128( - reinterpret_cast<__m128i*>(ptr+SCALE*N)); -} - -// N specifies the nth 128-bit element -template -inline __m128i load_m128i(const T* ptr) -{ - enum { SCALE=sizeof(__m128i)/sizeof(T) }; - return _mm_loadu_si128( - reinterpret_cast(ptr+SCALE*N)); + const_cast<__m128i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } // N specifies the nth 128-bit element @@ -72,43 +69,40 @@ inline void store_m128i(T* ptr, __m128i val) 
#if (CRYPTOPP_AVX2_AVAILABLE) -template -inline __m256i load_m256i(T* ptr) +template +inline __m256i load_m256i(const byte* ptr) { + enum { SCALE=sizeof(__m256i)/sizeof(byte) }; return _mm256_loadu_si256( - reinterpret_cast<__m256i*>(ptr)); + const_cast<__m256i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } -template -inline __m256i load_m256i(const T* ptr) +template +inline __m256i load_m256i(const word16* ptr) { + enum { SCALE=sizeof(__m256i)/sizeof(word16) }; return _mm256_loadu_si256( - reinterpret_cast(ptr)); + const_cast<__m256i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } -template -inline void store_m256i(T* ptr, __m256i val) +template +inline __m256i load_m256i(const word32* ptr) { - return _mm256_storeu_si256( - reinterpret_cast<__m256i*>(ptr), val); + enum { SCALE=sizeof(__m256i)/sizeof(word32) }; + return _mm256_loadu_si256( + const_cast<__m256i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } -// N specifies the nth 256-bit element -template -inline __m256i load_m256i(T* ptr) +template +inline __m256i load_m256i(const word64* ptr) { - enum { SCALE=sizeof(__m256i)/sizeof(T) }; + enum { SCALE=sizeof(__m256i)/sizeof(word64) }; return _mm256_loadu_si256( - reinterpret_cast<__m256i*>(ptr+SCALE*N)); -} - -// N specifies the nth 256-bit element -template -inline __m256i load_m256i(const T* ptr) -{ - enum { SCALE=sizeof(__m256i)/sizeof(T) }; - return _mm256_loadu_si256( - reinterpret_cast(ptr+SCALE*N)); + const_cast<__m256i*>( // SunCC workaround + reinterpret_cast(ptr+SCALE*N))); } // N specifies the nth 256-bit element From 8fab1c3677198f941ae83fcf322edc420603d325 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 01:49:44 -0400 Subject: [PATCH 05/11] Revert changes for lgtm findings This broke SunCC to the point of no repair. SunCC is using AVX2 instructions for C++ and SSE2. Man this compiler sucks... 
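As a rough sketch (not part of any patch in this series), the direct-cast pattern that the later rework settles on writes the loads and stores inline in each source file, so an SSE2-only translation unit no longer even sees the AVX2 helpers from a shared header. The `state` and `output` pointers below are placeholders standing in for the ChaCha keystream arguments; offsets are counted in elements of the pointed-to type, i.e. four word32 values or sixteen bytes per 128-bit lane:

    // Hypothetical const word32* state and byte* output arguments.
    const __m128i s0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(state + 0*4));
    const __m128i s1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(state + 1*4));
    _mm_storeu_si128(reinterpret_cast<__m128i*>(output + 0*16), s0);
    _mm_storeu_si128(reinterpret_cast<__m128i*>(output + 1*16), s1);
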
--- Filelist.txt | 1 - asn.cpp | 16 +++--- chacha_avx.cpp | 92 ++++++++++++++++------------- chacha_simd.cpp | 85 ++++++++++++++------------- cryptlib.vcxproj | 1 - cryptlib.vcxproj.filters | 3 - gf2n_simd.cpp | 26 +++++---- sse_simd.h | 121 --------------------------------------- 8 files changed, 118 insertions(+), 227 deletions(-) delete mode 100644 sse_simd.h diff --git a/Filelist.txt b/Filelist.txt index 141ec0e8..933d1226 100644 --- a/Filelist.txt +++ b/Filelist.txt @@ -358,7 +358,6 @@ square.cpp square.h squaretb.cpp sse_simd.cpp -sse_simd.h stdcpp.h strciphr.cpp strciphr.h diff --git a/asn.cpp b/asn.cpp index 1631f52e..ec5a097d 100644 --- a/asn.cpp +++ b/asn.cpp @@ -395,25 +395,25 @@ void EncodedObjectFilter::Put(const byte *inString, size_t length) } BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue) - : m_inQueue(inQueue), m_length(0), m_finished(false) + : m_inQueue(inQueue), m_finished(false) { Init(DefaultTag); } BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue, byte asnTag) - : m_inQueue(inQueue), m_length(0), m_finished(false) + : m_inQueue(inQueue), m_finished(false) { Init(asnTag); } BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue) - : m_inQueue(inQueue), m_length(0), m_finished(false) + : m_inQueue(inQueue), m_finished(false) { Init(DefaultTag); } BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue, byte asnTag) - : m_inQueue(inQueue), m_length(0), m_finished(false) + : m_inQueue(inQueue), m_finished(false) { Init(asnTag); } @@ -514,22 +514,22 @@ lword BERGeneralDecoder::ReduceLength(lword delta) } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue) - : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) + : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue, byte asnTag) - : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) + : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue) - : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) + : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue, byte asnTag) - : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) + : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) { } diff --git a/chacha_avx.cpp b/chacha_avx.cpp index cdf50266..20693488 100644 --- a/chacha_avx.cpp +++ b/chacha_avx.cpp @@ -24,12 +24,18 @@ # include # include # include -# include "sse_simd.h" #endif // Squash MS LNK4221 and libtool warnings extern const char CHACHA_AVX_FNAME[] = __FILE__; +// Sun Studio 12.4 OK, 12.5 and 12.6 compile error. +#if (__SUNPRO_CC >= 0x5140) && (__SUNPRO_CC <= 0x5150) +# define MAYBE_CONST +#else +# define MAYBE_CONST const +#endif + // VS2017 and global optimization bug. TODO, figure out when // we can re-enable full optimizations for VS2017. 
Also see // https://github.com/weidai11/cryptopp/issues/649 and @@ -85,10 +91,14 @@ NAMESPACE_BEGIN(CryptoPP) void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds) { - const __m256i state0 = _mm256_broadcastsi128_si256(load_m128i<0>(state)); - const __m256i state1 = _mm256_broadcastsi128_si256(load_m128i<1>(state)); - const __m256i state2 = _mm256_broadcastsi128_si256(load_m128i<2>(state)); - const __m256i state3 = _mm256_broadcastsi128_si256(load_m128i<3>(state)); + MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state); + MAYBE_CONST __m256i* input_mm = (MAYBE_CONST __m256i*)(input); + __m256i* output_mm = reinterpret_cast<__m256i*>(output); + + const __m256i state0 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 0)); + const __m256i state1 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 1)); + const __m256i state2 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 2)); + const __m256i state3 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 3)); const __m256i CTR0 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 4); const __m256i CTR1 = _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 5); @@ -294,80 +304,80 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * X3_3 = _mm256_add_epi32(X3_3, state3); X3_3 = _mm256_add_epi64(X3_3, CTR3); - if (input) + if (input_mm) { - store_m256i<0>(output, _mm256_xor_si256(load_m256i<0>(input), + _mm256_storeu_si256(output_mm + 0, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 0), _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)))); - store_m256i<1>(output, _mm256_xor_si256(load_m256i<1>(input), + _mm256_storeu_si256(output_mm + 1, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 1), _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)))); - store_m256i<2>(output, _mm256_xor_si256(load_m256i<2>(input), + _mm256_storeu_si256(output_mm + 2, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 2), _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)))); - store_m256i<3>(output, _mm256_xor_si256(load_m256i<3>(input), + _mm256_storeu_si256(output_mm + 3, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 3), _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)))); } else { - store_m256i<0>(output, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); - store_m256i<1>(output, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); - store_m256i<2>(output, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); - store_m256i<3>(output, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 0, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 1, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 2, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 3, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); } - if (input) + if (input_mm) { - store_m256i<4>(output, _mm256_xor_si256(load_m256i<4>(input), + _mm256_storeu_si256(output_mm + 4, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 4), _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)))); - store_m256i<5>(output, _mm256_xor_si256(load_m256i<5>(input), + _mm256_storeu_si256(output_mm + 5, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 5), _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)))); - store_m256i<6>(output, _mm256_xor_si256(load_m256i<6>(input), + _mm256_storeu_si256(output_mm + 6, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 6), _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 
<< 4)))); - store_m256i<7>(output, _mm256_xor_si256(load_m256i<7>(input), + _mm256_storeu_si256(output_mm + 7, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 7), _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)))); } else { - store_m256i<4>(output, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); - store_m256i<5>(output, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); - store_m256i<6>(output, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); - store_m256i<7>(output, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 4, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 5, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 6, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); + _mm256_storeu_si256(output_mm + 7, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); } - if (input) + if (input_mm) { - store_m256i<8>(output, _mm256_xor_si256(load_m256i<8>(input), + _mm256_storeu_si256(output_mm + 8, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 8), _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)))); - store_m256i<9>(output, _mm256_xor_si256(load_m256i<9>(input), + _mm256_storeu_si256(output_mm + 9, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 9), _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)))); - store_m256i<10>(output, _mm256_xor_si256(load_m256i<10>(input), + _mm256_storeu_si256(output_mm + 10, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 10), _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)))); - store_m256i<11>(output, _mm256_xor_si256(load_m256i<11>(input), + _mm256_storeu_si256(output_mm + 11, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 11), _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)))); } else { - store_m256i<8>(output, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); - store_m256i<9>(output, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); - store_m256i<10>(output, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); - store_m256i<11>(output, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 8, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 9, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 10, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 11, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); } - if (input) + if (input_mm) { - store_m256i<12>(output, _mm256_xor_si256(load_m256i<12>(input), + _mm256_storeu_si256(output_mm + 12, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 12), _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)))); - store_m256i<13>(output, _mm256_xor_si256(load_m256i<13>(input), + _mm256_storeu_si256(output_mm + 13, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 13), _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)))); - store_m256i<14>(output, _mm256_xor_si256(load_m256i<14>(input), + _mm256_storeu_si256(output_mm + 14, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 14), _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)))); - store_m256i<15>(output, _mm256_xor_si256(load_m256i<15>(input), + _mm256_storeu_si256(output_mm + 15, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 15), _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)))); } else { - store_m256i<12>(output, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); - store_m256i<13>(output, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); - store_m256i<14>(output, 
_mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); - store_m256i<15>(output, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 12, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 13, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 14, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); + _mm256_storeu_si256(output_mm + 15, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); } // https://software.intel.com/en-us/articles/avoiding-avx-sse-transition-penalties diff --git a/chacha_simd.cpp b/chacha_simd.cpp index e225579d..9fd6b0f1 100644 --- a/chacha_simd.cpp +++ b/chacha_simd.cpp @@ -38,7 +38,6 @@ #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) # include # include -# include "sse_simd.h" #endif #if defined(__SSSE3__) @@ -566,10 +565,14 @@ void ChaCha_OperateKeystream_NEON(const word32 *state, const byte* input, byte * void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *output, unsigned int rounds) { - const __m128i state0 = load_m128i<0>(state); - const __m128i state1 = load_m128i<1>(state); - const __m128i state2 = load_m128i<2>(state); - const __m128i state3 = load_m128i<3>(state); + const __m128i* state_mm = reinterpret_cast(state); + const __m128i* input_mm = reinterpret_cast(input); + __m128i* output_mm = reinterpret_cast<__m128i*>(output); + + const __m128i state0 = _mm_load_si128(state_mm + 0); + const __m128i state1 = _mm_load_si128(state_mm + 1); + const __m128i state2 = _mm_load_si128(state_mm + 2); + const __m128i state3 = _mm_load_si128(state_mm + 3); __m128i r0_0 = state0; __m128i r0_1 = state1; @@ -769,57 +772,57 @@ void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte * r3_3 = _mm_add_epi32(r3_3, state3); r3_3 = _mm_add_epi64(r3_3, _mm_set_epi32(0, 0, 0, 3)); - if (input) + if (input_mm) { - r0_0 = _mm_xor_si128(load_m128i<0>(input), r0_0); - r0_1 = _mm_xor_si128(load_m128i<1>(input), r0_1); - r0_2 = _mm_xor_si128(load_m128i<2>(input), r0_2); - r0_3 = _mm_xor_si128(load_m128i<3>(input), r0_3); + r0_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 0), r0_0); + r0_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 1), r0_1); + r0_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 2), r0_2); + r0_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 3), r0_3); } - store_m128i<0>(output, r0_0); - store_m128i<1>(output, r0_1); - store_m128i<2>(output, r0_2); - store_m128i<3>(output, r0_3); + _mm_storeu_si128(output_mm + 0, r0_0); + _mm_storeu_si128(output_mm + 1, r0_1); + _mm_storeu_si128(output_mm + 2, r0_2); + _mm_storeu_si128(output_mm + 3, r0_3); - if (input) + if (input_mm) { - r1_0 = _mm_xor_si128(load_m128i<4>(input), r1_0); - r1_1 = _mm_xor_si128(load_m128i<5>(input), r1_1); - r1_2 = _mm_xor_si128(load_m128i<6>(input), r1_2); - r1_3 = _mm_xor_si128(load_m128i<7>(input), r1_3); + r1_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 4), r1_0); + r1_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 5), r1_1); + r1_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 6), r1_2); + r1_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 7), r1_3); } - store_m128i<4>(output, r1_0); - store_m128i<5>(output, r1_1); - store_m128i<6>(output, r1_2); - store_m128i<7>(output, r1_3); + _mm_storeu_si128(output_mm + 4, r1_0); + _mm_storeu_si128(output_mm + 5, r1_1); + _mm_storeu_si128(output_mm + 6, r1_2); + _mm_storeu_si128(output_mm + 7, r1_3); - if (input) + if (input_mm) { - r2_0 = _mm_xor_si128(load_m128i< 8>(input), r2_0); - r2_1 = _mm_xor_si128(load_m128i< 9>(input), 
r2_1); - r2_2 = _mm_xor_si128(load_m128i<10>(input), r2_2); - r2_3 = _mm_xor_si128(load_m128i<11>(input), r2_3); + r2_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 8), r2_0); + r2_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 9), r2_1); + r2_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 10), r2_2); + r2_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 11), r2_3); } - store_m128i< 8>(output, r2_0); - store_m128i< 9>(output, r2_1); - store_m128i<10>(output, r2_2); - store_m128i<11>(output, r2_3); + _mm_storeu_si128(output_mm + 8, r2_0); + _mm_storeu_si128(output_mm + 9, r2_1); + _mm_storeu_si128(output_mm + 10, r2_2); + _mm_storeu_si128(output_mm + 11, r2_3); - if (input) + if (input_mm) { - r3_0 = _mm_xor_si128(load_m128i<12>(input), r3_0); - r3_1 = _mm_xor_si128(load_m128i<13>(input), r3_1); - r3_2 = _mm_xor_si128(load_m128i<14>(input), r3_2); - r3_3 = _mm_xor_si128(load_m128i<15>(input), r3_3); + r3_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 12), r3_0); + r3_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 13), r3_1); + r3_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 14), r3_2); + r3_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 15), r3_3); } - store_m128i<12>(output, r3_0); - store_m128i<13>(output, r3_1); - store_m128i<14>(output, r3_2); - store_m128i<15>(output, r3_3); + _mm_storeu_si128(output_mm + 12, r3_0); + _mm_storeu_si128(output_mm + 13, r3_1); + _mm_storeu_si128(output_mm + 14, r3_2); + _mm_storeu_si128(output_mm + 15, r3_3); } #endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE diff --git a/cryptlib.vcxproj b/cryptlib.vcxproj index 1f81c046..4985c096 100644 --- a/cryptlib.vcxproj +++ b/cryptlib.vcxproj @@ -538,7 +538,6 @@ - diff --git a/cryptlib.vcxproj.filters b/cryptlib.vcxproj.filters index 7b91f87b..de140e33 100644 --- a/cryptlib.vcxproj.filters +++ b/cryptlib.vcxproj.filters @@ -993,9 +993,6 @@ Header Files - - Header Files - Header Files diff --git a/gf2n_simd.cpp b/gf2n_simd.cpp index 3bd3cd0e..1d4d933f 100644 --- a/gf2n_simd.cpp +++ b/gf2n_simd.cpp @@ -28,7 +28,6 @@ #if (CRYPTOPP_CLMUL_AVAILABLE) # include # include -# include "sse_simd.h" #endif #if (CRYPTOPP_ARM_PMULL_AVAILABLE) @@ -466,31 +465,36 @@ NAMESPACE_BEGIN(CryptoPP) void GF2NT_233_Multiply_Reduce_CLMUL(const word* pA, const word* pB, word* pC) { - __m128i a0 = load_m128i<0>(pA); - __m128i a1 = load_m128i<1>(pA); - __m128i b0 = load_m128i<0>(pB); - __m128i b1 = load_m128i<1>(pB); + const __m128i* pAA = reinterpret_cast(pA); + const __m128i* pBB = reinterpret_cast(pB); + __m128i a0 = _mm_loadu_si128(pAA+0); + __m128i a1 = _mm_loadu_si128(pAA+1); + __m128i b0 = _mm_loadu_si128(pBB+0); + __m128i b1 = _mm_loadu_si128(pBB+1); __m128i c0, c1, c2, c3; F2N_Multiply_256x256_CLMUL(c3, c2, c1, c0, a1, a0, b1, b0); GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0); - store_m128i<0>(pC, c0); - store_m128i<1>(pC, c1); + __m128i* pCC = reinterpret_cast<__m128i*>(pC); + _mm_storeu_si128(pCC+0, c0); + _mm_storeu_si128(pCC+1, c1); } void GF2NT_233_Square_Reduce_CLMUL(const word* pA, word* pC) { - __m128i a0 = load_m128i<0>(pA); - __m128i a1 = load_m128i<1>(pA); + const __m128i* pAA = reinterpret_cast(pA); + __m128i a0 = _mm_loadu_si128(pAA+0); + __m128i a1 = _mm_loadu_si128(pAA+1); __m128i c0, c1, c2, c3; F2N_Square_256_CLMUL(c3, c2, c1, c0, a1, a0); GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0); - store_m128i<0>(pC, c0); - store_m128i<1>(pC, c1); + __m128i* pCC = reinterpret_cast<__m128i*>(pC); + _mm_storeu_si128(pCC+0, c0); + _mm_storeu_si128(pCC+1, c1); } #elif (CRYPTOPP_ARM_PMULL_AVAILABLE) diff --git a/sse_simd.h b/sse_simd.h deleted file mode 100644 index 
fe3a0332..00000000 --- a/sse_simd.h +++ /dev/null @@ -1,121 +0,0 @@ -// sse_simd.h - written and placed in public domain by Jeffrey Walton -// Helper functions to work with SSE and above. The class file -// was added after a scan by lgtm.com. We caught some findings -// that were not problems, but we refactored to squash them. - -#ifndef CRYPTOPP_SSE_CRYPTO_H -#define CRYPTOPP_SSE_CRYPTO_H - -#include "config.h" - -#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) -# include -#endif - -#if (CRYPTOPP_AVX2_AVAILABLE) -# include -#endif - -NAMESPACE_BEGIN(CryptoPP) - -#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE) - -template -inline __m128i load_m128i(const byte* ptr) -{ - enum { SCALE=sizeof(__m128i)/sizeof(byte) }; - return _mm_loadu_si128( - const_cast<__m128i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -template -inline __m128i load_m128i(const word16* ptr) -{ - enum { SCALE=sizeof(__m128i)/sizeof(word16) }; - return _mm_loadu_si128( - const_cast<__m128i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -template -inline __m128i load_m128i(const word32* ptr) -{ - enum { SCALE=sizeof(__m128i)/sizeof(word32) }; - return _mm_loadu_si128( - const_cast<__m128i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -template -inline __m128i load_m128i(const word64* ptr) -{ - enum { SCALE=sizeof(__m128i)/sizeof(word64) }; - return _mm_loadu_si128( - const_cast<__m128i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -// N specifies the nth 128-bit element -template -inline void store_m128i(T* ptr, __m128i val) -{ - enum { SCALE=sizeof(__m128i)/sizeof(T) }; - return _mm_storeu_si128( - reinterpret_cast<__m128i*>(ptr+SCALE*N), val); -} - -#endif - -#if (CRYPTOPP_AVX2_AVAILABLE) - -template -inline __m256i load_m256i(const byte* ptr) -{ - enum { SCALE=sizeof(__m256i)/sizeof(byte) }; - return _mm256_loadu_si256( - const_cast<__m256i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -template -inline __m256i load_m256i(const word16* ptr) -{ - enum { SCALE=sizeof(__m256i)/sizeof(word16) }; - return _mm256_loadu_si256( - const_cast<__m256i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -template -inline __m256i load_m256i(const word32* ptr) -{ - enum { SCALE=sizeof(__m256i)/sizeof(word32) }; - return _mm256_loadu_si256( - const_cast<__m256i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -template -inline __m256i load_m256i(const word64* ptr) -{ - enum { SCALE=sizeof(__m256i)/sizeof(word64) }; - return _mm256_loadu_si256( - const_cast<__m256i*>( // SunCC workaround - reinterpret_cast(ptr+SCALE*N))); -} - -// N specifies the nth 256-bit element -template -inline void store_m256i(T* ptr, __m256i val) -{ - enum { SCALE=sizeof(__m256i)/sizeof(T) }; - return _mm256_storeu_si256( - reinterpret_cast<__m256i*>(ptr+SCALE*N), val); -} - -#endif - -NAMESPACE_END - -#endif // CRYPTOPP_SSE_CRYPTO_H From 55fe6a21915022b0015abe27589c4a6fb0d4fca8 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 02:00:53 -0400 Subject: [PATCH 06/11] Cleanup BERGeneralDecoder constructors --- asn.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/asn.cpp b/asn.cpp index ec5a097d..b72c45cf 100644 --- a/asn.cpp +++ b/asn.cpp @@ -514,22 +514,22 @@ lword BERGeneralDecoder::ReduceLength(lword delta) } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) + : m_outQueue(outQueue), m_length(0), m_asnTag(DefaultTag), 
m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue, byte asnTag) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) + : m_outQueue(outQueue), m_length(0), m_asnTag(asnTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) + : m_outQueue(outQueue), m_length(0), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue, byte asnTag) - : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) + : m_outQueue(outQueue), m_length(0), m_asnTag(asnTag), m_finished(false) { } From c1f4d17e109e31a48ad5d34c842e0fabb8e709a3 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 02:03:06 -0400 Subject: [PATCH 07/11] Cleanup BERGeneralDecoder constructors For real this time... --- asn.cpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/asn.cpp b/asn.cpp index b72c45cf..1631f52e 100644 --- a/asn.cpp +++ b/asn.cpp @@ -395,25 +395,25 @@ void EncodedObjectFilter::Put(const byte *inString, size_t length) } BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(DefaultTag); } BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue, byte asnTag) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(asnTag); } BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(DefaultTag); } BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue, byte asnTag) - : m_inQueue(inQueue), m_finished(false) + : m_inQueue(inQueue), m_length(0), m_finished(false) { Init(asnTag); } @@ -514,22 +514,22 @@ lword BERGeneralDecoder::ReduceLength(lword delta) } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue) - : m_outQueue(outQueue), m_length(0), m_asnTag(DefaultTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue, byte asnTag) - : m_outQueue(outQueue), m_length(0), m_asnTag(asnTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue) - : m_outQueue(outQueue), m_length(0), m_asnTag(DefaultTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false) { } DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue, byte asnTag) - : m_outQueue(outQueue), m_length(0), m_asnTag(asnTag), m_finished(false) + : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false) { } From 8c78985de2362fd9387ce8a602d6f3a16982c2a5 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 02:56:30 -0400 Subject: [PATCH 08/11] Add ModularArithmetic::operator= --- modarith.h | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/modarith.h b/modarith.h index aa90c8a5..c473e3d0 100644 --- a/modarith.h +++ b/modarith.h @@ -35,6 +35,9 @@ CRYPTOPP_DLL_TEMPLATE_CLASS AbstractEuclideanDomain; ///
    abcd = group.Add(a, group.Add(b, group.Add(c,d)));
/// The following code will produce incorrect results: ///
    abcd = group.Add(group.Add(a,b), group.Add(c,d));
+/// \details If a ModularArithmetic is copied or assigned the modulus +/// is copied, but not the internal data members. The internal data +/// members are undefined after copy or assignment. /// \sa Integer on the /// Crypto++ wiki. class CRYPTOPP_DLL ModularArithmetic : public AbstractRing @@ -54,7 +57,18 @@ public: /// \brief Copy construct a ModularArithmetic /// \param ma other ModularArithmetic ModularArithmetic(const ModularArithmetic &ma) - : m_modulus(ma.m_modulus), m_result(static_cast(0), ma.m_modulus.reg.size()) {} + : m_modulus(ma.m_modulus), m_result(static_cast(0), m_modulus.reg.size()) {} + + /// \brief Assign a ModularArithmetic + /// \param ma other ModularArithmetic + ModularArithmetic& operator=(const ModularArithmetic &ma) { + if (this != &ma) + { + m_modulus = ma.m_modulus; + m_result = Integer(static_cast(0), m_modulus.reg.size()); + } + return *this; + } /// \brief Construct a ModularArithmetic /// \param bt BER encoded ModularArithmetic From 955ac6fe2419b8956adb7402234580dc5e954d49 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 04:29:40 -0400 Subject: [PATCH 09/11] Rework SSE2 and AVX2 loads and stores --- chacha_avx.cpp | 120 ++++++++++++++++++++++++++++++------------------ chacha_simd.cpp | 84 ++++++++++++++++----------------- gf2n_simd.cpp | 27 +++++------ 3 files changed, 128 insertions(+), 103 deletions(-) diff --git a/chacha_avx.cpp b/chacha_avx.cpp index 20693488..a2e56f96 100644 --- a/chacha_avx.cpp +++ b/chacha_avx.cpp @@ -91,14 +91,14 @@ NAMESPACE_BEGIN(CryptoPP) void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds) { - MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state); - MAYBE_CONST __m256i* input_mm = (MAYBE_CONST __m256i*)(input); - __m256i* output_mm = reinterpret_cast<__m256i*>(output); - - const __m256i state0 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 0)); - const __m256i state1 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 1)); - const __m256i state2 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 2)); - const __m256i state3 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 3)); + const __m256i state0 = _mm256_broadcastsi128_si256( + _mm_loadu_si128(reinterpret_cast(state+0*4))); + const __m256i state1 = _mm256_broadcastsi128_si256( + _mm_loadu_si128(reinterpret_cast(state+1*4))); + const __m256i state2 = _mm256_broadcastsi128_si256( + _mm_loadu_si128(reinterpret_cast(state+2*4))); + const __m256i state3 = _mm256_broadcastsi128_si256( + _mm_loadu_si128(reinterpret_cast(state+3*4))); const __m256i CTR0 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 4); const __m256i CTR1 = _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 5); @@ -304,80 +304,112 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * X3_3 = _mm256_add_epi32(X3_3, state3); X3_3 = _mm256_add_epi64(X3_3, CTR3); - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 0, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 0), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+0*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+0*32)), _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 1, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 1), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+1*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+1*32)), _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 2, 
_mm256_xor_si256(_mm256_loadu_si256(input_mm + 2), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+2*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+2*32)), _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 3, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 3), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+3*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+3*32)), _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)))); } else { - _mm256_storeu_si256(output_mm + 0, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 1, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 2, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 3, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+0*32), + _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+1*32), + _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+2*32), + _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+3*32), + _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 4, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 4), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+4*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+4*32)), _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 5, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 5), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+5*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+5*32)), _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 6, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 6), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+6*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+6*32)), _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 7, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 7), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+7*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast(input+7*32)), _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)))); } else { - _mm256_storeu_si256(output_mm + 4, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 5, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 6, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 7, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+4*32), + _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+5*32), + _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+6*32), + _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+7*32), + _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 8, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 8), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 8*32), _mm256_xor_si256( + 
_mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+8*32)), _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 9, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 9), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 9*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+9*32)), _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 10, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 10), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+10*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+10*32)), _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 11, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 11), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+11*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+11*32)), _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)))); } else { - _mm256_storeu_si256(output_mm + 8, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 9, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 10, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 11, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 8*32), + _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 9*32), + _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+10*32), + _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+11*32), + _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 12, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 12), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+12*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+12*32)), _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 13, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 13), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+13*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+13*32)), _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 14, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 14), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+14*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+14*32)), _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 15, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 15), + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+15*32), _mm256_xor_si256( + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+15*32)), _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)))); } else { - _mm256_storeu_si256(output_mm + 12, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 13, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 14, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 15, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+12*32), + _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+13*32), + _mm256_permute2x128_si256(X2_2, X2_3, 0 +
(2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+14*32), + _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+15*32), + _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); } // https://software.intel.com/en-us/articles/avoiding-avx-sse-transition-penalties diff --git a/chacha_simd.cpp b/chacha_simd.cpp index 9fd6b0f1..a983ab69 100644 --- a/chacha_simd.cpp +++ b/chacha_simd.cpp @@ -565,14 +565,10 @@ void ChaCha_OperateKeystream_NEON(const word32 *state, const byte* input, byte * void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *output, unsigned int rounds) { - const __m128i* state_mm = reinterpret_cast<const __m128i*>(state); - const __m128i* input_mm = reinterpret_cast<const __m128i*>(input); - __m128i* output_mm = reinterpret_cast<__m128i*>(output); - - const __m128i state0 = _mm_load_si128(state_mm + 0); - const __m128i state1 = _mm_load_si128(state_mm + 1); - const __m128i state2 = _mm_load_si128(state_mm + 2); - const __m128i state3 = _mm_load_si128(state_mm + 3); + const __m128i state0 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+0*4)); + const __m128i state1 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+1*4)); + const __m128i state2 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+2*4)); + const __m128i state3 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+3*4)); __m128i r0_0 = state0; __m128i r0_1 = state1; @@ -772,57 +768,57 @@ void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte * r3_3 = _mm_add_epi32(r3_3, state3); r3_3 = _mm_add_epi64(r3_3, _mm_set_epi32(0, 0, 0, 3)); - if (input_mm) + if (input) { - r0_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 0), r0_0); - r0_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 1), r0_1); - r0_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 2), r0_2); - r0_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 3), r0_3); + r0_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+0*16)), r0_0); + r0_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+1*16)), r0_1); + r0_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+2*16)), r0_2); + r0_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+3*16)), r0_3); } - _mm_storeu_si128(output_mm + 0, r0_0); - _mm_storeu_si128(output_mm + 1, r0_1); - _mm_storeu_si128(output_mm + 2, r0_2); - _mm_storeu_si128(output_mm + 3, r0_3); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+0*16), r0_0); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+1*16), r0_1); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+2*16), r0_2); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+3*16), r0_3); - if (input_mm) + if (input) { - r1_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 4), r1_0); - r1_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 5), r1_1); - r1_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 6), r1_2); - r1_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 7), r1_3); + r1_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+4*16)), r1_0); + r1_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+5*16)), r1_1); + r1_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+6*16)), r1_2); + r1_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+7*16)), r1_3); } - _mm_storeu_si128(output_mm + 4, r1_0); - _mm_storeu_si128(output_mm + 5, r1_1); - _mm_storeu_si128(output_mm + 6, r1_2); - _mm_storeu_si128(output_mm + 7, r1_3); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+4*16), r1_0); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+5*16), r1_1); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+6*16), r1_2); +
_mm_storeu_si128(reinterpret_cast<__m128i*>(output+7*16), r1_3); - if (input_mm) + if (input) { - r2_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 8), r2_0); - r2_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 9), r2_1); - r2_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 10), r2_2); - r2_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 11), r2_3); + r2_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+ 8*16)), r2_0); + r2_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+ 9*16)), r2_1); + r2_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+10*16)), r2_2); + r2_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+11*16)), r2_3); } - _mm_storeu_si128(output_mm + 8, r2_0); - _mm_storeu_si128(output_mm + 9, r2_1); - _mm_storeu_si128(output_mm + 10, r2_2); - _mm_storeu_si128(output_mm + 11, r2_3); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+ 8*16), r2_0); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+ 9*16), r2_1); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+10*16), r2_2); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+11*16), r2_3); - if (input_mm) + if (input) { - r3_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 12), r3_0); - r3_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 13), r3_1); - r3_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 14), r3_2); - r3_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 15), r3_3); + r3_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+12*16)), r3_0); + r3_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+13*16)), r3_1); + r3_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+14*16)), r3_2); + r3_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+15*16)), r3_3); } - _mm_storeu_si128(output_mm + 12, r3_0); - _mm_storeu_si128(output_mm + 13, r3_1); - _mm_storeu_si128(output_mm + 14, r3_2); - _mm_storeu_si128(output_mm + 15, r3_3); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+12*16), r3_0); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+13*16), r3_1); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+14*16), r3_2); + _mm_storeu_si128(reinterpret_cast<__m128i*>(output+15*16), r3_3); } #endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE diff --git a/gf2n_simd.cpp b/gf2n_simd.cpp index 1d4d933f..71d16ecb 100644 --- a/gf2n_simd.cpp +++ b/gf2n_simd.cpp @@ -465,36 +465,33 @@ NAMESPACE_BEGIN(CryptoPP) void GF2NT_233_Multiply_Reduce_CLMUL(const word* pA, const word* pB, word* pC) { - const __m128i* pAA = reinterpret_cast<const __m128i*>(pA); - const __m128i* pBB = reinterpret_cast<const __m128i*>(pB); - __m128i a0 = _mm_loadu_si128(pAA+0); - __m128i a1 = _mm_loadu_si128(pAA+1); - __m128i b0 = _mm_loadu_si128(pBB+0); - __m128i b1 = _mm_loadu_si128(pBB+1); + enum {S=sizeof(__m128i)/sizeof(word)}; + __m128i a0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+0*S)); + __m128i a1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+1*S)); + __m128i b0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pB+0*S)); + __m128i b1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pB+1*S)); __m128i c0, c1, c2, c3; F2N_Multiply_256x256_CLMUL(c3, c2, c1, c0, a1, a0, b1, b0); GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0); - __m128i* pCC = reinterpret_cast<__m128i*>(pC); - _mm_storeu_si128(pCC+0, c0); - _mm_storeu_si128(pCC+1, c1); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+0*S), c0); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+1*S), c1); } void GF2NT_233_Square_Reduce_CLMUL(const word* pA, word* pC) { - const __m128i* pAA = reinterpret_cast<const __m128i*>(pA); - __m128i a0 = _mm_loadu_si128(pAA+0); - __m128i a1 = _mm_loadu_si128(pAA+1); + enum {S=sizeof(__m128i)/sizeof(word)}; + __m128i a0 =
_mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+0*S)); + __m128i a1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+1*S)); __m128i c0, c1, c2, c3; F2N_Square_256_CLMUL(c3, c2, c1, c0, a1, a0); GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0); - __m128i* pCC = reinterpret_cast<__m128i*>(pC); - _mm_storeu_si128(pCC+0, c0); - _mm_storeu_si128(pCC+1, c1); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+0*S), c0); + _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+1*S), c1); } #elif (CRYPTOPP_ARM_PMULL_AVAILABLE) From 570a8e1b3620687eeec4c4889a37be9c840f956c Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 12:12:46 -0400 Subject: [PATCH 10/11] Whitespace check-in --- chacha_avx.cpp | 96 +++++++++++++++++++++++++------------------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/chacha_avx.cpp b/chacha_avx.cpp index a2e56f96..af80dba0 100644 --- a/chacha_avx.cpp +++ b/chacha_avx.cpp @@ -306,18 +306,18 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * if (input) { - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+0*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+0*32)), - _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+1*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+1*32)), - _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+2*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+2*32)), - _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+3*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+3*32)), - _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+0*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+0*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+1*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+1*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+2*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+2*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+3*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+3*32)))); } else { @@ -333,18 +333,18 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * if (input) { - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+4*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+4*32)), - _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+5*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+5*32)), - _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+6*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+6*32)), - _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+7*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+7*32)), - _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+4*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)), +
_mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+4*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+5*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+5*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+6*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+6*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+7*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+7*32)))); } else { @@ -360,18 +360,18 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * if (input) { - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 8*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+8*32)), - _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 9*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+9*32)), - _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+10*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+10*32)), - _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+11*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+11*32)), - _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 8*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+8*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 9*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+9*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+10*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+10*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+11*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+11*32)))); } else { @@ -387,18 +387,18 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * if (input) { - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+12*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+12*32)), - _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+13*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+13*32)), - _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+14*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+14*32)), - _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)))); - _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+15*32), _mm256_xor_si256( - _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+15*32)), - _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+12*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+12*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+13*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+13*32)))); +
_mm256_storeu_si256(reinterpret_cast<__m256i*>(output+14*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+14*32)))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+15*32), + _mm256_xor_si256(_mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)), + _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+15*32)))); } else { From 0ea4354157f86ad5b96e430361ccbb94f051b366 Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sun, 9 Jun 2019 12:52:10 -0400 Subject: [PATCH 11/11] Update comments --- integer.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/integer.cpp b/integer.cpp index 8274b93b..f5abf83e 100644 --- a/integer.cpp +++ b/integer.cpp @@ -18,13 +18,13 @@ // For Integer::Zero(), Integer::One() and Integer::Two(), we use one of three // strategies. First, if initialization priorities are available then we use // them. Initialization priorities are init_priority() on Linux and init_seg() -// on Windows. AIX, OS X and several other platforms lack them. Initialization +// on Windows. OS X and several other platforms lack them. Initialization // priorities are platform specific but they are also the most trouble free // with deterministic destruction. // Second, if C++11 dynamic initialization is available, then we use it. After -// the std::call_once fiasco we dropped the priority dynamic initialization -// to avoid unknown troubles platforms that are tested less frequently. In -// addition Microsoft platforms mostly do not provide dynamic initialization. +// the std::call_once fiasco we moved to dynamic initialization to avoid +// unknown troubles on platforms that are tested less frequently. In addition +// Microsoft platforms mostly do not provide dynamic initialization. // The MSDN docs claim they do but they don't in practice because we need // Visual Studio 2017 and Windows 10 or above. // Third, we fall back to Wei's original code of a Singleton. Wei's original @@ -47,9 +47,9 @@ // Java or .Net then Singleton must be avoided at all costs. // // The code below has a path cut-in for BMI2 using mulx and adcx instructions. -// There was a modest speedup of approximately 0.03 ms in Integer operations. -// We had to disable BMI2 for the moment because some OS X machines were -// advertising BMI/BMI2 support but caused SIGILL's at runtime. Also see +// There was a modest speedup of approximately 0.03 ms in public key Integer +// operations. We had to disable BMI2 for the moment because some OS X machines +// were advertising BMI/BMI2 support but caused SIGILL's at runtime. Also see // https://github.com/weidai11/cryptopp/issues/850. #include "pch.h"
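As a point of reference for the first strategy described in the integer.cpp comment above, an initialization-priority declaration looks roughly like the sketch below. This is only a sketch: the class BigInt, the variable names, and the priority value 250 are assumed for illustration and are not Crypto++'s actual declarations. GCC and Clang honor the init_priority() attribute, while MSVC expresses the same idea with #pragma init_seg.

// init_priority_sketch.cpp -- illustrative sketch only, not Crypto++ code.
// Demonstrates constructing file-scope objects ahead of ordinary static
// initialization so that Integer-like constants are usable early.
#include <iostream>

struct BigInt {                     // stand-in for a big-integer class
    long v;
    explicit BigInt(long x) : v(x) {}
};

#if defined(_MSC_VER)
// MSVC: objects defined after this pragma are constructed in the "lib"
// initialization segment, before user-level static constructors run.
#pragma init_seg(lib)
static const BigInt s_zero(0);
static const BigInt s_one(1);
#else
// GCC/Clang: smaller init_priority values are constructed earlier; values
// 101-65535 are available to applications (0-100 are reserved).
static const BigInt s_zero __attribute__((init_priority(250))) = BigInt(0);
static const BigInt s_one  __attribute__((init_priority(250))) = BigInt(1);
#endif

int main()
{
    std::cout << s_zero.v << " " << s_one.v << std::endl;
    return 0;
}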