Mirror of https://github.com/shadps4-emu/ext-cryptopp.git

Commit d24c991913: Pull changes from master branch
@@ -860,6 +860,15 @@ ifeq ($(findstring native,$(MAKECMDGOALS)),native)
     NATIVE_OPT = -march=native
   endif # NATIVE_OPT

+  # And tune
+  ifeq ($(NATIVE_OPT),)
+    TOPT = -mtune=native
+    HAVE_OPT = $(shell $(CXX) $(TCXXFLAGS) $(ZOPT) $(TOPT) $(TPROG) -o $(TOUT) 2>&1 | tr ' ' '\n' | wc -l)
+    ifeq ($(strip $(HAVE_OPT)),0)
+      NATIVE_OPT = -mtune=native
+    endif # NATIVE_OPT
+  endif
+
   # Try SunCC next
   ifeq ($(NATIVE_OPT),)
     TOPT = -native
asn.cpp (16 lines changed)
@@ -395,25 +395,25 @@ void EncodedObjectFilter::Put(const byte *inString, size_t length)
 }

 BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(DefaultTag);
 }

 BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue, byte asnTag)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(asnTag);
 }

 BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(DefaultTag);
 }

 BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue, byte asnTag)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(asnTag);
 }
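All four constructors gain m_length(0) in the initializer list, so the member no longer starts out indeterminate. A minimal sketch of the bug class this closes, using a hypothetical Decoder type rather than the Crypto++ class:

#include <iostream>

// A scalar member omitted from a constructor's initializer list is left
// default-initialized, i.e. holding an indeterminate value; reading it
// before assignment is undefined behavior.
struct Decoder {
    Decoder() : m_length(0), m_finished(false) {}  // every member initialized
    unsigned long m_length;
    bool m_finished;
};

int main() {
    Decoder d;
    std::cout << d.m_length << '\n';  // always 0; without m_length(0) this read would be UB
    return 0;
}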
@@ -514,22 +514,22 @@ lword BERGeneralDecoder::ReduceLength(lword delta)
 }

 DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
 {
 }

 DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue, byte asnTag)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
 {
 }

 DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
 {
 }

 DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue, byte asnTag)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
 {
 }
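The only change to the four DERGeneralEncoder constructors is dropping the explicit ByteQueue() base initializer. A base class with a default constructor is default-constructed whether or not it is named in the list, so behavior is unchanged. A small sketch of the equivalence, with hypothetical types:

// Both derived constructors run Base's default constructor; naming the
// base in the initializer list adds nothing when no arguments are passed.
struct Base {
    Base() : x(0) {}
    int x;
};

struct Explicit : Base {
    Explicit() : Base() {}  // explicit default construction of the base
};

struct Implicit : Base {
    Implicit() {}           // the base is default-constructed anyway
};

int main() {
    Explicit e;
    Implicit i;
    return e.x + i.x;       // both members are 0
}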
chacha_avx.cpp (152 lines changed)
@@ -91,14 +91,14 @@ NAMESPACE_BEGIN(CryptoPP)

 void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
 {
-    MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state);
-    MAYBE_CONST __m256i* input_mm = (MAYBE_CONST __m256i*)(input);
-    __m256i* output_mm = reinterpret_cast<__m256i*>(output);
-
-    const __m256i state0 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 0));
-    const __m256i state1 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 1));
-    const __m256i state2 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 2));
-    const __m256i state3 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 3));
+    const __m256i state0 = _mm256_broadcastsi128_si256(
+        _mm_loadu_si128(reinterpret_cast<const __m128i*>(state+0*4)));
+    const __m256i state1 = _mm256_broadcastsi128_si256(
+        _mm_loadu_si128(reinterpret_cast<const __m128i*>(state+1*4)));
+    const __m256i state2 = _mm256_broadcastsi128_si256(
+        _mm_loadu_si128(reinterpret_cast<const __m128i*>(state+2*4)));
+    const __m256i state3 = _mm256_broadcastsi128_si256(
+        _mm_loadu_si128(reinterpret_cast<const __m128i*>(state+3*4)));

     const __m256i CTR0 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 4);
     const __m256i CTR1 = _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 5);
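The rewrite replaces up-front casts to vector pointers with element-offset arithmetic on the original word32 pointer, casting only at the point of each load. Both forms address the same 16-byte blocks: state+1*4 advances four 32-bit words, exactly one __m128i. A small sketch of that address equivalence (plain C++, no AVX needed), assuming word32 is std::uint32_t as in Crypto++:

#include <cassert>
#include <cstdint>

int main() {
    // 16 words of ChaCha state, as in the function above
    std::uint32_t state[16] = {0};

    // New style: offset in elements, cast at the point of use
    const std::uint32_t* p = state + 1*4;

    // Old style casts once and does vector-sized pointer arithmetic;
    // both land 16 bytes past the base, i.e. one 128-bit lane.
    const char* base = reinterpret_cast<const char*>(state);
    assert(reinterpret_cast<const char*>(p) == base + 16);
    return 0;
}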
@@ -304,80 +304,112 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
     X3_3 = _mm256_add_epi32(X3_3, state3);
     X3_3 = _mm256_add_epi64(X3_3, CTR3);

-    if (input_mm)
+    if (input)
     {
-        _mm256_storeu_si256(output_mm + 0, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 0),
-            _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))));
-        _mm256_storeu_si256(output_mm + 1, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 1),
-            _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))));
-        _mm256_storeu_si256(output_mm + 2, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 2),
-            _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))));
-        _mm256_storeu_si256(output_mm + 3, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 3),
-            _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+0*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+0*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+1*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+1*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+2*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+2*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+3*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+3*32))));
     }
     else
     {
-        _mm256_storeu_si256(output_mm + 0, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)));
-        _mm256_storeu_si256(output_mm + 1, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)));
-        _mm256_storeu_si256(output_mm + 2, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)));
-        _mm256_storeu_si256(output_mm + 3, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+0*32),
+            _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+1*32),
+            _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+2*32),
+            _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+3*32),
+            _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)));
     }

-    if (input_mm)
+    if (input)
     {
-        _mm256_storeu_si256(output_mm + 4, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 4),
-            _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))));
-        _mm256_storeu_si256(output_mm + 5, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 5),
-            _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))));
-        _mm256_storeu_si256(output_mm + 6, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 6),
-            _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))));
-        _mm256_storeu_si256(output_mm + 7, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 7),
-            _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+4*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+4*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+5*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+5*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+6*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+6*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+7*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+7*32))));
     }
     else
     {
-        _mm256_storeu_si256(output_mm + 4, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)));
-        _mm256_storeu_si256(output_mm + 5, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)));
-        _mm256_storeu_si256(output_mm + 6, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)));
-        _mm256_storeu_si256(output_mm + 7, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+4*32),
+            _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+5*32),
+            _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+6*32),
+            _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+7*32),
+            _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)));
     }

-    if (input_mm)
+    if (input)
     {
-        _mm256_storeu_si256(output_mm + 8, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 8),
-            _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))));
-        _mm256_storeu_si256(output_mm + 9, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 9),
-            _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))));
-        _mm256_storeu_si256(output_mm + 10, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 10),
-            _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))));
-        _mm256_storeu_si256(output_mm + 11, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 11),
-            _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 8*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+8*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 9*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+9*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+10*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+10*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+11*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+11*32))));
     }
     else
     {
-        _mm256_storeu_si256(output_mm + 8, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)));
-        _mm256_storeu_si256(output_mm + 9, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)));
-        _mm256_storeu_si256(output_mm + 10, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)));
-        _mm256_storeu_si256(output_mm + 11, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 8*32),
+            _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+ 9*32),
+            _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+10*32),
+            _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+11*32),
+            _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)));
     }

-    if (input_mm)
+    if (input)
     {
-        _mm256_storeu_si256(output_mm + 12, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 12),
-            _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))));
-        _mm256_storeu_si256(output_mm + 13, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 13),
-            _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))));
-        _mm256_storeu_si256(output_mm + 14, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 14),
-            _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))));
-        _mm256_storeu_si256(output_mm + 15, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 15),
-            _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+12*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+12*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+13*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+13*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+14*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+14*32))));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+15*32),
+            _mm256_xor_si256(_mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)),
+                _mm256_loadu_si256(reinterpret_cast<const __m256i*>(input+15*32))));
     }
     else
     {
-        _mm256_storeu_si256(output_mm + 12, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)));
-        _mm256_storeu_si256(output_mm + 13, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)));
-        _mm256_storeu_si256(output_mm + 14, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)));
-        _mm256_storeu_si256(output_mm + 15, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+12*32),
+            _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+13*32),
+            _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+14*32),
+            _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)));
+        _mm256_storeu_si256(reinterpret_cast<__m256i*>(output+15*32),
+            _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)));
     }

     // https://software.intel.com/en-us/articles/avoiding-avx-sse-transition-penalties
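The control bytes passed to _mm256_permute2x128_si256 above select 128-bit lanes: imm bits [1:0] pick the result's low lane and bits [5:4] its high lane, where values 0/1 name the lanes of the first operand and 2/3 the lanes of the second. So 1 + (3 << 4) gathers the two high lanes and 0 + (2 << 4) the two low lanes, which is how the kernel splits each register pair into two keystream blocks. A standalone sketch (compile with -mavx2; AVX2 hardware required):

#include <immintrin.h>
#include <cstdio>

int main() {
    const __m256i a = _mm256_set_epi64x(11, 10, 1, 0);  // high lane {10,11}, low lane {0,1}
    const __m256i b = _mm256_set_epi64x(33, 32, 3, 2);
    // 1 + (3<<4): low 128 bits <- high lane of a, high 128 bits <- high lane of b
    const __m256i hi = _mm256_permute2x128_si256(a, b, 1 + (3 << 4));
    // 0 + (2<<4): low 128 bits <- low lane of a, high 128 bits <- low lane of b
    const __m256i lo = _mm256_permute2x128_si256(a, b, 0 + (2 << 4));
    long long v[4];
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(v), hi);
    std::printf("%lld %lld %lld %lld\n", v[0], v[1], v[2], v[3]);  // 10 11 32 33
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(v), lo);
    std::printf("%lld %lld %lld %lld\n", v[0], v[1], v[2], v[3]);  // 0 1 2 3
    return 0;
}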
chacha_simd.cpp

@@ -565,14 +565,10 @@ void ChaCha_OperateKeystream_NEON(const word32 *state, const byte* input, byte *output, unsigned int rounds)

 void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
 {
-    const __m128i* state_mm = reinterpret_cast<const __m128i*>(state);
-    const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
-    __m128i* output_mm = reinterpret_cast<__m128i*>(output);
-
-    const __m128i state0 = _mm_load_si128(state_mm + 0);
-    const __m128i state1 = _mm_load_si128(state_mm + 1);
-    const __m128i state2 = _mm_load_si128(state_mm + 2);
-    const __m128i state3 = _mm_load_si128(state_mm + 3);
+    const __m128i state0 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+0*4));
+    const __m128i state1 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+1*4));
+    const __m128i state2 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+2*4));
+    const __m128i state3 = _mm_load_si128(reinterpret_cast<const __m128i*>(state+3*4));

     __m128i r0_0 = state0;
     __m128i r0_1 = state1;
@@ -772,57 +768,57 @@ void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
     r3_3 = _mm_add_epi32(r3_3, state3);
     r3_3 = _mm_add_epi64(r3_3, _mm_set_epi32(0, 0, 0, 3));

-    if (input_mm)
+    if (input)
     {
-        r0_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 0), r0_0);
-        r0_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 1), r0_1);
-        r0_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 2), r0_2);
-        r0_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 3), r0_3);
+        r0_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+0*16)), r0_0);
+        r0_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+1*16)), r0_1);
+        r0_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+2*16)), r0_2);
+        r0_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+3*16)), r0_3);
     }

-    _mm_storeu_si128(output_mm + 0, r0_0);
-    _mm_storeu_si128(output_mm + 1, r0_1);
-    _mm_storeu_si128(output_mm + 2, r0_2);
-    _mm_storeu_si128(output_mm + 3, r0_3);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+0*16), r0_0);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+1*16), r0_1);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+2*16), r0_2);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+3*16), r0_3);

-    if (input_mm)
+    if (input)
     {
-        r1_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 4), r1_0);
-        r1_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 5), r1_1);
-        r1_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 6), r1_2);
-        r1_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 7), r1_3);
+        r1_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+4*16)), r1_0);
+        r1_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+5*16)), r1_1);
+        r1_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+6*16)), r1_2);
+        r1_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+7*16)), r1_3);
     }

-    _mm_storeu_si128(output_mm + 4, r1_0);
-    _mm_storeu_si128(output_mm + 5, r1_1);
-    _mm_storeu_si128(output_mm + 6, r1_2);
-    _mm_storeu_si128(output_mm + 7, r1_3);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+4*16), r1_0);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+5*16), r1_1);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+6*16), r1_2);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+7*16), r1_3);

-    if (input_mm)
+    if (input)
     {
-        r2_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 8), r2_0);
-        r2_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 9), r2_1);
-        r2_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 10), r2_2);
-        r2_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 11), r2_3);
+        r2_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+ 8*16)), r2_0);
+        r2_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+ 9*16)), r2_1);
+        r2_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+10*16)), r2_2);
+        r2_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+11*16)), r2_3);
     }

-    _mm_storeu_si128(output_mm + 8, r2_0);
-    _mm_storeu_si128(output_mm + 9, r2_1);
-    _mm_storeu_si128(output_mm + 10, r2_2);
-    _mm_storeu_si128(output_mm + 11, r2_3);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+ 8*16), r2_0);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+ 9*16), r2_1);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+10*16), r2_2);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+11*16), r2_3);

-    if (input_mm)
+    if (input)
     {
-        r3_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 12), r3_0);
-        r3_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 13), r3_1);
-        r3_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 14), r3_2);
-        r3_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 15), r3_3);
+        r3_0 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+12*16)), r3_0);
+        r3_1 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+13*16)), r3_1);
+        r3_2 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+14*16)), r3_2);
+        r3_3 = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i*>(input+15*16)), r3_3);
     }

-    _mm_storeu_si128(output_mm + 12, r3_0);
-    _mm_storeu_si128(output_mm + 13, r3_1);
-    _mm_storeu_si128(output_mm + 14, r3_2);
-    _mm_storeu_si128(output_mm + 15, r3_3);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+12*16), r3_0);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+13*16), r3_1);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+14*16), r3_2);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(output+15*16), r3_3);
 }

 #endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE
gf2n_simd.cpp

@@ -465,36 +465,33 @@ NAMESPACE_BEGIN(CryptoPP)
 void
 GF2NT_233_Multiply_Reduce_CLMUL(const word* pA, const word* pB, word* pC)
 {
-    const __m128i* pAA = reinterpret_cast<const __m128i*>(pA);
-    const __m128i* pBB = reinterpret_cast<const __m128i*>(pB);
-    __m128i a0 = _mm_loadu_si128(pAA+0);
-    __m128i a1 = _mm_loadu_si128(pAA+1);
-    __m128i b0 = _mm_loadu_si128(pBB+0);
-    __m128i b1 = _mm_loadu_si128(pBB+1);
+    enum {S=sizeof(__m128i)/sizeof(word)};
+    __m128i a0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+0*S));
+    __m128i a1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+1*S));
+    __m128i b0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pB+0*S));
+    __m128i b1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pB+1*S));

     __m128i c0, c1, c2, c3;
     F2N_Multiply_256x256_CLMUL(c3, c2, c1, c0, a1, a0, b1, b0);
     GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0);

-    __m128i* pCC = reinterpret_cast<__m128i*>(pC);
-    _mm_storeu_si128(pCC+0, c0);
-    _mm_storeu_si128(pCC+1, c1);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+0*S), c0);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+1*S), c1);
 }

 void
 GF2NT_233_Square_Reduce_CLMUL(const word* pA, word* pC)
 {
-    const __m128i* pAA = reinterpret_cast<const __m128i*>(pA);
-    __m128i a0 = _mm_loadu_si128(pAA+0);
-    __m128i a1 = _mm_loadu_si128(pAA+1);
+    enum {S=sizeof(__m128i)/sizeof(word)};
+    __m128i a0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+0*S));
+    __m128i a1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pA+1*S));

     __m128i c0, c1, c2, c3;
     F2N_Square_256_CLMUL(c3, c2, c1, c0, a1, a0);
     GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0);

-    __m128i* pCC = reinterpret_cast<__m128i*>(pC);
-    _mm_storeu_si128(pCC+0, c0);
-    _mm_storeu_si128(pCC+1, c1);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+0*S), c0);
+    _mm_storeu_si128(reinterpret_cast<__m128i*>(pC+1*S), c1);
 }

 #elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
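The enum {S=sizeof(__m128i)/sizeof(word)} introduced here is a compile-time element count: pA+1*S advances exactly one __m128i whether word is 32 or 64 bits wide. A minimal sketch of the invariant, assuming word is std::uint64_t as on a 64-bit build:

#include <cstdint>
#include <emmintrin.h>

typedef std::uint64_t word;                 // assumed 64-bit word
enum { S = sizeof(__m128i)/sizeof(word) };  // 2 words per 128-bit vector

// A step of S words always spans exactly one __m128i.
static_assert(S * sizeof(word) == sizeof(__m128i), "one vector per step");

int main() { return 0; }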
integer.cpp (14 lines changed)
@@ -18,13 +18,13 @@
 // For Integer::Zero(), Integer::One() and Integer::Two(), we use one of three
 // strategies. First, if initialization priorities are available then we use
 // them. Initialization priorities are init_priority() on Linux and init_seg()
-// on Windows. AIX, OS X and several other platforms lack them. Initialization
+// on Windows. OS X and several other platforms lack them. Initialization
 // priorities are platform specific but they are also the most trouble free
 // with determisitic destruction.
 // Second, if C++11 dynamic initialization is available, then we use it. After
-// the std::call_once fiasco we dropped the priority dynamic initialization
-// to avoid unknown troubles platforms that are tested less frequently. In
-// addition Microsoft platforms mostly do not provide dynamic initialization.
+// the std::call_once fiasco we moved to dynamic initialization to avoid
+// unknown troubles platforms that are tested less frequently. In addition
+// Microsoft platforms mostly do not provide dynamic initialization.
 // The MSDN docs claim they do but they don't in practice because we need
 // Visual Studio 2017 and Windows 10 or above.
 // Third, we fall back to Wei's original code of a Singleton. Wei's original

@@ -47,9 +47,9 @@
 // Java or .Net then Singleton must be avoided at all costs.
 //
 // The code below has a path cut-in for BMI2 using mulx and adcx instructions.
-// There was a modest speedup of approximately 0.03 ms in Integer operations.
-// We had to disable BMI2 for the moment because some OS X machines were
-// advertising BMI/BMI2 support but caused SIGILL's at runtime. Also see
+// There was a modest speedup of approximately 0.03 ms in public key Integer
+// operations. We had to disable BMI2 for the moment because some OS X machines
+// were advertising BMI/BMI2 support but caused SIGILL's at runtime. Also see
 // https://github.com/weidai11/cryptopp/issues/850.

 #include "pch.h"
modarith.h (16 lines changed)
@@ -35,6 +35,9 @@ CRYPTOPP_DLL_TEMPLATE_CLASS AbstractEuclideanDomain<Integer>;
 /// <pre>  abcd = group.Add(a, group.Add(b, group.Add(c,d));</pre>
 /// The following code will produce incorrect results:
 /// <pre>  abcd = group.Add(group.Add(a,b), group.Add(c,d));</pre>
+/// \details If a ModularArithmetic is copied or assigned the modulus
+///  is copied, but not the internal data members. The internal data
+///  members are undefined after copy or assignment.
 /// \sa <A HREF="https://cryptopp.com/wiki/Integer">Integer</A> on the
 ///  Crypto++ wiki.
 class CRYPTOPP_DLL ModularArithmetic : public AbstractRing<Integer>
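The surrounding documentation warns that results from ModularArithmetic::Add refer to internal scratch state, so sums must be accumulated by nesting, with each result consumed by exactly one enclosing call. A usage sketch (SumOfFour is a hypothetical helper; the Add pattern is the one the header documents):

#include "integer.h"
#include "modarith.h"
using namespace CryptoPP;

// Accumulate right-to-left, exactly as the header prescribes.
Integer SumOfFour(ModularArithmetic& group,
                  const Integer& a, const Integer& b,
                  const Integer& c, const Integer& d)
{
    return group.Add(a, group.Add(b, group.Add(c, d)));
    // Incorrect per the docs: group.Add(group.Add(a,b), group.Add(c,d)),
    // since both arguments would end up referring to the same internal
    // result buffer.
}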
@@ -54,7 +57,18 @@ public:
 /// \brief Copy construct a ModularArithmetic
 /// \param ma other ModularArithmetic
 ModularArithmetic(const ModularArithmetic &ma)
-    : AbstractRing<Integer>(ma), m_modulus(ma.m_modulus), m_result(static_cast<word>(0), ma.m_modulus.reg.size()) {}
+    : AbstractRing<Integer>(ma), m_modulus(ma.m_modulus), m_result(static_cast<word>(0), m_modulus.reg.size()) {}

+/// \brief Assign a ModularArithmetic
+/// \param ma other ModularArithmetic
+ModularArithmetic& operator=(const ModularArithmetic &ma) {
+    if (this != &ma)
+    {
+        m_modulus = ma.m_modulus;
+        m_result = Integer(static_cast<word>(0), m_modulus.reg.size());
+    }
+    return *this;
+}
+
 /// \brief Construct a ModularArithmetic
 /// \param bt BER encoded ModularArithmetic
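The new operator= and the adjusted copy constructor copy the modulus and rebuild m_result at the proper size rather than copying scratch state, which is exactly the caveat the new \details text documents. A usage sketch (hypothetical demo program, Crypto++ API as shown in the header):

#include "integer.h"
#include "modarith.h"
using namespace CryptoPP;

int main()
{
    ModularArithmetic ma(Integer(17));
    ModularArithmetic mb(Integer(101));

    mb = ma;  // modulus copied; internal scratch reset, not copied

    // mb now reduces modulo 17, like ma: 9 + 12 = 21 = 4 (mod 17)
    Integer r = mb.Add(Integer(9), Integer(12));
    return r == Integer(4) ? 0 : 1;
}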