// gcm-simd.cpp - written and placed in the public domain by
//                Jeffrey Walton, Uri Blumenthal and Marcel Raad.
//
//    This source file uses intrinsics to gain access to CLMUL, ARMv8a
//    PMULL and Power8 VMULL instructions. A separate source file is
//    needed because additional CXXFLAGS are required to enable the
//    appropriate instruction sets in some build configurations.

#include "pch.h"
#include "config.h"
#include "misc.h"

// The Clang 3.3 integrated assembler crashes on Linux. Other versions
// produce incorrect results. Clang has never handled Intel ASM
// very well. I wish LLVM would fix it.
#if defined(CRYPTOPP_DISABLE_INTEL_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include <arm_neon.h>
#endif

#if defined(CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc-simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Clang __m128i casts, http://bugs.llvm.org/show_bug.cgi?id=20670
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

// GCC cast warning
#define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
#define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))

// Debugging on PowerPC
#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
# ifndef NDEBUG
#  undef INLINE
#  define INLINE
# else
#  define INLINE inline
# endif
#endif

// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;

ANONYMOUS_NAMESPACE_BEGIN

// ************************* Miscellaneous ************************* //

#if CRYPTOPP_ARM_PMULL_AVAILABLE
#if defined(__GNUC__)
// Schneiders, Hovsmith and O'Rourke used this trick.
// It results in much better code generation in production code
// by avoiding D-register spills when using vgetq_lane_u64. The
// problem does not surface under minimal test cases.
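// PMULL_00 returns the carryless product of the low 64-bit halves of 'a'
// and 'b', PMULL_11 the product of the high halves, and PMULL_01/PMULL_10
// the two cross products. They play the same role as _mm_clmulepi64_si128
// in the SSE code below.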
inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (a), "w" (b) );
    return r;
}

inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (a), "w" (vget_high_u64(b)) );
    return r;
}

inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull %0.1q, %1.1d, %2.1d \n\t"
        :"=w" (r) : "w" (vget_high_u64(a)), "w" (b) );
    return r;
}

inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("pmull2 %0.1q, %1.2d, %2.2d \n\t"
        :"=w" (r) : "w" (a), "w" (b) );
    return r;
}

inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
{
    uint64x2_t r;
    __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
        :"=w" (r) : "w" (a), "w" (b), "I" (c) );
    return r;
}

// https://github.com/weidai11/cryptopp/issues/366
template <unsigned int C>
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
{
    uint64x2_t r;
    __asm __volatile("ext %0.16b, %1.16b, %2.16b, %3 \n\t"
        :"=w" (r) : "w" (a), "w" (b), "I" (C) );
    return r;
}
#endif // GCC and compatibles

#if defined(_MSC_VER)
inline uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
}

inline uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),0),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
}

inline uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),0)));
}

inline uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b)
{
    return (uint64x2_t)(vmull_p64(
        vgetq_lane_u64(vreinterpretq_u64_u8(a),1),
        vgetq_lane_u64(vreinterpretq_u64_u8(b),1)));
}

inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b, unsigned int c)
{
    return (uint64x2_t)vextq_u8(
        vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), c);
}

// https://github.com/weidai11/cryptopp/issues/366
template <unsigned int C>
inline uint64x2_t VEXT_U8(uint64x2_t a, uint64x2_t b)
{
    return (uint64x2_t)vextq_u8(
        vreinterpretq_u8_u64(a), vreinterpretq_u8_u64(b), C);
}
#endif // Microsoft and compatibles
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE
using CryptoPP::uint32x4_p;
using CryptoPP::uint64x2_p;
using CryptoPP::VectorGetLow;
using CryptoPP::VectorGetHigh;
using CryptoPP::VectorRotateLeft;

// Carryless multiplies appear to be endian-sensitive. Big-endian
// multiplies return a result {a,b}, while little-endian return
// a result {b,a}. Since the multiply routines are reflective and
// use LE the BE results need a fixup.
INLINE uint64x2_p AdjustBE(const uint64x2_p& val)
{
#if CRYPTOPP_BIG_ENDIAN
    return VectorRotateLeft<8>(val);
#else
    return val;
#endif
}

// _mm_clmulepi64_si128(a, b, 0x00)
INLINE uint64x2_p VMULL_00(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__xlc__) || defined(__xlC__)
    return AdjustBE(__vpmsumd (VectorGetHigh(a), VectorGetHigh(b)));
#else
    return AdjustBE(__builtin_crypto_vpmsumd (VectorGetHigh(a), VectorGetHigh(b)));
#endif
}

// _mm_clmulepi64_si128(a, b, 0x01)
INLINE uint64x2_p VMULL_01(const uint64x2_p& a, const uint64x2_p& b)
{
    // Small speedup. VectorGetHigh(b) ensures the high dword of 'b' is 0.
    // The 0 used in the vmull yields 0 for the high product, so the high
    // dword of 'a' is "don't care".
#if defined(__xlc__) || defined(__xlC__)
    // return AdjustBE(__vpmsumd (VectorGetLow(a), VectorGetHigh(b)));
    return AdjustBE(__vpmsumd (a, VectorGetHigh(b)));
#else
    // return AdjustBE(__builtin_crypto_vpmsumd (VectorGetLow(a), VectorGetHigh(b)));
    return AdjustBE(__builtin_crypto_vpmsumd (a, VectorGetHigh(b)));
#endif
}

// _mm_clmulepi64_si128(a, b, 0x10)
INLINE uint64x2_p VMULL_10(const uint64x2_p& a, const uint64x2_p& b)
{
    // Small speedup. VectorGetHigh(a) ensures the high dword of 'a' is 0.
    // The 0 used in the vmull yields 0 for the high product, so the high
    // dword of 'b' is "don't care".
#if defined(__xlc__) || defined(__xlC__)
    // return AdjustBE(__vpmsumd (VectorGetHigh(a), VectorGetLow(b)));
    return AdjustBE(__vpmsumd (VectorGetHigh(a), b));
#else
    // return AdjustBE(__builtin_crypto_vpmsumd (VectorGetHigh(a), VectorGetLow(b)));
    return AdjustBE(__builtin_crypto_vpmsumd (VectorGetHigh(a), b));
#endif
}

// _mm_clmulepi64_si128(a, b, 0x11)
INLINE uint64x2_p VMULL_11(const uint64x2_p& a, const uint64x2_p& b)
{
    // Small speedup. VectorGetLow(a) ensures the high dword of 'a' is 0.
    // The 0 used in the vmull yields 0 for the high product, so the high
    // dword of 'b' is "don't care".
#if defined(__xlc__) || defined(__xlC__)
    // return AdjustBE(__vpmsumd (VectorGetLow(a), VectorGetLow(b)));
    return AdjustBE(__vpmsumd (VectorGetLow(a), b));
#else
    // return AdjustBE(__builtin_crypto_vpmsumd (VectorGetLow(a), VectorGetLow(b)));
    return AdjustBE(__builtin_crypto_vpmsumd (VectorGetLow(a), b));
#endif
}
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE

ANONYMOUS_NAMESPACE_END

NAMESPACE_BEGIN(CryptoPP)

// ************************* Feature Probes ************************* //

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
extern "C" {
    typedef void (*SigHandler)(int);

    static jmp_buf s_jmpSIGILL;
    static void SigIllHandler(int)
    {
        longjmp(s_jmpSIGILL, 1);
    }
}
#endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARM64)
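// Probe for ARMv8 PMULL support by executing a carryless multiply and
// checking the product. A faulting instruction is caught with SEH under
// MS-style assembly, or with a SIGILL handler and longjmp otherwise.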
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        const poly64_t a1={0x9090909090909090}, b1={0xb0b0b0b0b0b0b0b0};
        const poly8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                         b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

        const poly128_t r1 = pmull_p64(a1, b1);
        const poly128_t r2 = pmull_high_p64((poly64x2_t)(a2), (poly64x2_t)(b2));

        // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
        const uint64x2_t t1 = (uint64x2_t)(r1); // {bignum,bignum}
        const uint64x2_t t2 = (uint64x2_t)(r2); // {bignum,bignum}

        result = !!(vgetq_lane_u64(t1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(t1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(t2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(t2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else

    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
        return false;

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
        const uint64x2_t a1={0,0x9090909090909090}, b1={0,0xb0b0b0b0b0b0b0b0};
        const uint8x16_t a2={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                         b2={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11((uint64x2_t)a2, (uint64x2_t)b2);

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif // ARM32 or ARM64

#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
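// Same strategy as the ARM probe above: execute the carryless multiply
// under a SIGILL handler and verify the products differ as expected.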
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
        return false;

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint8x16_p a={0x0f,0x08,0x08,0x08, 0x80,0x80,0x80,0x80,
                            0x00,0x0a,0x0a,0x0a, 0xa0,0xa0,0xa0,0xa0},
                         b={0x0f,0xc0,0xc0,0xc0, 0x0c,0x0c,0x0c,0x0c,
                            0x00,0xe0,0xe0,0xe0, 0x0e,0x0e,0x0e,0x0e};

        const uint64x2_p r1 = VMULL_00((uint64x2_p)(a), (uint64x2_p)(b));
        const uint64x2_p r2 = VMULL_01((uint64x2_p)(a), (uint64x2_p)(b));
        const uint64x2_p r3 = VMULL_10((uint64x2_p)(a), (uint64x2_p)(b));
        const uint64x2_p r4 = VMULL_11((uint64x2_p)(a), (uint64x2_p)(b));

        result = VectorNotEqual(r1, r2) && VectorNotEqual(r3, r4);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
#else
    return false;
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
}
#endif // PPC32 or PPC64

// *************************** ARM NEON *************************** //

#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    CRYPTOPP_ASSERT(IsAlignedOn(a,GetAlignmentOf<uint64x2_t>()));
    CRYPTOPP_ASSERT(IsAlignedOn(b,GetAlignmentOf<uint64x2_t>()));
    CRYPTOPP_ASSERT(IsAlignedOn(c,GetAlignmentOf<uint64x2_t>()));
    *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE

#if CRYPTOPP_ARM_PMULL_AVAILABLE

// Swaps high and low 64-bit words
inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}

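// Same reduction as GCM_Reduce_CLMUL below; see the comment there for the
// polynomial being reduced and the bit reflection convention.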
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}

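// Schoolbook multiply of two 128-bit polynomials: c0 is the product of the
// low halves, c1 accumulates the two cross products, and c2 is the product
// of the high halves. The 256-bit result is then reduced modulo the GCM
// polynomial.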
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}

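// Precompute successive powers of the hash key into the multiplication
// table, in the interleaved layout consumed by GCM_AuthenticateBlocks_PMULL.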
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
        vst1q_u64((uint64_t *)(mulTable+i+16), h1);
        vst1q_u64((uint64_t *)(mulTable+i+8), h);
        vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
    vst1q_u64((uint64_t *)(mulTable+i+16), h1);
    vst1q_u64((uint64_t *)(mulTable+i+8), h);
    vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
}

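// Fold up to eight 16-byte blocks into the hash state per pass, accumulating
// the partial products in c0/c1/c2 and performing a single reduction per pass.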
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16U, 8U);
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
            const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));

                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));

                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif // CRYPTOPP_ARM_PMULL_AVAILABLE

// ***************************** SSE ***************************** //

#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
// SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
// a source file with a SSE architecture switch. Also see GH #226 and GH #284.
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif // CRYPTOPP_SSE2_ASM_AVAILABLE

#if CRYPTOPP_CLMUL_AVAILABLE

#if 0
// preserved for testing
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }
    Block::Put(NULLPTR, c)(Z0)(Z1);
}

__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif // Testing

// Swaps high and low 64-bit words
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}

// SunCC 5.11-5.15 compiler crash. Make the function inline
// and parameters non-const. Also see GH #188 and GH #224.
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
{
    /*
    The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below refers to the most
    significant half of c0 as a polynomial, which, due to GCM's bit reflection, is in the
    rightmost bit positions and the lowest byte addresses.

    c1 ^= c0t * 0xc200000000000000
    c2t ^= c0t
    t = shift (c1t ^ c0b) left 1 bit
    c2 ^= t * 0xe100000000000000
    c2t ^= c1b
    shift c2 left 1 bit and xor in lowest bit of c1t
    */
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_srli_si128(c0, 8);
    c0 = _mm_xor_si128(c0, c1);
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}

// SunCC 5.13-5.14 compiler crash. Don't make the function inline.
// This is in contrast to GCM_Reduce_CLMUL, which must be inline.
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}

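// Same table layout as GCM_SetKeyWithoutResync_PMULL above.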
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m);

    __m128i h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}

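// Same 8-blocks-per-pass folding strategy as GCM_AuthenticateBlocks_PMULL
// above; m1 and m2 are byte-reversal masks that put the loaded data into the
// order the carryless multiplies expect.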
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
        __m128i d2 = _mm_shuffle_epi8(d1, m2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
            const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // SSSE3 instruction, but only used with CLMUL
    __m128i &val = *M128_CAST(hashBuffer);
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    val = _mm_shuffle_epi8(val, mask);
}
#endif // CRYPTOPP_CLMUL_AVAILABLE

// ***************************** POWER8 ***************************** //

#if CRYPTOPP_ALTIVEC_AVAILABLE
void GCM_Xor16_ALTIVEC(byte *a, const byte *b, const byte *c)
{
    VectorStore(VectorXor(VectorLoad(b), VectorLoad(c)), a);
}
#endif // CRYPTOPP_ALTIVEC_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE

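// Power8 version of the reduction. See the comment in GCM_Reduce_CLMUL
// above for the polynomial being reduced.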
uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
{
    const uint64x2_p m1 = {1,1}, m63 = {63,63};

    c1 = VectorXor(c1, VectorShiftRight<8>(c0));
    c1 = VectorXor(c1, VMULL_10(c0, r));
    c0 = VectorShiftLeft<8>(c0);
    c0 = VectorXor(c0, c1);
    c0 = vec_sl(c0, m1);
    c0 = VMULL_00(c0, r);
    c2 = VectorXor(c2, c0);
    c2 = VectorXor(c2, VectorShiftLeft<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);
    c2 = vec_sl(c2, m1);

    return VectorXor(c2, c1);
}

INLINE uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
{
    const uint64x2_p c0 = VMULL_00(x, h);
    const uint64x2_p c1 = VectorXor(VMULL_01(x, h), VMULL_10(x, h));
    const uint64x2_p c2 = VMULL_11(x, h);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
}

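// Load the 16-byte hash key and permute it into the word and byte order
// expected by GCM_Multiply_VMULL.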
INLINE uint64x2_p LoadHashKey(const byte *hashKey)
{
#if CRYPTOPP_BIG_ENDIAN
    const uint64x2_p key = (uint64x2_p)VectorLoad(hashKey);
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
    return vec_perm(key, key, mask);
#else
    const uint64x2_p key = (uint64x2_p)VectorLoad(hashKey);
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return vec_perm(key, key, mask);
#endif
}

void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p h = LoadHashKey(hashKey), h0 = h;

    unsigned int i;
    uint64_t temp[2];

    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);

        VectorStore(h, (byte*)temp);
        std::memcpy(mulTable+i, temp+0, 8);
        VectorStore(h1, mulTable+i+16);
        VectorStore(h, mulTable+i+8);
        VectorStore(h1, (byte*)temp);
        std::memcpy(mulTable+i+8, temp+0, 8);

        h = GCM_Multiply_VMULL(h1, h0, r);
    }

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);

    VectorStore(h, (byte*)temp);
    std::memcpy(mulTable+i, temp+0, 8);
    VectorStore(h1, mulTable+i+16);
    VectorStore(h, mulTable+i+8);
    VectorStore(h1, (byte*)temp);
    std::memcpy(mulTable+i+8, temp+0, 8);
}

// Swaps high and low 64-bit words
template <class T>
INLINE T SwapWords(const T& data)
{
    return (T)VectorRotateLeft<8>(data);
}

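// LoadBuffer1 and LoadBuffer2 load a 16-byte block and permute it into the
// byte and word order expected by the VMULL routines; the two helpers differ
// in the word order they produce.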
INLINE uint64x2_p LoadBuffer1(const byte *dataBuffer)
{
#if CRYPTOPP_BIG_ENDIAN
    return (uint64x2_p)VectorLoad(dataBuffer);
#else
    const uint64x2_p data = (uint64x2_p)VectorLoad(dataBuffer);
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    return vec_perm(data, data, mask);
#endif
}

INLINE uint64x2_p LoadBuffer2(const byte *dataBuffer)
{
#if CRYPTOPP_BIG_ENDIAN
    return (uint64x2_p)SwapWords(VectorLoadBE(dataBuffer));
#else
    return (uint64x2_p)VectorLoadBE(dataBuffer);
#endif
}

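// Same 8-blocks-per-pass folding strategy as the CLMUL and PMULL versions above.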
size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p x = (uint64x2_p)VectorLoad(hbuffer);

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
        uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};

        while (true)
        {
            const uint64x2_p h0 = (uint64x2_p)VectorLoad(mtable+(i+0)*16);
            const uint64x2_p h1 = (uint64x2_p)VectorLoad(mtable+(i+1)*16);
            const uint64x2_p h2 = (uint64x2_p)VectorXor(h0, h1);

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VectorXor(d1, x);
                c0 = VectorXor(c0, VMULL_00(d1, h0));
                c2 = VectorXor(c2, VMULL_01(d1, h1));
                d1 = VectorXor(d1, SwapWords(d1));
                c1 = VectorXor(c1, VMULL_00(d1, h2));
                break;
            }

            d1 = LoadBuffer1(data+(s-i)*16-8);
            c0 = VectorXor(c0, VMULL_01(d2, h0));
            c2 = VectorXor(c2, VMULL_01(d1, h1));
            d2 = VectorXor(d2, d1);
            c1 = VectorXor(c1, VMULL_01(d2, h2));

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VectorXor(d1, x);
                c0 = VectorXor(c0, VMULL_10(d1, h0));
                c2 = VectorXor(c2, VMULL_11(d1, h1));
                d1 = VectorXor(d1, SwapWords(d1));
                c1 = VectorXor(c1, VMULL_10(d1, h2));
                break;
            }

            d2 = LoadBuffer2(data+(s-i)*16-8);
            c0 = VectorXor(c0, VMULL_10(d1, h0));
            c2 = VectorXor(c2, VMULL_10(d2, h1));
            d1 = VectorXor(d1, d2);
            c1 = VectorXor(c1, VMULL_10(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = VectorXor(VectorXor(c1, c0), c2);
        x = GCM_Reduce_VMULL(c0, c1, c2, r);
    }

    VectorStore(x, hbuffer);
    return len;
}

void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
{
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
    VectorStore(VectorPermute(VectorLoad(hashBuffer), mask), hashBuffer);
}
#endif // CRYPTOPP_POWER8_VMULL_AVAILABLE

NAMESPACE_END