// rijndael_simd.cpp - written and placed in the public domain by
//                     Jeffrey Walton, Uri Blumenthal and Marcel Raad.
//                     AES-NI code originally written by Wei Dai.
//
// This source file uses intrinsics and built-ins to gain access to
// AES-NI, ARMv8a AES and Power8 AES instructions. A separate source
// file is needed because additional CXXFLAGS are required to enable
// the appropriate instruction sets in some build configurations.
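//
// For example (an illustrative note, not the authoritative list; see the
// library's makefiles for the flags actually used), GCC-style builds
// typically compile this file with extra flags such as -msse4.1 -maes
// for AES-NI, -march=armv8-a+crypto for ARMv8a AES, or -mcpu=power8 for
// Power8 in-core crypto.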
//
// ARMv8a AES code based on CriticalBlue code from Johannes Schneiders,
// Skip Hovsmith and Barry O'Rourke for the mbedTLS project. Stepping
// mbedTLS under a debugger helped us determine problems with our
// subkey generation and scheduling.
//
// AltiVec and Power8 code based on http://github.com/noloader/AES-Intrinsics and
// http://www.ibm.com/developerworks/library/se-power8-in-core-cryptography/
//
// For Power8 do not remove the casts, even when const-ness is cast away.
// Removing them causes failed compiles and a 0.3 to 0.6 cpb drop in
// performance. The IBM documentation absolutely sucks. Thanks to Andy
// Polyakov, Paul R and Trudeaun for answering questions and filling the
// gaps in the IBM documentation.
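//
// For example, the Power8 routines below first cast the subkey pointer
// with reinterpret_cast<const byte*>(subkeys) and then load round keys
// with VecLoad(i*16, keys); keep that cast pattern in place.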
//

#include "pch.h"
#include "config.h"
#include "misc.h"

// for std::memcpy in Rijndael_UncheckedSetKey_SSE4_AESNI
#include <cstring>

#if (CRYPTOPP_AESNI_AVAILABLE)
# include "adv_simd.h"
# include <smmintrin.h>
# include <wmmintrin.h>
#endif

// C1189: error: This header is specific to ARM targets
#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include "adv_simd.h"
# ifndef _M_ARM64
#  include <arm_neon.h>
# endif
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include "adv_simd.h"
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_POWER8_AES_AVAILABLE)
# include "adv_simd.h"
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Clang __m128i casts, http://bugs.llvm.org/show_bug.cgi?id=20670
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
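
// For example, the casts are used with unaligned loads and stores later
// in this file:
//   __m128i x = _mm_loadu_si128(CONST_M128_CAST(input));
//   _mm_storeu_si128(M128_CAST(output), x);
// where input and output are illustrative byte pointers, not names used
// in this file.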

// Squash MS LNK4221 and libtool warnings
extern const char RIJNDAEL_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)

// ************************* Feature Probes ************************* //

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
extern "C" {
    typedef void (*SigHandler)(int);

    static jmp_buf s_jmpSIGILL;
    static void SigIllHandler(int)
    {
        longjmp(s_jmpSIGILL, 1);
    }
}
#endif  // CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbeAES()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_AES_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        // AES encrypt and decrypt
        uint8x16_t data = vdupq_n_u8(0), key = vdupq_n_u8(0);
        uint8x16_t r1 = vaeseq_u8(data, key);
        uint8x16_t r2 = vaesdq_u8(data, key);
        r1 = vaesmcq_u8(r1);
        r2 = vaesimcq_u8(r2);

        result = !!(vgetq_lane_u8(r1,0) | vgetq_lane_u8(r2,7));
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else

    // longjmp and clobber warnings. Volatile is required.
    // http://github.com/weidai11/cryptopp/issues/24 and http://stackoverflow.com/q/7721854
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
        return false;

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        uint8x16_t data = vdupq_n_u8(0), key = vdupq_n_u8(0);
        uint8x16_t r1 = vaeseq_u8(data, key);
        uint8x16_t r2 = vaesdq_u8(data, key);
        r1 = vaesmcq_u8(r1);
        r2 = vaesimcq_u8(r2);

        // Hack... GCC optimizes away the code and returns true
        result = !!(vgetq_lane_u8(r1,0) | vgetq_lane_u8(r2,7));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_AES_AVAILABLE
}
#endif  // ARM32 or ARM64
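
// Note: this probe is reached through the library's runtime feature
// detection (the HasAES()-style queries in cpu.h); applications are not
// expected to call it directly.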

// ***************************** ARMv8 ***************************** //

#if (CRYPTOPP_ARM_AES_AVAILABLE)

ANONYMOUS_NAMESPACE_BEGIN
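
// A note on the intrinsics below: vaeseq_u8 performs AddRoundKey, SubBytes
// and ShiftRows, so MixColumns is a separate step (vaesmcq_u8) and the
// final round omits it. vaesdq_u8 and vaesimcq_u8 are the decryption pair.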

static inline void ARMV8_Enc_Block(uint64x2_t &data, const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(subkeys);
    const byte *keys = reinterpret_cast<const byte*>(subkeys);
    uint8x16_t block = vreinterpretq_u8_u64(data);

    // AES single round encryption
    block = vaeseq_u8(block, vld1q_u8(keys+0*16));
    // AES mix columns
    block = vaesmcq_u8(block);

    for (unsigned int i=1; i<rounds-1; i+=2)
    {
        // AES single round encryption
        block = vaeseq_u8(block, vld1q_u8(keys+i*16));
        // AES mix columns
        block = vaesmcq_u8(block);
        // AES single round encryption
        block = vaeseq_u8(block, vld1q_u8(keys+(i+1)*16));
        // AES mix columns
        block = vaesmcq_u8(block);
    }

    // AES single round encryption
    block = vaeseq_u8(block, vld1q_u8(keys+(rounds-1)*16));
    // Final Add (bitwise Xor)
    block = veorq_u8(block, vld1q_u8(keys+rounds*16));

    data = vreinterpretq_u64_u8(block);
}

static inline void ARMV8_Enc_6_Blocks(uint64x2_t &data0, uint64x2_t &data1,
    uint64x2_t &data2, uint64x2_t &data3, uint64x2_t &data4, uint64x2_t &data5,
    const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(subkeys);
    const byte *keys = reinterpret_cast<const byte*>(subkeys);

    uint8x16_t block0 = vreinterpretq_u8_u64(data0);
    uint8x16_t block1 = vreinterpretq_u8_u64(data1);
    uint8x16_t block2 = vreinterpretq_u8_u64(data2);
    uint8x16_t block3 = vreinterpretq_u8_u64(data3);
    uint8x16_t block4 = vreinterpretq_u8_u64(data4);
    uint8x16_t block5 = vreinterpretq_u8_u64(data5);

    uint8x16_t key;
    for (unsigned int i=0; i<rounds-1; ++i)
    {
        key = vld1q_u8(keys+i*16);
        // AES single round encryption and mix columns for each block
        block0 = vaeseq_u8(block0, key);
        block0 = vaesmcq_u8(block0);
        block1 = vaeseq_u8(block1, key);
        block1 = vaesmcq_u8(block1);
        block2 = vaeseq_u8(block2, key);
        block2 = vaesmcq_u8(block2);
        block3 = vaeseq_u8(block3, key);
        block3 = vaesmcq_u8(block3);
        block4 = vaeseq_u8(block4, key);
        block4 = vaesmcq_u8(block4);
        block5 = vaeseq_u8(block5, key);
        block5 = vaesmcq_u8(block5);
    }

    // AES single round encryption
    key = vld1q_u8(keys+(rounds-1)*16);
    block0 = vaeseq_u8(block0, key);
    block1 = vaeseq_u8(block1, key);
    block2 = vaeseq_u8(block2, key);
    block3 = vaeseq_u8(block3, key);
    block4 = vaeseq_u8(block4, key);
    block5 = vaeseq_u8(block5, key);

    // Final Add (bitwise Xor)
    key = vld1q_u8(keys+rounds*16);
    data0 = vreinterpretq_u64_u8(veorq_u8(block0, key));
    data1 = vreinterpretq_u64_u8(veorq_u8(block1, key));
    data2 = vreinterpretq_u64_u8(veorq_u8(block2, key));
    data3 = vreinterpretq_u64_u8(veorq_u8(block3, key));
    data4 = vreinterpretq_u64_u8(veorq_u8(block4, key));
    data5 = vreinterpretq_u64_u8(veorq_u8(block5, key));
}

static inline void ARMV8_Dec_Block(uint64x2_t &data, const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(subkeys);
    const byte *keys = reinterpret_cast<const byte*>(subkeys);
    uint8x16_t block = vreinterpretq_u8_u64(data);

    // AES single round decryption
    block = vaesdq_u8(block, vld1q_u8(keys+0*16));
    // AES inverse mix columns
    block = vaesimcq_u8(block);

    for (unsigned int i=1; i<rounds-1; i+=2)
    {
        // AES single round decryption
        block = vaesdq_u8(block, vld1q_u8(keys+i*16));
        // AES inverse mix columns
        block = vaesimcq_u8(block);
        // AES single round decryption
        block = vaesdq_u8(block, vld1q_u8(keys+(i+1)*16));
        // AES inverse mix columns
        block = vaesimcq_u8(block);
    }

    // AES single round decryption
    block = vaesdq_u8(block, vld1q_u8(keys+(rounds-1)*16));
    // Final Add (bitwise Xor)
    block = veorq_u8(block, vld1q_u8(keys+rounds*16));

    data = vreinterpretq_u64_u8(block);
}

static inline void ARMV8_Dec_6_Blocks(uint64x2_t &data0, uint64x2_t &data1,
    uint64x2_t &data2, uint64x2_t &data3, uint64x2_t &data4, uint64x2_t &data5,
    const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(subkeys);
    const byte *keys = reinterpret_cast<const byte*>(subkeys);

    uint8x16_t block0 = vreinterpretq_u8_u64(data0);
    uint8x16_t block1 = vreinterpretq_u8_u64(data1);
    uint8x16_t block2 = vreinterpretq_u8_u64(data2);
    uint8x16_t block3 = vreinterpretq_u8_u64(data3);
    uint8x16_t block4 = vreinterpretq_u8_u64(data4);
    uint8x16_t block5 = vreinterpretq_u8_u64(data5);

    uint8x16_t key;
    for (unsigned int i=0; i<rounds-1; ++i)
    {
        key = vld1q_u8(keys+i*16);
        // AES single round decryption and inverse mix columns for each block
        block0 = vaesdq_u8(block0, key);
        block0 = vaesimcq_u8(block0);
        block1 = vaesdq_u8(block1, key);
        block1 = vaesimcq_u8(block1);
        block2 = vaesdq_u8(block2, key);
        block2 = vaesimcq_u8(block2);
        block3 = vaesdq_u8(block3, key);
        block3 = vaesimcq_u8(block3);
        block4 = vaesdq_u8(block4, key);
        block4 = vaesimcq_u8(block4);
        block5 = vaesdq_u8(block5, key);
        block5 = vaesimcq_u8(block5);
    }

    // AES single round decryption
    key = vld1q_u8(keys+(rounds-1)*16);
    block0 = vaesdq_u8(block0, key);
    block1 = vaesdq_u8(block1, key);
    block2 = vaesdq_u8(block2, key);
    block3 = vaesdq_u8(block3, key);
    block4 = vaesdq_u8(block4, key);
    block5 = vaesdq_u8(block5, key);

    // Final Add (bitwise Xor)
    key = vld1q_u8(keys+rounds*16);
    data0 = vreinterpretq_u64_u8(veorq_u8(block0, key));
    data1 = vreinterpretq_u64_u8(veorq_u8(block1, key));
    data2 = vreinterpretq_u64_u8(veorq_u8(block2, key));
    data3 = vreinterpretq_u64_u8(veorq_u8(block3, key));
    data4 = vreinterpretq_u64_u8(veorq_u8(block4, key));
    data5 = vreinterpretq_u64_u8(veorq_u8(block5, key));
}

ANONYMOUS_NAMESPACE_END
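
// AdvancedProcessBlocks128_6x1_NEON comes from adv_simd.h (included above);
// it drives the supplied single-block and 6-block functions over the input,
// handling the xor and block-increment bookkeeping indicated by 'flags'.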

size_t Rijndael_Enc_AdvancedProcessBlocks_ARMV8(const word32 *subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_NEON(ARMV8_Enc_Block, ARMV8_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t Rijndael_Dec_AdvancedProcessBlocks_ARMV8(const word32 *subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_NEON(ARMV8_Dec_Block, ARMV8_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

#endif  // CRYPTOPP_ARM_AES_AVAILABLE

// ***************************** AES-NI ***************************** //

#if (CRYPTOPP_AESNI_AVAILABLE)

ANONYMOUS_NAMESPACE_BEGIN

/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
CRYPTOPP_ALIGN_DATA(16)
const word32 s_rconLE[] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
};

static inline void AESNI_Enc_Block(__m128i &block, MAYBE_CONST word32 *subkeys, unsigned int rounds)
{
    const __m128i* skeys = reinterpret_cast<const __m128i*>(subkeys);

    block = _mm_xor_si128(block, skeys[0]);
    for (unsigned int i=1; i<rounds-1; i+=2)
    {
        block = _mm_aesenc_si128(block, skeys[i]);
        block = _mm_aesenc_si128(block, skeys[i+1]);
    }
    block = _mm_aesenc_si128(block, skeys[rounds-1]);
    block = _mm_aesenclast_si128(block, skeys[rounds]);
}
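
// A minimal usage sketch (illustrative only; in, out, subkeys and rounds
// stand in for caller-provided state and are not names from this file):
//   __m128i b = _mm_loadu_si128(CONST_M128_CAST(in));
//   AESNI_Enc_Block(b, subkeys, rounds);
//   _mm_storeu_si128(M128_CAST(out), b);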

static inline void AESNI_Enc_4_Blocks(__m128i &block0, __m128i &block1, __m128i &block2, __m128i &block3,
    MAYBE_CONST word32 *subkeys, unsigned int rounds)
{
    const __m128i* skeys = reinterpret_cast<const __m128i*>(subkeys);

    __m128i rk = skeys[0];
    block0 = _mm_xor_si128(block0, rk);
    block1 = _mm_xor_si128(block1, rk);
    block2 = _mm_xor_si128(block2, rk);
    block3 = _mm_xor_si128(block3, rk);
    for (unsigned int i=1; i<rounds; i++)
    {
        rk = skeys[i];
        block0 = _mm_aesenc_si128(block0, rk);
        block1 = _mm_aesenc_si128(block1, rk);
        block2 = _mm_aesenc_si128(block2, rk);
        block3 = _mm_aesenc_si128(block3, rk);
    }
    rk = skeys[rounds];
    block0 = _mm_aesenclast_si128(block0, rk);
    block1 = _mm_aesenclast_si128(block1, rk);
    block2 = _mm_aesenclast_si128(block2, rk);
    block3 = _mm_aesenclast_si128(block3, rk);
}

static inline void AESNI_Dec_Block(__m128i &block, MAYBE_CONST word32 *subkeys, unsigned int rounds)
{
    const __m128i* skeys = reinterpret_cast<const __m128i*>(subkeys);

    block = _mm_xor_si128(block, skeys[0]);
    for (unsigned int i=1; i<rounds-1; i+=2)
    {
        block = _mm_aesdec_si128(block, skeys[i]);
        block = _mm_aesdec_si128(block, skeys[i+1]);
    }
    block = _mm_aesdec_si128(block, skeys[rounds-1]);
    block = _mm_aesdeclast_si128(block, skeys[rounds]);
}

static inline void AESNI_Dec_4_Blocks(__m128i &block0, __m128i &block1, __m128i &block2, __m128i &block3,
    MAYBE_CONST word32 *subkeys, unsigned int rounds)
{
    const __m128i* skeys = reinterpret_cast<const __m128i*>(subkeys);

    __m128i rk = skeys[0];
    block0 = _mm_xor_si128(block0, rk);
    block1 = _mm_xor_si128(block1, rk);
    block2 = _mm_xor_si128(block2, rk);
    block3 = _mm_xor_si128(block3, rk);
    for (unsigned int i=1; i<rounds; i++)
    {
        rk = skeys[i];
        block0 = _mm_aesdec_si128(block0, rk);
        block1 = _mm_aesdec_si128(block1, rk);
        block2 = _mm_aesdec_si128(block2, rk);
        block3 = _mm_aesdec_si128(block3, rk);
    }
    rk = skeys[rounds];
    block0 = _mm_aesdeclast_si128(block0, rk);
    block1 = _mm_aesdeclast_si128(block1, rk);
    block2 = _mm_aesdeclast_si128(block2, rk);
    block3 = _mm_aesdeclast_si128(block3, rk);
}

ANONYMOUS_NAMESPACE_END

void Rijndael_UncheckedSetKey_SSE4_AESNI(const byte *userKey, size_t keyLen, word32 *rk)
{
    const size_t rounds = keyLen / 4 + 6;
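    // keyLen of 16, 24 and 32 bytes yields 10, 12 and 14 rounds (AES-128/192/256).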
    const word32 *rc = s_rconLE;

    __m128i temp = _mm_loadu_si128(CONST_M128_CAST(userKey+keyLen-16));
    std::memcpy(rk, userKey, keyLen);

    // keySize: m_key allocates 4*(rounds+1) word32's.
    const size_t keySize = 4*(rounds+1);
    const word32* end = rk + keySize;

    while (true)
    {
        rk[keyLen/4] = rk[0] ^ _mm_extract_epi32(_mm_aeskeygenassist_si128(temp, 0), 3) ^ *(rc++);
        rk[keyLen/4+1] = rk[1] ^ rk[keyLen/4];
        rk[keyLen/4+2] = rk[2] ^ rk[keyLen/4+1];
        rk[keyLen/4+3] = rk[3] ^ rk[keyLen/4+2];

        if (rk + keyLen/4 + 4 == end)
            break;

        if (keyLen == 24)
        {
            rk[10] = rk[ 4] ^ rk[ 9];
            rk[11] = rk[ 5] ^ rk[10];
            temp = _mm_insert_epi32(temp, rk[11], 3);
        }
        else if (keyLen == 32)
        {
            temp = _mm_insert_epi32(temp, rk[11], 3);
            rk[12] = rk[ 4] ^ _mm_extract_epi32(_mm_aeskeygenassist_si128(temp, 0), 2);
            rk[13] = rk[ 5] ^ rk[12];
            rk[14] = rk[ 6] ^ rk[13];
            rk[15] = rk[ 7] ^ rk[14];
            temp = _mm_insert_epi32(temp, rk[15], 3);
        }
        else
        {
            temp = _mm_insert_epi32(temp, rk[7], 3);
        }

        rk += keyLen/4;
    }
}

void Rijndael_UncheckedSetKeyRev_AESNI(word32 *key, unsigned int rounds)
{
    unsigned int i, j;
    __m128i temp;

    vec_swap(*M128_CAST(key), *M128_CAST(key+4*rounds));

    for (i = 4, j = 4*rounds-4; i < j; i += 4, j -= 4)
    {
        temp = _mm_aesimc_si128(*M128_CAST(key+i));
        *M128_CAST(key+i) = _mm_aesimc_si128(*M128_CAST(key+j));
        *M128_CAST(key+j) = temp;
    }

    *M128_CAST(key+i) = _mm_aesimc_si128(*M128_CAST(key+i));
}
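
// The swap-and-aesimc pass above prepares the schedule for the equivalent
// inverse cipher: round keys are reversed, and all but the first and last
// are passed through InverseMixColumns (AESIMC).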

size_t Rijndael_Enc_AdvancedProcessBlocks_AESNI(const word32 *subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    // SunCC workaround
    MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);
    MAYBE_CONST byte* ib = MAYBE_UNCONST_CAST(byte*, inBlocks);
    MAYBE_CONST byte* xb = MAYBE_UNCONST_CAST(byte*, xorBlocks);

    return AdvancedProcessBlocks128_4x1_SSE(AESNI_Enc_Block, AESNI_Enc_4_Blocks,
        sk, rounds, ib, xb, outBlocks, length, flags);
}

size_t Rijndael_Dec_AdvancedProcessBlocks_AESNI(const word32 *subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);
    MAYBE_CONST byte* ib = MAYBE_UNCONST_CAST(byte*, inBlocks);
    MAYBE_CONST byte* xb = MAYBE_UNCONST_CAST(byte*, xorBlocks);

    return AdvancedProcessBlocks128_4x1_SSE(AESNI_Dec_Block, AESNI_Dec_4_Blocks,
        sk, rounds, ib, xb, outBlocks, length, flags);
}

#endif  // CRYPTOPP_AESNI_AVAILABLE

// ************************** Power 8 Crypto ************************** //

#if (CRYPTOPP_POWER8_AES_AVAILABLE)

ANONYMOUS_NAMESPACE_BEGIN
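
// Unlike the ARMv8 path, VecEncrypt/VecDecrypt (Power8 vcipher/vncipher)
// perform a complete AES round, including the column mixing, so no separate
// mix-columns step appears below; VecEncryptLast/VecDecryptLast handle the
// final round.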

/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
CRYPTOPP_ALIGN_DATA(16)
static const uint32_t s_rconBE[] = {
    0x01000000, 0x02000000, 0x04000000, 0x08000000,
    0x10000000, 0x20000000, 0x40000000, 0x80000000,
    0x1B000000, 0x36000000
};

static inline void POWER8_Enc_Block(uint32x4_p &block, const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(IsAlignedOn(subkeys, 16));
    const byte *keys = reinterpret_cast<const byte*>(subkeys);

    uint32x4_p k = VecLoad(keys);
    block = VecXor(block, k);

    for (size_t i=1; i<rounds-1; i+=2)
    {
        block = VecEncrypt(block, VecLoad( i*16,    keys));
        block = VecEncrypt(block, VecLoad((i+1)*16, keys));
    }

    block = VecEncrypt(block, VecLoad((rounds-1)*16, keys));
    block = VecEncryptLast(block, VecLoad(rounds*16, keys));
}

static inline void POWER8_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(IsAlignedOn(subkeys, 16));
    const byte *keys = reinterpret_cast<const byte*>(subkeys);

    uint32x4_p k = VecLoad(keys);
    block0 = VecXor(block0, k);
    block1 = VecXor(block1, k);
    block2 = VecXor(block2, k);
    block3 = VecXor(block3, k);
    block4 = VecXor(block4, k);
    block5 = VecXor(block5, k);

    for (size_t i=1; i<rounds; ++i)
    {
        k = VecLoad(i*16, keys);
        block0 = VecEncrypt(block0, k);
        block1 = VecEncrypt(block1, k);
        block2 = VecEncrypt(block2, k);
        block3 = VecEncrypt(block3, k);
        block4 = VecEncrypt(block4, k);
        block5 = VecEncrypt(block5, k);
    }

    k = VecLoad(rounds*16, keys);
    block0 = VecEncryptLast(block0, k);
    block1 = VecEncryptLast(block1, k);
    block2 = VecEncryptLast(block2, k);
    block3 = VecEncryptLast(block3, k);
    block4 = VecEncryptLast(block4, k);
    block5 = VecEncryptLast(block5, k);
}

static inline void POWER8_Dec_Block(uint32x4_p &block, const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(IsAlignedOn(subkeys, 16));
    const byte *keys = reinterpret_cast<const byte*>(subkeys);

    uint32x4_p k = VecLoad(rounds*16, keys);
    block = VecXor(block, k);

    for (size_t i=rounds-1; i>1; i-=2)
    {
        block = VecDecrypt(block, VecLoad( i*16,    keys));
        block = VecDecrypt(block, VecLoad((i-1)*16, keys));
    }

    block = VecDecrypt(block, VecLoad(16, keys));
    block = VecDecryptLast(block, VecLoad(0, keys));
}

static inline void POWER8_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
{
    CRYPTOPP_ASSERT(IsAlignedOn(subkeys, 16));
    const byte *keys = reinterpret_cast<const byte*>(subkeys);

    uint32x4_p k = VecLoad(rounds*16, keys);
    block0 = VecXor(block0, k);
    block1 = VecXor(block1, k);
    block2 = VecXor(block2, k);
    block3 = VecXor(block3, k);
    block4 = VecXor(block4, k);
    block5 = VecXor(block5, k);

    for (size_t i=rounds-1; i>0; --i)
    {
        k = VecLoad(i*16, keys);
        block0 = VecDecrypt(block0, k);
        block1 = VecDecrypt(block1, k);
        block2 = VecDecrypt(block2, k);
        block3 = VecDecrypt(block3, k);
        block4 = VecDecrypt(block4, k);
        block5 = VecDecrypt(block5, k);
    }

    k = VecLoad(0, keys);
    block0 = VecDecryptLast(block0, k);
    block1 = VecDecryptLast(block1, k);
    block2 = VecDecryptLast(block2, k);
    block3 = VecDecryptLast(block3, k);
    block4 = VecDecryptLast(block4, k);
    block5 = VecDecryptLast(block5, k);
}

ANONYMOUS_NAMESPACE_END

void Rijndael_UncheckedSetKey_POWER8(const byte* userKey, size_t keyLen, word32* rk, const byte* Se)
{
    const size_t rounds = keyLen / 4 + 6;
    const word32 *rc = s_rconBE;
    word32 *rkey = rk, temp;

    GetUserKey(BIG_ENDIAN_ORDER, rkey, keyLen/4, userKey, keyLen);

    // keySize: m_key allocates 4*(rounds+1) word32's.
    const size_t keySize = 4*(rounds+1);
    const word32* end = rkey + keySize;

    while (true)
    {
        temp = rkey[keyLen/4-1];
        word32 x = (word32(Se[GETBYTE(temp, 2)]) << 24) ^ (word32(Se[GETBYTE(temp, 1)]) << 16) ^
                   (word32(Se[GETBYTE(temp, 0)]) << 8) ^ Se[GETBYTE(temp, 3)];
        rkey[keyLen/4] = rkey[0] ^ x ^ *(rc++);
        rkey[keyLen/4+1] = rkey[1] ^ rkey[keyLen/4];
        rkey[keyLen/4+2] = rkey[2] ^ rkey[keyLen/4+1];
        rkey[keyLen/4+3] = rkey[3] ^ rkey[keyLen/4+2];

        if (rkey + keyLen/4 + 4 == end)
            break;

        if (keyLen == 24)
        {
            rkey[10] = rkey[ 4] ^ rkey[ 9];
            rkey[11] = rkey[ 5] ^ rkey[10];
        }
        else if (keyLen == 32)
        {
            temp = rkey[11];
            rkey[12] = rkey[ 4] ^ (word32(Se[GETBYTE(temp, 3)]) << 24) ^ (word32(Se[GETBYTE(temp, 2)]) << 16) ^
                       (word32(Se[GETBYTE(temp, 1)]) << 8) ^ Se[GETBYTE(temp, 0)];
            rkey[13] = rkey[ 5] ^ rkey[12];
            rkey[14] = rkey[ 6] ^ rkey[13];
            rkey[15] = rkey[ 7] ^ rkey[14];
        }
        rkey += keyLen/4;
    }

#if (CRYPTOPP_LITTLE_ENDIAN)
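    // On little-endian Power8, rewrite the stored big-endian subkeys so the
    // VSX loads in the encrypt/decrypt paths above see the expected byte order.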
    rkey = rk;
    const uint8x16_p mask = ((uint8x16_p){12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3});
    const uint8x16_p zero = {0};

    unsigned int i=0;
    for (i=0; i<rounds; i+=2, rkey+=8)
    {
        const uint8x16_p d1 = vec_vsx_ld( 0, (uint8_t*)rkey);
        const uint8x16_p d2 = vec_vsx_ld(16, (uint8_t*)rkey);
        vec_vsx_st(VecPermute(d1, zero, mask),  0, (uint8_t*)rkey);
        vec_vsx_st(VecPermute(d2, zero, mask), 16, (uint8_t*)rkey);
    }

    for ( ; i<rounds+1; i++, rkey+=4)
    {
        const uint8x16_p d = vec_vsx_ld( 0, (uint8_t*)rkey);
        vec_vsx_st(VecPermute(d, zero, mask), 0, (uint8_t*)rkey);
    }
#endif
}

size_t Rijndael_Enc_AdvancedProcessBlocks128_6x1_ALTIVEC(const word32 *subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(POWER8_Enc_Block, POWER8_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t Rijndael_Dec_AdvancedProcessBlocks128_6x1_ALTIVEC(const word32 *subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(POWER8_Dec_Block, POWER8_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

#endif  // CRYPTOPP_POWER8_AES_AVAILABLE

NAMESPACE_END