Add AES-NI accelerated SM4 encryption (GH #540)

Thanks to Markku-Juhani Olavi Saarinen for the code. Also see https://github.com/mjosaarinen/sm4ni
Jeffrey Walton 2018-07-13 08:33:13 -04:00
parent 2f71e4d7d9
commit 3c21233440
6 changed files with 414 additions and 4 deletions


@@ -305,6 +305,7 @@ skipjack.h
sm3.cpp
sm3.h
sm4.cpp
sm4-simd.cpp
sm4.h
smartptr.h
socketft.cpp


@@ -282,6 +282,7 @@ ifeq ($(findstring -DCRYPTOPP_DISABLE_AESNI,$(CXXFLAGS)),)
HAVE_AES = $(shell echo | $(CXX) -x c++ $(CXXFLAGS) -msse4.1 -maes -dM -E - 2>/dev/null | $(GREP) -i -c __AES__)
ifeq ($(HAVE_AES),1)
AES_FLAG = -msse4.1 -maes
SM4_FLAG = -mssse3 -maes
endif
ifeq ($(findstring -DCRYPTOPP_DISABLE_SHA,$(CXXFLAGS)),)
HAVE_SHA = $(shell echo | $(CXX) -x c++ $(CXXFLAGS) -msse4.2 -msha -dM -E - 2>/dev/null | $(GREP) -i -c __SHA__)
@@ -322,6 +323,7 @@ ifeq ($(SUN_COMPILER),1)
ifeq ($(COUNT),0)
GCM_FLAG = -xarch=aes -D__PCLMUL__=1
AES_FLAG = -xarch=aes -D__AES__=1
SM4_FLAG = -xarch=aes -D__AES__=1
LDFLAGS += -xarch=aes
endif
COUNT := $(shell $(CXX) $(CXXFLAGS) -E -xarch=sha -xdumpmacros /dev/null 2>&1 | $(GREP) -i -c "illegal")
@@ -1138,6 +1140,10 @@ simon-simd.o : simon-simd.cpp
speck-simd.o : speck-simd.cpp
$(CXX) $(strip $(CXXFLAGS) $(SPECK_FLAG) -c) $<
# AESNI available
sm4-simd.o : sm4-simd.cpp
$(CXX) $(strip $(CXXFLAGS) $(SM4_FLAG) -c) $<
# IBM XLC -O3 optimization bug
ifeq ($(XLC_COMPILER),1)
sm3.o : sm3.cpp


@@ -307,6 +307,7 @@ ifneq ($(IS_i686)$(IS_x86_64),00)
HAVE_AES = $(shell echo | $(CXX) -x c++ $(CXXFLAGS) -msse4.1 -maes -dM -E - 2>/dev/null | $(EGREP) -i -c __AES__)
ifeq ($(HAVE_AES),1)
AES_FLAG = -msse4.1 -maes
SM4_FLAG = -mssse3 -maes
endif
HAVE_SHA = $(shell echo | $(CXX) -x c++ $(CXXFLAGS) -msse4.2 -msha -dM -E - 2>/dev/null | $(EGREP) -i -c __SHA__)
ifeq ($(HAVE_SHA),1)
@@ -545,6 +546,10 @@ simon-simd.o : simon-simd.cpp
speck-simd.o : speck-simd.cpp
$(CXX) $(strip $(CXXFLAGS) $(SPECK_FLAG) -c) $<
# AESNI available
sm4-simd.o : sm4-simd.cpp
$(CXX) $(strip $(CXXFLAGS) $(SM4_FLAG) -c) $<
%.o : %.cpp
$(CXX) $(strip $(CXXFLAGS) -c) $<

sm4-simd.cpp (new file)

@@ -0,0 +1,350 @@
// sm4-simd.cpp - written and placed in the public domain by
// Markku-Juhani O. Saarinen and Jeffrey Walton
//
// This source file uses intrinsics and built-ins to gain access to
// AESNI, ARM NEON and ARMv8a, and Power7 Altivec instructions. A separate
// source file is needed because additional CXXFLAGS are required to enable
// the appropriate instruction sets in some build configurations.
//
// Based on Markku-Juhani O. Saarinen's work at https://github.com/mjosaarinen/sm4ni.
//
#include "pch.h"
#include "config.h"
#include "sm4.h"
#include "misc.h"
#include "adv-simd.h"
// Uncomment for benchmarking C++ against SSE.
// Do so in both sm4.cpp and sm4-simd.cpp.
// #undef CRYPTOPP_AESNI_AVAILABLE
#if (CRYPTOPP_AESNI_AVAILABLE)
# include <smmintrin.h>
# include <wmmintrin.h>
#endif
#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include <arm_neon.h>
#endif
// Can't use CRYPTOPP_ARM_XXX_AVAILABLE because too many
// compilers don't follow ACLE conventions for the include.
#if defined(CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <stdint.h>
# include <arm_acle.h>
#endif
// Squash MS LNK4221 and libtool warnings
extern const char SM4_SIMD_FNAME[] = __FILE__;
ANONYMOUS_NAMESPACE_BEGIN
using CryptoPP::word32;
#if (CRYPTOPP_AESNI_AVAILABLE)
template <unsigned int R>
inline __m128i ShiftLeft(const __m128i& val)
{
return _mm_slli_epi32(val, R);
}
template <unsigned int R>
inline __m128i ShiftRight(const __m128i& val)
{
return _mm_srli_epi32(val, R);
}
template <unsigned int R>
inline __m128i ShiftLeft64(const __m128i& val)
{
return _mm_slli_epi64(val, R);
}
template <unsigned int R>
inline __m128i ShiftRight64(const __m128i& val)
{
return _mm_srli_epi64(val, R);
}
template <unsigned int R>
inline __m128i RotateLeft(const __m128i& val)
{
return _mm_or_si128(
_mm_slli_epi32(val, R), _mm_srli_epi32(val, 32-R));
}
template <unsigned int R>
inline __m128i RotateRight(const __m128i& val)
{
return _mm_or_si128(
_mm_slli_epi32(val, 32-R), _mm_srli_epi32(val, R));
}
template <>
inline __m128i RotateLeft<8>(const __m128i& val)
{
const __m128i r08 = _mm_set_epi64x(0x0E0D0C0F0A09080B, 0x0605040702010003);
return _mm_shuffle_epi8(val, r08);
}
template <>
inline __m128i RotateLeft<16>(const __m128i& val)
{
const __m128i mask = _mm_set_epi64x(0x0D0C0F0E09080B0A, 0x0504070601000302);
return _mm_shuffle_epi8(val, mask);
}
template <>
inline __m128i RotateLeft<24>(const __m128i& val)
{
const __m128i mask = _mm_set_epi64x(0x0C0F0E0D080B0A09, 0x0407060500030201);
return _mm_shuffle_epi8(val, mask);
}
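
These three specializations replace a shift-and-or rotate with a single byte shuffle: the rotation amounts are whole bytes, so a per-lane byte permutation suffices. A minimal standalone check of the RotateLeft<8> mask (an assumed test harness, not part of this commit; compile with -mssse3):

#include <tmmintrin.h>
#include <cstdint>
#include <cstdio>

static uint32_t rotl32(uint32_t v, unsigned int r)
{
    return (v << r) | (v >> (32 - r));
}

int main()
{
    // Same mask as RotateLeft<8>: each group of four mask bytes
    // selects {3,0,1,2}, i.e., a left rotate by 8 within the lane.
    const __m128i r08 = _mm_set_epi64x(0x0E0D0C0F0A09080B, 0x0605040702010003);
    const uint32_t in[4] = {0x01234567, 0x89ABCDEF, 0xDEADBEEF, 0x00112233};
    uint32_t out[4];
    const __m128i x = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in));
    _mm_storeu_si128(reinterpret_cast<__m128i*>(out), _mm_shuffle_epi8(x, r08));
    for (int i = 0; i < 4; ++i)
        std::printf("%08X %08X\n", out[i], rotl32(in[i], 8)); // columns match
    return 0;
}
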
/// \brief Unpack XMM words
/// \tparam IDX the element from each XMM word
/// \param a the first XMM word
/// \param b the second XMM word
/// \param c the third XMM word
/// \param d the fourth XMM word
/// \details UnpackXMM selects the IDX element from a, b, c, d and returns a concatenation
/// equivalent to <tt>a[IDX] || b[IDX] || c[IDX] || d[IDX]</tt>.
template <unsigned int IDX>
inline __m128i UnpackXMM(const __m128i& a, const __m128i& b, const __m128i& c, const __m128i& d)
{
// Should not be instantiated
CRYPTOPP_UNUSED(a); CRYPTOPP_UNUSED(b);
CRYPTOPP_UNUSED(c); CRYPTOPP_UNUSED(d);
CRYPTOPP_ASSERT(0);
return _mm_setzero_si128();
}
template <>
inline __m128i UnpackXMM<0>(const __m128i& a, const __m128i& b, const __m128i& c, const __m128i& d)
{
const __m128i r1 = _mm_unpacklo_epi32(a, b);
const __m128i r2 = _mm_unpacklo_epi32(c, d);
return _mm_unpacklo_epi64(r1, r2);
}
template <>
inline __m128i UnpackXMM<1>(const __m128i& a, const __m128i& b, const __m128i& c, const __m128i& d)
{
const __m128i r1 = _mm_unpacklo_epi32(a, b);
const __m128i r2 = _mm_unpacklo_epi32(c, d);
return _mm_unpackhi_epi64(r1, r2);
}
template <>
inline __m128i UnpackXMM<2>(const __m128i& a, const __m128i& b, const __m128i& c, const __m128i& d)
{
const __m128i r1 = _mm_unpackhi_epi32(a, b);
const __m128i r2 = _mm_unpackhi_epi32(c, d);
return _mm_unpacklo_epi64(r1, r2);
}
template <>
inline __m128i UnpackXMM<3>(const __m128i& a, const __m128i& b, const __m128i& c, const __m128i& d)
{
const __m128i r1 = _mm_unpackhi_epi32(a, b);
const __m128i r2 = _mm_unpackhi_epi32(c, d);
return _mm_unpackhi_epi64(r1, r2);
}
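
Together the four UnpackXMM<IDX> overloads transpose a 4x4 matrix of 32-bit words, converting four blocks into four slices that each hold the same word position from every block. A small sketch with a hypothetical Unpack1 helper mirroring UnpackXMM<1> (not part of this commit; compile with -msse2):

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

// Mirrors UnpackXMM<1>: selects element 1 of each input word.
static __m128i Unpack1(__m128i a, __m128i b, __m128i c, __m128i d)
{
    const __m128i r1 = _mm_unpacklo_epi32(a, b); // a0 b0 a1 b1
    const __m128i r2 = _mm_unpacklo_epi32(c, d); // c0 d0 c1 d1
    return _mm_unpackhi_epi64(r1, r2);           // a1 b1 c1 d1
}

int main()
{
    const uint32_t a[4] = {0,1,2,3},     b[4] = {10,11,12,13},
                   c[4] = {20,21,22,23}, d[4] = {30,31,32,33};
    uint32_t r[4];
    const __m128i va = _mm_loadu_si128(reinterpret_cast<const __m128i*>(a));
    const __m128i vb = _mm_loadu_si128(reinterpret_cast<const __m128i*>(b));
    const __m128i vc = _mm_loadu_si128(reinterpret_cast<const __m128i*>(c));
    const __m128i vd = _mm_loadu_si128(reinterpret_cast<const __m128i*>(d));
    _mm_storeu_si128(reinterpret_cast<__m128i*>(r), Unpack1(va, vb, vc, vd));
    std::printf("%u %u %u %u\n", r[0], r[1], r[2], r[3]); // prints: 1 11 21 31
    return 0;
}
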
/// \brief Unpack an XMM word
/// \tparam IDX the element from each XMM word
/// \param v the first XMM word
/// \details UnpackXMM selects the IDX element from v and returns a concatenation
/// equivalent to <tt>v[IDX] || v[IDX] || v[IDX] || v[IDX]</tt>.
template <unsigned int IDX>
inline __m128i UnpackXMM(const __m128i& v)
{
// Should not be instantiated
CRYPTOPP_UNUSED(v); CRYPTOPP_ASSERT(0);
return _mm_setzero_si128();
}
template <>
inline __m128i UnpackXMM<0>(const __m128i& v)
{
// Splat to all lanes
return _mm_shuffle_epi8(v, _mm_set_epi8(3,2,1,0, 3,2,1,0, 3,2,1,0, 3,2,1,0));
}
template <>
inline __m128i UnpackXMM<1>(const __m128i& v)
{
// Splat to all lanes
return _mm_shuffle_epi8(v, _mm_set_epi8(7,6,5,4, 7,6,5,4, 7,6,5,4, 7,6,5,4));
}
template <>
inline __m128i UnpackXMM<2>(const __m128i& v)
{
// Splat to all lanes
return _mm_shuffle_epi8(v, _mm_set_epi8(11,10,9,8, 11,10,9,8, 11,10,9,8, 11,10,9,8));
}
template <>
inline __m128i UnpackXMM<3>(const __m128i& v)
{
// Splat to all lanes
return _mm_shuffle_epi8(v, _mm_set_epi8(15,14,13,12, 15,14,13,12, 15,14,13,12, 15,14,13,12));
}
template <unsigned int IDX>
inline __m128i RepackXMM(const __m128i& a, const __m128i& b, const __m128i& c, const __m128i& d)
{
return UnpackXMM<IDX>(a, b, c, d);
}
template <unsigned int IDX>
inline __m128i RepackXMM(const __m128i& v)
{
return UnpackXMM<IDX>(v);
}
inline void SM4_Encrypt(__m128i &block0, __m128i &block1,
__m128i &block2, __m128i &block3, const word32 *subkeys)
{
// nibble mask
const __m128i c0f = _mm_set_epi64x(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F);
// flip all bytes in all 32-bit words
const __m128i flp = _mm_set_epi64x(0x0C0D0E0F08090A0B, 0x0405060700010203);
// inverse shift rows
const __m128i shr = _mm_set_epi64x(0x0306090C0F020508, 0x0B0E0104070A0D00);
// Affine transform 1 (low and high nibbles)
const __m128i m1l = _mm_set_epi64x(0xC7C1B4B222245157, 0x9197E2E474720701);
const __m128i m1h = _mm_set_epi64x(0xF052B91BF95BB012, 0xE240AB09EB49A200);
// Affine transform 2 (low and high nibbles)
const __m128i m2l = _mm_set_epi64x(0xEDD14478172BBE82, 0x5B67F2CEA19D0834);
const __m128i m2h = _mm_set_epi64x(0x11CDBE62CC1063BF, 0xAE7201DD73AFDC00);
__m128i t0 = UnpackXMM<0>(block0, block1, block2, block3);
__m128i t1 = UnpackXMM<1>(block0, block1, block2, block3);
__m128i t2 = UnpackXMM<2>(block0, block1, block2, block3);
__m128i t3 = UnpackXMM<3>(block0, block1, block2, block3);
t0 = _mm_shuffle_epi8(t0, flp);
t1 = _mm_shuffle_epi8(t1, flp);
t2 = _mm_shuffle_epi8(t2, flp);
t3 = _mm_shuffle_epi8(t3, flp);
const unsigned int ROUNDS = 32;
for (unsigned int i = 0; i < ROUNDS; i++)
{
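// Broadcast one 32-bit subkey to every lane: _mm_load_ss places the word
// in lane 0 and the (0,0,0,0) shuffle replicates it across the register.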
const __m128i k = _mm_shuffle_epi32(_mm_castps_si128(
_mm_load_ss((const float*)(subkeys+i))), _MM_SHUFFLE(0,0,0,0));
__m128i x, y;
x = _mm_xor_si128(t1, _mm_xor_si128(t2, _mm_xor_si128(t3, k)));
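// Compute the SM4 S-box using the AES S-box: both are affine-equivalent
// to inversion in GF(2^8), so a pre-affine transform (m1l/m1h), one
// AESENCLAST for the inversion, and a post-affine transform (m2l/m2h)
// that also undoes AES's own output affine reproduce the SM4 S-box.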
y = _mm_and_si128(x, c0f); // inner affine
y = _mm_shuffle_epi8(m1l, y);
x = _mm_and_si128(ShiftRight64<4>(x), c0f);
x = _mm_xor_si128(_mm_shuffle_epi8(m1h, x), y);
x = _mm_shuffle_epi8(x, shr); // inverse ShiftRows
x = _mm_aesenclast_si128(x, c0f); // AESNI instruction
y = _mm_andnot_si128(x, c0f); // outer affine
y = _mm_shuffle_epi8(m2l, y);
x = _mm_and_si128(ShiftRight64<4>(x), c0f);
x = _mm_xor_si128(_mm_shuffle_epi8(m2h, x), y);
// 4 parallel L1 linear transforms
y = _mm_xor_si128(x, RotateLeft<8>(x));
y = _mm_xor_si128(y, RotateLeft<16>(x));
y = _mm_xor_si128(ShiftLeft<2>(y), ShiftRight<30>(y));
x = _mm_xor_si128(x, _mm_xor_si128(y, RotateLeft<24>(x)));
// rotate registers
x = _mm_xor_si128(x, t0);
t0 = t1; t1 = t2;
t2 = t3; t3 = x;
}
t0 = _mm_shuffle_epi8(t0, flp);
t1 = _mm_shuffle_epi8(t1, flp);
t2 = _mm_shuffle_epi8(t2, flp);
t3 = _mm_shuffle_epi8(t3, flp);
block0 = RepackXMM<0>(t3,t2,t1,t0);
block1 = RepackXMM<1>(t3,t2,t1,t0);
block2 = RepackXMM<2>(t3,t2,t1,t0);
block3 = RepackXMM<3>(t3,t2,t1,t0);
}
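
For reference, the loop above evaluates the standard SM4 round function on four blocks at once. A scalar model of one round (sketch only; the real 256-byte S-box table lives in sm4.cpp, and an identity placeholder stands in here so the structure compiles):

#include <cstdint>

static uint32_t rotl32(uint32_t v, unsigned int r)
{
    return (v << r) | (v >> (32 - r));
}

// Placeholder for the SM4 S-box (see the table in sm4.cpp).
static uint8_t SBOX(uint8_t b) { return b; }

// tau: byte-wise S-box layer
static uint32_t tau(uint32_t a)
{
    return (uint32_t(SBOX((a >> 24) & 0xFF)) << 24) | (uint32_t(SBOX((a >> 16) & 0xFF)) << 16) |
           (uint32_t(SBOX((a >>  8) & 0xFF)) <<  8) |  uint32_t(SBOX(a & 0xFF));
}

// L: linear diffusion. The SIMD loop computes the equivalent form
// x ^ rot24(x) ^ rot2(x ^ rot8(x) ^ rot16(x)).
static uint32_t L(uint32_t b)
{
    return b ^ rotl32(b, 2) ^ rotl32(b, 10) ^ rotl32(b, 18) ^ rotl32(b, 24);
}

// One of 32 rounds: x4 = x0 ^ L(tau(x1 ^ x2 ^ x3 ^ rk))
static uint32_t SM4Round(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t rk)
{
    return x0 ^ L(tau(x1 ^ x2 ^ x3 ^ rk));
}
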
inline void SM4_Enc_4_Blocks(__m128i &block0, __m128i &block1,
__m128i &block2, __m128i &block3, const word32 *subkeys, unsigned int /*rounds*/)
{
SM4_Encrypt(block0, block1, block2, block3, subkeys);
}
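// SM4 decryption is encryption with the 32 round keys applied in reverse
// order, so the Dec wrappers below reuse the same core routine; the key
// schedule is reversed at key setup for decryption.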
inline void SM4_Dec_4_Blocks(__m128i &block0, __m128i &block1,
__m128i &block2, __m128i &block3, const word32 *subkeys, unsigned int /*rounds*/)
{
SM4_Encrypt(block0, block1, block2, block3, subkeys);
}
inline void SM4_Enc_Block(__m128i &block0,
const word32 *subkeys, unsigned int /*rounds*/)
{
__m128i t1 = _mm_setzero_si128();
__m128i t2 = _mm_setzero_si128();
__m128i t3 = _mm_setzero_si128();
SM4_Encrypt(block0, t1, t2, t3, subkeys);
}
inline void SM4_Dec_Block(__m128i &block0,
const word32 *subkeys, unsigned int /*rounds*/)
{
__m128i t1 = _mm_setzero_si128();
__m128i t2 = _mm_setzero_si128();
__m128i t3 = _mm_setzero_si128();
SM4_Encrypt(block0, t1, t2, t3, subkeys);
}
#endif // CRYPTOPP_AESNI_AVAILABLE
ANONYMOUS_NAMESPACE_END
NAMESPACE_BEGIN(CryptoPP)
#if defined(CRYPTOPP_AESNI_AVAILABLE)
size_t SM4_Enc_AdvancedProcessBlocks_AESNI(const word32* subKeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
return AdvancedProcessBlocks128_4x1_SSE(SM4_Enc_Block, SM4_Enc_4_Blocks,
subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_AESNI_AVAILABLE
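
AdvancedProcessBlocks128_4x1_SSE comes from adv-simd.h and handles the block-wise dispatch: it feeds four blocks at a time to the 4-block routine and falls back to the 1-block routine for the tail. A simplified model with a hypothetical Process128_4x1 helper (the real routine additionally honors xorBlocks and the in/out increment flags):

#include <emmintrin.h>
#include <cstddef>

typedef unsigned int word32;
typedef void (*OneBlockFn)(__m128i&, const word32*, unsigned int);
typedef void (*FourBlockFn)(__m128i&, __m128i&, __m128i&, __m128i&, const word32*, unsigned int);

static size_t Process128_4x1(OneBlockFn f1, FourBlockFn f4, const word32* keys,
    unsigned int rounds, const unsigned char* in, unsigned char* out, size_t length)
{
    while (length >= 64) // four 16-byte blocks in parallel
    {
        __m128i b0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in +  0));
        __m128i b1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in + 16));
        __m128i b2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in + 32));
        __m128i b3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in + 48));
        f4(b0, b1, b2, b3, keys, rounds);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(out +  0), b0);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(out + 16), b1);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(out + 32), b2);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(out + 48), b3);
        in += 64; out += 64; length -= 64;
    }
    while (length >= 16) // single-block tail
    {
        __m128i b0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(in));
        f1(b0, keys, rounds);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(out), b0);
        in += 16; out += 16; length -= 16;
    }
    return length; // bytes not processed
}
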
#if defined(CRYPTOPP_ARM_NEON_AVAILABLE)
size_t SM4_Enc_AdvancedProcessBlocks_NEON(const word32* subKeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
uint32x4_t unused; // Avoid template argument deduction/substitution failures
return AdvancedProcessBlocks128_4x1_NEON(SM4_Enc_Block, SM4_Enc_4_Blocks,
unused, subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
size_t SM4_Dec_AdvancedProcessBlocks_NEON(const word32* subKeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
uint32x4_t unused; // Avoid template argument deduction/substitution failures
return AdvancedProcessBlocks128_4x1_NEON(SM4_Dec_Block, SM4_Dec_4_Blocks,
unused, subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_ARM_NEON_AVAILABLE
NAMESPACE_END

sm4.cpp

@@ -89,6 +89,22 @@ ANONYMOUS_NAMESPACE_END
NAMESPACE_BEGIN(CryptoPP)
#if CRYPTOPP_SM4_ADVANCED_PROCESS_BLOCKS
# if defined(CRYPTOPP_AESNI_AVAILABLE)
extern size_t SM4_Enc_AdvancedProcessBlocks_AESNI(const word32* subKeys, size_t rounds,
const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
# endif
#endif
std::string SM4::Enc::AlgorithmProvider() const
{
#if defined(CRYPTOPP_AESNI_AVAILABLE)
if (HasAESNI())
return "AESNI";
#endif
return "C++";
}
void SM4::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength, const NameValuePairs &params)
{
CRYPTOPP_ASSERT(keyLength == 16);
@@ -174,4 +190,18 @@ void SM4::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byt
OutBlock oblk(xorBlock, outBlock); oblk(m_wspace[3])(m_wspace[2])(m_wspace[1])(m_wspace[0]);
}
#if CRYPTOPP_SM4_ADVANCED_PROCESS_BLOCKS
size_t SM4::Enc::AdvancedProcessBlocks(const byte *inBlocks, const byte *xorBlocks,
byte *outBlocks, size_t length, word32 flags) const
{
#if defined(CRYPTOPP_AESNI_AVAILABLE)
if (HasAESNI()) {
return SM4_Enc_AdvancedProcessBlocks_AESNI(m_rkeys, 32,
inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif
return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif // CRYPTOPP_SM4_ADVANCED_PROCESS_BLOCKS
NAMESPACE_END
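
With this change, modes that drive the cipher through AdvancedProcessBlocks use the AESNI path whenever HasAESNI() reports support. A usage sketch (assumes the usual Crypto++ headers; the provider string depends on the CPU):

#include "sm4.h"
#include "modes.h"
#include "osrng.h"
#include <iostream>

int main()
{
    using namespace CryptoPP;
    AutoSeededRandomPool prng;
    SecByteBlock key(SM4::DEFAULT_KEYLENGTH), iv(SM4::BLOCKSIZE);
    prng.GenerateBlock(key, key.size());
    prng.GenerateBlock(iv, iv.size());

    // CTR mode drives the cipher through AdvancedProcessBlocks,
    // so bulk encryption benefits from the 4-block AESNI path.
    CTR_Mode<SM4>::Encryption ctr;
    ctr.SetKeyWithIV(key, key.size(), iv);

    byte buf[64] = {0}; // four 16-byte blocks
    ctr.ProcessData(buf, buf, sizeof(buf));

    SM4::Encryption cipher;
    std::cout << cipher.AlgorithmProvider() << std::endl; // "AESNI" or "C++"
    return 0;
}
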

sm4.h

@@ -4,8 +4,11 @@
/// \brief Classes for the SM4 block cipher
/// \details SM4 is a block cipher designed by Xiaoyun Wang, et al. The block cipher is part of the
/// Chinese State Cryptography Administration portfolio. The cipher was formerly known as SMS4.
/// \sa <A HREF="http://eprint.iacr.org/2008/329.pdf">SMS4 Encryption Algorithm for Wireless Networks</A> and
/// <A HREF="http://github.com/guanzhi/GmSSL">Reference implementation using OpenSSL</A>.
/// \details SM4 encryption is accelerated on machines with AES-NI. Decryption is not accelerated because
/// it is not profitable. Thanks to Markku-Juhani Olavi Saarinen for help and the code.
/// \sa <A HREF="http://eprint.iacr.org/2008/329.pdf">SMS4 Encryption Algorithm for Wireless Networks</A>,
/// <A HREF="http://github.com/guanzhi/GmSSL">Reference implementation using OpenSSL</A> and
/// <A HREF="https://github.com/mjosaarinen/sm4ni">Markku-Juhani Olavi Saarinen's GitHub</A>.
/// \since Crypto++ 6.0
#ifndef CRYPTOPP_SM4_H
@@ -15,6 +18,10 @@
#include "seckey.h"
#include "secblock.h"
#if (CRYPTOPP_BOOL_X64 || CRYPTOPP_BOOL_X32 || CRYPTOPP_BOOL_X86)
# define CRYPTOPP_SM4_ADVANCED_PROCESS_BLOCKS 1
#endif
NAMESPACE_BEGIN(CryptoPP)
/// \brief SM4 block cipher information
@@ -42,7 +49,7 @@ public:
{
protected:
void UncheckedSetKey(const byte *userKey, unsigned int keyLength, const NameValuePairs &params);
protected:
SecBlock<word32, AllocatorWithCleanup<word32> > m_rkeys;
mutable SecBlock<word32, AllocatorWithCleanup<word32> > m_wspace;
};
@@ -50,16 +57,27 @@ public:
/// \brief Provides implementation for encryption transformation
/// \details Enc provides implementation for encryption transformation. All key
/// sizes are supported.
/// \since Crypto++ 6.0
/// \details SM4 encryption is accelerated on machines with AES-NI. Decryption is
/// not accelerated because it is not profitable. Thanks to Markku-Juhani Olavi
/// Saarinen.
/// \since Crypto++ 6.0, AESNI encryption since Crypto++ 7.1
class CRYPTOPP_NO_VTABLE Enc : public Base
{
public:
std::string AlgorithmProvider() const;
protected:
void ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const;
#if CRYPTOPP_SM4_ADVANCED_PROCESS_BLOCKS
size_t AdvancedProcessBlocks(const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags) const;
#endif
};
/// \brief Provides implementation for decryption transformation
/// \details Dec provides implementation for decryption transformation. All key
/// sizes are supported.
/// \details SM4 encryption is accelerated on machines with AES-NI. Decryption is
/// not accelerated because it is not profitable. Thanks to Markku-Juhani Olavi
/// Saarinen.
/// \since Crypto++ 6.0
class CRYPTOPP_NO_VTABLE Dec : public Base
{