Add ARM SHA extensions for SHA1

Benchmarking on an ARMv8/AArch64 dev-board shows a 2.5x speedup for SHA-1.
Jeffrey Walton 2017-01-13 05:44:19 -05:00
parent 52f13eca9b
commit d3bb0e13de
2 changed files with 194 additions and 7 deletions
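To reproduce the 2.5x figure, time SHA-1 over a large buffer on builds with and without this commit; the library's own benchmark driver (cryptest.exe b) reports comparable throughput numbers. Below is a minimal standalone sketch against the public HashTransformation interface; the buffer size and iteration count are arbitrary choices, not values from the commit.

#include "sha.h"
#include <chrono>
#include <cstdio>
#include <vector>

// Hashes 512 MiB in 1 MiB chunks and prints throughput. Build and run once
// before and once after this commit to compare. Illustrative only.
int main()
{
    std::vector<unsigned char> buf(1024 * 1024, 0);
    CryptoPP::SHA1 sha;
    unsigned char digest[CryptoPP::SHA1::DIGESTSIZE];

    const int iterations = 512;
    const auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i)
        sha.Update(buf.data(), buf.size());
    sha.Final(digest);
    const std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;

    std::printf("SHA-1: %.1f MiB/s\n", iterations / elapsed.count());
    return 0;
}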

cpu.h (11 changed lines)

@@ -1,8 +1,9 @@
 // cpu.h - written and placed in the public domain by Wei Dai
 // updated for ARM by Jeffrey Walton
 //! \file cpu.h
 //! \brief Functions for CPU features and intrinsics
-//! \details The functions are used in X86/X32/X64 and NEON code paths
+//! \details The functions are used in X86/X32/X64 and ARM code paths
 #ifndef CRYPTOPP_CPU_H
 #define CRYPTOPP_CPU_H
@@ -21,11 +22,13 @@
 # if defined(__GNUC__)
 #  include <stdint.h>
 # endif
-# if CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE || defined(__ARM_NEON)
+# if defined(__ARM_NEON)
 #  include <arm_neon.h>
 # endif
-# if (CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE || CRYPTOPP_BOOL_ARM_CRC32_INTRINSICS_AVAILABLE) || defined(__ARM_ACLE)
-#  include <arm_acle.h>
+# if defined(__GNUC__) && !defined(__clang__)
+#  if defined(__ARM_ACLE) || defined(__ARM_FEATURE_CRC32) || defined(__ARM_FEATURE_CRYPTO)
+#   include <arm_acle.h>
+#  endif
 # endif
 #endif // ARM32 and ARM64 Headers
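The CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE macro only says the compiler can emit the SHA instructions; whether the processor actually implements them is a runtime question, answered by the HasSHA1() check in sha.cpp below. On Linux/AArch64 one common way to make that runtime check is the hwcaps interface. The sketch below assumes that platform and is not the library's actual detection code (which may instead use, e.g., a SIGILL-guarded instruction probe).

#include <sys/auxv.h>    // getauxval, AT_HWCAP
#if defined(__aarch64__)
# include <asm/hwcap.h>  // HWCAP_SHA1
#endif

// Returns true when the kernel reports SHA1 instruction support on
// AArch64 Linux; false elsewhere. Illustrative sketch only.
static bool CpuHasSHA1()
{
#if defined(__aarch64__) && defined(HWCAP_SHA1)
    return (getauxval(AT_HWCAP) & HWCAP_SHA1) != 0;
#else
    return false;
#endif
}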

sha.cpp (190 changed lines)

@@ -1,8 +1,9 @@
 // sha.cpp - modified by Wei Dai from Steve Reid's public domain sha1.c
 // Steve Reid implemented SHA-1. Wei Dai implemented SHA-2. Jeffrey Walton
-// implemented Intel SHA extensions based on Intel articles and code by
-// Sean Gulley. All code is in the public domain.
+// implemented Intel SHA extensions based on Intel articles and code by
+// Sean Gulley. Jeffrey Walton implemented ARM SHA based on ARM ARM and
+// code from Johannes Schneiders. All code is in the public domain.
 // use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM sha.cpp" to generate MASM code
@@ -293,6 +294,185 @@ static void SHA1_SSE_SHA_Transform(word32 *state, const word32 *data)
// end of Walton/Gulley's code //
/////////////////////////////////
//////////////////////////////////////
// start of Walton/Schneiders' code //
//////////////////////////////////////
#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE
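// Reference notes on the ARMv8 intrinsics used below (per the ARM ACLE;
// each maps to one instruction):
//   vsha1h_u32    - SHA1H: fixed rotate left by 30, used to derive the rotated E
//   vsha1cq_u32   - SHA1C: four rounds with the Ch (choose) function, rounds 0-19
//   vsha1pq_u32   - SHA1P: four rounds with the Parity (xor) function, rounds 20-39 and 60-79
//   vsha1mq_u32   - SHA1M: four rounds with the Maj (majority) function, rounds 40-59
//   vsha1su0q_u32 - SHA1SU0: first half of the message schedule update
//   vsha1su1q_u32 - SHA1SU1: second half, yielding the next four W words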
static void SHA1_ARM_SHA_Transform(word32 *state, const word32 *data)
{
    uint32x4_t C0, C1, C2, C3;
    uint32x4_t ABCD, ABCD_SAVED;
    uint32x4_t MSG0, MSG1, MSG2, MSG3;
    uint32x4_t TMP0, TMP1;
    uint32_t E0, E0_SAVED, E1;

    // Load initial values
    C0 = vdupq_n_u32(0x5A827999);
    C1 = vdupq_n_u32(0x6ED9EBA1);
    C2 = vdupq_n_u32(0x8F1BBCDC);
    C3 = vdupq_n_u32(0xCA62C1D6);

    ABCD = vld1q_u32(&state[0]);
    E0 = state[4];

    // Save current hash
    ABCD_SAVED = ABCD;
    E0_SAVED = E0;

    MSG0 = vld1q_u32(data + 0);
    MSG1 = vld1q_u32(data + 4);
    MSG2 = vld1q_u32(data + 8);
    MSG3 = vld1q_u32(data + 12);

    TMP0 = vaddq_u32(MSG0, C0);
    TMP1 = vaddq_u32(MSG1, C0);

    // Rounds 0-3
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1cq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG2, C0);
    MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

    // Rounds 4-7
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1cq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG3, C0);
    MSG0 = vsha1su1q_u32(MSG0, MSG3);
    MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

    // Rounds 8-11
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1cq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG0, C0);
    MSG1 = vsha1su1q_u32(MSG1, MSG0);
    MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

    // Rounds 12-15
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1cq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG1, C1);
    MSG2 = vsha1su1q_u32(MSG2, MSG1);
    MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

    // Rounds 16-19
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1cq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG2, C1);
    MSG3 = vsha1su1q_u32(MSG3, MSG2);
    MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

    // Rounds 20-23
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG3, C1);
    MSG0 = vsha1su1q_u32(MSG0, MSG3);
    MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

    // Rounds 24-27
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG0, C1);
    MSG1 = vsha1su1q_u32(MSG1, MSG0);
    MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

    // Rounds 28-31
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG1, C1);
    MSG2 = vsha1su1q_u32(MSG2, MSG1);
    MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

    // Rounds 32-35
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG2, C2);
    MSG3 = vsha1su1q_u32(MSG3, MSG2);
    MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

    // Rounds 36-39
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG3, C2);
    MSG0 = vsha1su1q_u32(MSG0, MSG3);
    MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

    // Rounds 40-43
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1mq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG0, C2);
    MSG1 = vsha1su1q_u32(MSG1, MSG0);
    MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

    // Rounds 44-47
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1mq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG1, C2);
    MSG2 = vsha1su1q_u32(MSG2, MSG1);
    MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

    // Rounds 48-51
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1mq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG2, C2);
    MSG3 = vsha1su1q_u32(MSG3, MSG2);
    MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

    // Rounds 52-55
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1mq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG3, C3);
    MSG0 = vsha1su1q_u32(MSG0, MSG3);
    MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

    // Rounds 56-59
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1mq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG0, C3);
    MSG1 = vsha1su1q_u32(MSG1, MSG0);
    MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

    // Rounds 60-63
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG1, C3);
    MSG2 = vsha1su1q_u32(MSG2, MSG1);
    MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

    // Rounds 64-67
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E0, TMP0);
    TMP0 = vaddq_u32(MSG2, C3);
    MSG3 = vsha1su1q_u32(MSG3, MSG2);
    MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

    // Rounds 68-71
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E1, TMP1);
    TMP1 = vaddq_u32(MSG3, C3);
    MSG0 = vsha1su1q_u32(MSG0, MSG3);

    // Rounds 72-75
    E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E0, TMP0);

    // Rounds 76-79
    E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
    ABCD = vsha1pq_u32(ABCD, E1, TMP1);

    E0 += E0_SAVED;
    ABCD = vaddq_u32(ABCD_SAVED, ABCD);

    // Save state
    vst1q_u32(&state[0], ABCD);
    state[4] = E0;
}
#endif
////////////////////////////////////
// end of Walton/Schneiders' code //
////////////////////////////////////
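For reference, each vsha1cq_u32/vsha1pq_u32/vsha1mq_u32 call above performs four SHA-1 rounds on the ABCD lanes at once. In scalar form a single round looks like the sketch below (standard FIPS 180-4 logic, not code from this commit); f and k switch by round group exactly as the c/p/m intrinsic variants and the C0..C3 constants do.

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

// One scalar SHA-1 round; s[] holds a,b,c,d,e and w is the schedule word W[i].
static void SHA1_ScalarRound(uint32_t s[5], uint32_t w, unsigned i)
{
    uint32_t f, k;
    if (i < 20)      { f = (s[1] & s[2]) | (~s[1] & s[3]); k = 0x5A827999; }                 // Ch  -> vsha1cq_u32
    else if (i < 40) { f = s[1] ^ s[2] ^ s[3]; k = 0x6ED9EBA1; }                             // Par -> vsha1pq_u32
    else if (i < 60) { f = (s[1] & s[2]) | (s[1] & s[3]) | (s[2] & s[3]); k = 0x8F1BBCDC; }  // Maj -> vsha1mq_u32
    else             { f = s[1] ^ s[2] ^ s[3]; k = 0xCA62C1D6; }                             // Par -> vsha1pq_u32

    const uint32_t t = rotl32(s[0], 5) + f + s[4] + k + w;
    s[4] = s[3]; s[3] = s[2]; s[2] = rotl32(s[1], 30); s[1] = s[0]; s[0] = t;
}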
pfnSHATransform InitializeSHA1Transform()
{
#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE
@@ -300,7 +480,11 @@ pfnSHATransform InitializeSHA1Transform()
         return &SHA1_SSE_SHA_Transform;
     else
 #endif
+#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE
+    if (HasSHA1())
+        return &SHA1_ARM_SHA_Transform;
+    else
+#endif
     return &SHA1_CXX_Transform;
 }
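The function-pointer dispatch means the HasSHA()/HasSHA1() feature checks run once, not per hashed block. A typical call site caches the result along these lines (a sketch of the pattern, not necessarily the exact surrounding code in sha.cpp):

void SHA1::Transform(word32 *digest, const word32 *data)
{
    // Resolved on first call; every later 64-byte block reuses the pointer.
    static const pfnSHATransform s_pfn = InitializeSHA1Transform();
    s_pfn(digest, data);
}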