inline T1 VecPermute(const T1 vec1, const T1 vec2, const T2 mask)
{
return (T1)vec_perm(vec1, (T1)vec2, (uint8x16_p)mask);
}
//@}
/// \name SHIFT AND ROTATE OPERATIONS
//@{
/// \brief Shift a vector left
/// \tparam C shift byte count
/// \tparam T vector type
/// \param vec the vector
/// \returns vector
/// \details VecShiftLeftOctet() returns a new vector after shifting the
/// concatenation of the zero vector and the source vector by the specified
/// number of bytes. The return vector is the same type as vec.
/// \details On big endian machines VecShiftLeftOctet() is vec_sld(a, z,
/// c). On little endian machines VecShiftLeftOctet() is translated to
/// vec_sld(z, a, 16-c). You should always call the function as
/// if on a big endian machine as shown below.
///
/// uint8x16_p x = VecLoad(ptr);
/// uint8x16_p y = VecShiftLeftOctet<12>(x);
///
/// \par Wraps
/// vec_sld
/// \sa "Is vec_sld endian sensitive?" on Stack Overflow
/// \since Crypto++ 6.0
template <unsigned int C, class T>
inline T VecShiftLeftOctet(const T vec)
{
const T zero = {0};
if (C >= 16)
{
// Out of range
return zero;
}
else if (C == 0)
{
// Noop
return vec;
}
else
{
#if defined(CRYPTOPP_BIG_ENDIAN)
enum { R=C&0xf };
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)zero, R);
#else
enum { R=(16-C)&0xf }; // Linux xlC 13.1 workaround in Debug builds
return (T)vec_sld((uint8x16_p)zero, (uint8x16_p)vec, R);
#endif
}
}
/// \brief Shift a vector right
/// \tparam C shift byte count
/// \tparam T vector type
/// \param vec the vector
/// \returns vector
/// \details VecShiftRightOctet() returns a new vector after shifting the
/// concatenation of the zero vector and the source vector by the specified
/// number of bytes. The return vector is the same type as vec.
/// \details On big endian machines VecShiftRightOctet() is vec_sld(z, a,
/// 16-c). On little endian machines VecShiftRightOctet() is translated to
/// vec_sld(a, z, c). You should always call the function as
/// if on a big endian machine as shown below.
///
/// uint8x16_p x = VecLoad(ptr);
/// uint8x16_p y = VecShiftRightOctet<12>(x);
///
/// \par Wraps
/// vec_sld
/// \sa "Is vec_sld endian sensitive?" on Stack Overflow
/// \since Crypto++ 6.0
template <unsigned int C, class T>
inline T VecShiftRightOctet(const T vec)
{
const T zero = {0};
if (C >= 16)
{
// Out of range
return zero;
}
else if (C == 0)
{
// Noop
return vec;
}
else
{
#if defined(CRYPTOPP_BIG_ENDIAN)
enum { R=(16-C)&0xf }; // Linux xlC 13.1 workaround in Debug builds
return (T)vec_sld((uint8x16_p)zero, (uint8x16_p)vec, R);
#else
enum { R=C&0xf };
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)zero, R);
#endif
}
}
/// \brief Rotate a vector left
/// \tparam C shift byte count
/// \tparam T vector type
/// \param vec the vector
/// \returns vector
/// \details VecRotateLeftOctet() returns a new vector after rotating the
/// concatenation of the source vector with itself by the specified
/// number of bytes. The return vector is the same type as vec.
/// \par Wraps
/// vec_sld
/// \sa "Is vec_sld endian sensitive?" on Stack Overflow
/// \since Crypto++ 6.0
template <unsigned int C, class T>
inline T VecRotateLeftOctet(const T vec)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
enum { R = C&0xf };
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#else
enum { R=(16-C)&0xf }; // Linux xlC 13.1 workaround in Debug builds
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#endif
}
/// \brief Rotate a vector right
/// \tparam C shift byte count
/// \tparam T vector type
/// \param vec the vector
/// \returns vector
/// \details VecRotateRightOctet() returns a new vector after rotating the
/// concatenation of the source vector with itself by the specified
/// number of bytes. The return vector is the same type as vec.
/// \par Wraps
/// vec_sld
/// \sa "Is vec_sld endian sensitive?" on Stack Overflow
/// \since Crypto++ 6.0
template <unsigned int C, class T>
inline T VecRotateRightOctet(const T vec)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
enum { R=(16-C)&0xf }; // Linux xlC 13.1 workaround in Debug builds
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#else
enum { R = C&0xf };
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, R);
#endif
}
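// Usage sketch for the octet shifts and rotates (illustrative only; ptr is
// an assumed pointer to the 16 bytes {0,1,...,15}, and VecLoad() is defined
// earlier in this header). All three functions take the big endian view
// regardless of the machine's actual byte order:
//
//   uint8x16_p x = VecLoad(ptr);
//   uint8x16_p a = VecShiftLeftOctet<4>(x);   // bytes 4..15, then four 0's
//   uint8x16_p b = VecShiftRightOctet<4>(x);  // four 0's, then bytes 0..11
//   uint8x16_p c = VecRotateLeftOctet<4>(x);  // bytes 4..15, then bytes 0..3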
/// \brief Rotate a packed vector left
/// \tparam C rotate bit count
/// \param vec the vector
/// \returns vector
/// \details VecRotateLeft() rotates each element in a packed vector by bit count.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 7.0
template <unsigned int C>
inline uint32x4_p VecRotateLeft(const uint32x4_p vec)
{
const uint32x4_p m = {C, C, C, C};
return vec_rl(vec, m);
}
/// \brief Shift a packed vector left
/// \tparam C shift bit count
/// \param vec the vector
/// \returns vector
/// \details VecShiftLeft() shifts each element in a packed vector by bit count.
/// \par Wraps
/// vec_sl
/// \since Crypto++ 8.1
template <unsigned int C>
inline uint32x4_p VecShiftLeft(const uint32x4_p vec)
{
const uint32x4_p m = {C, C, C, C};
return vec_sl(vec, m);
}
/// \brief Rotate a packed vector right
/// \tparam C rotate bit count
/// \param vec the vector
/// \returns vector
/// \details VecRotateRight() rotates each element in a packed vector by bit count.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 7.0
template <unsigned int C>
inline uint32x4_p VecRotateRight(const uint32x4_p vec)
{
const uint32x4_p m = {32-C, 32-C, 32-C, 32-C};
return vec_rl(vec, m);
}
/// \brief Shift a packed vector right
/// \tparam C shift bit count
/// \param vec the vector
/// \returns vector
/// \details VecShiftRight() shifts each element in a packed vector by bit count.
/// \par Wraps
/// vec_sr
/// \since Crypto++ 8.1
template <unsigned int C>
inline uint32x4_p VecShiftRight(const uint32x4_p vec)
{
const uint32x4_p m = {C, C, C, C};
return vec_sr(vec, m);
}
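// The packed shifts and rotates combine naturally with VecAdd() and VecXor()
// from earlier in this header for ARX ciphers. A ChaCha-style quarter round
// as an illustrative sketch, not part of this header's API:
//
//   inline void QuarterRound(uint32x4_p& a, uint32x4_p& b,
//                            uint32x4_p& c, uint32x4_p& d)
//   {
//       a = VecAdd(a, b); d = VecXor(d, a); d = VecRotateLeft<16>(d);
//       c = VecAdd(c, d); b = VecXor(b, c); b = VecRotateLeft<12>(b);
//       a = VecAdd(a, b); d = VecXor(d, a); d = VecRotateLeft<8>(d);
//       c = VecAdd(c, d); b = VecXor(b, c); b = VecRotateLeft<7>(b);
//   }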
#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Rotate a packed vector left
/// \tparam C rotate bit count
/// \param vec the vector
/// \returns vector
/// \details VecRotateLeft() rotates each element in a packed vector by bit count.
/// \details VecRotateLeft() with 64-bit elements is available on POWER8 and above.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 8.0
template <unsigned int C>
inline uint64x2_p VecRotateLeft(const uint64x2_p vec)
{
const uint64x2_p m = {C, C};
return vec_rl(vec, m);
}
/// \brief Shift a packed vector left
/// \tparam C shift bit count
/// \param vec the vector
/// \returns vector
/// \details VecShiftLeft() shifts each element in a packed vector by bit count.
/// \details VecShiftLeft() with 64-bit elements is available on POWER8 and above.
/// \par Wraps
/// vec_sl
/// \since Crypto++ 8.1
template <unsigned int C>
inline uint64x2_p VecShiftLeft(const uint64x2_p vec)
{
const uint64x2_p m = {C, C};
return vec_sl(vec, m);
}
/// \brief Rotate a packed vector right
/// \tparam C rotate bit count
/// \param vec the vector
/// \returns vector
/// \details VecRotateRight() rotates each element in a packed vector by bit count.
/// \details VecRotateRight() with 64-bit elements is available on POWER8 and above.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 8.0
template <unsigned int C>
inline uint64x2_p VecRotateRight(const uint64x2_p vec)
{
const uint64x2_p m = {64-C, 64-C};
return vec_rl(vec, m);
}
/// \brief Shift a packed vector right
/// \tparam C shift bit count
/// \param vec the vector
/// \returns vector
/// \details VecShiftRight() shifts each element in a packed vector by bit count.
/// \details VecShiftRight() with 64-bit elements is available on POWER8 and above.
/// \par Wraps
/// vec_sr
/// \since Crypto++ 8.1
template <unsigned int C>
inline uint64x2_p VecShiftRight(const uint64x2_p vec)
{
const uint64x2_p m = {C, C};
return vec_sr(vec, m);
}
#endif // ARCH_PWR8
//@}
/// \name 32-BIT ENVIRONMENTS
//@{
/// \brief Add two 64-bit vectors
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecAdd64() returns a new vector from vec1 and vec2.
/// vec1 and vec2 are added as if uint64x2_p vectors. On POWER7
/// and below VecAdd64() manages the carries from the elements.
/// \par Wraps
/// vec_add for POWER8, vec_addc, vec_perm, vec_add for Altivec
/// \since Crypto++ 8.3
inline uint32x4_p VecAdd64(const uint32x4_p& vec1, const uint32x4_p& vec2)
{
// 64-bit elements available at POWER7 with VSX, but addudm requires POWER8
#if defined(_ARCH_PWR8)
return (uint32x4_p)vec_add((uint64x2_p)vec1, (uint64x2_p)vec2);
#else
// The carry mask selects carries for elements 1 and 3 and sets remaining
// elements to 0. The mask also shifts the carried values left by 4 bytes
// so the carries are added to elements 0 and 2.
const uint8x16_p cmask = {4,5,6,7, 16,16,16,16, 12,13,14,15, 16,16,16,16};
const uint32x4_p zero = {0, 0, 0, 0};
uint32x4_p cy = vec_addc(vec1, vec2);
cy = vec_perm(cy, zero, cmask);
return vec_add(vec_add(vec1, vec2), cy);
#endif
}
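// Worked example of the Altivec carry handling above (illustrative). Viewed
// as big endian 64-bit values, a = {0x00000000FFFFFFFF, 1} and b = {1, 0}.
// vec_addc() reports the carry out of 32-bit element 1, the permute moves it
// into element 0, and the final vec_add() folds it in:
//
//   const uint32x4_p a = {0, 0xFFFFFFFF, 0, 1};
//   const uint32x4_p b = {0, 1, 0, 0};
//   const uint32x4_p s = VecAdd64(a, b);  // {1,0,0,1}, i.e. {0x100000000, 1}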
#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Add two 64-bit vectors
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecAdd64() returns a new vector from vec1 and vec2.
/// vec1 and vec2 are added as if uint64x2_p vectors. On POWER7
/// and below VecAdd64() manages the carries from the elements.
/// \par Wraps
/// vec_add for POWER8
/// \since Crypto++ 8.3
inline uint64x2_p VecAdd64(const uint64x2_p& vec1, const uint64x2_p& vec2)
{
// 64-bit elements available at POWER7 with VSX, but addudm requires POWER8
return vec_add(vec1, vec2);
}
#endif
/// \brief Subtract two 64-bit vectors
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecSub64() returns a new vector from vec1 and vec2.
/// vec1 and vec2 are subtracted as if uint64x2_p vectors. On POWER7
/// and below VecSub64() manages the borrows from the elements.
/// \par Wraps
/// vec_sub for POWER8, vec_subc, vec_andc, vec_perm, vec_sub for Altivec
/// \since Crypto++ 8.3
inline uint32x4_p VecSub64(const uint32x4_p& vec1, const uint32x4_p& vec2)
{
#if defined(_ARCH_PWR8)
// 64-bit elements available at POWER7 with VSX, but subudm requires POWER8
return (uint32x4_p)vec_sub((uint64x2_p)vec1, (uint64x2_p)vec2);
#else
// The borrow mask selects borrows for elements 1 and 3 and sets remaining
// elements to 0. The mask also shifts the borrowed values left by 4 bytes
// so the borrows are subtracted from elements 0 and 2.
const uint8x16_p bmask = {4,5,6,7, 16,16,16,16, 12,13,14,15, 16,16,16,16};
const uint32x4_p amask = {1, 1, 1, 1};
const uint32x4_p zero = {0, 0, 0, 0};
// subc sets the complement of the borrow, so we have to un-complement it using andc.
uint32x4_p bw = vec_subc(vec1, vec2);
bw = vec_andc(amask, bw);
bw = vec_perm(bw, zero, bmask);
return vec_sub(vec_sub(vec1, vec2), bw);
#endif
}
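// A companion borrow example (illustrative). Viewed as big endian 64-bit
// values, a = {0x100000000, 0} and b = {1, 0}; the borrow out of 32-bit
// element 1 is recovered from vec_subc(), un-complemented, and subtracted
// from element 0:
//
//   const uint32x4_p a = {1, 0, 0, 0};
//   const uint32x4_p b = {0, 1, 0, 0};
//   const uint32x4_p d = VecSub64(a, b);  // {0,0xFFFFFFFF,0,0} = {0xFFFFFFFF, 0}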
#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Subtract two 64-bit vectors
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecSub64() returns a new vector from vec1 and vec2.
/// vec1 and vec2 are subtracted as if uint64x2_p vectors. On POWER7
/// and below VecSub64() manages the borrows from the elements.
/// \par Wraps
/// vec_sub for POWER8
/// \since Crypto++ 8.3
inline uint64x2_p VecSub64(const uint64x2_p& vec1, const uint64x2_p& vec2)
{
// 64-bit elements available at POWER7 with VSX, but subudm requires POWER8
return vec_sub(vec1, vec2);
}
#endif
/// \brief Rotate a 64-bit packed vector left
/// \tparam C rotate bit count
/// \param val the vector
/// \returns vector
/// \details VecRotateLeft() rotates each element in a packed vector by bit count.
/// \details val is rotated as if uint64x2_p.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 8.3
template <unsigned int C>
inline uint32x4_p VecRotateLeft64(const uint32x4_p val)
{
#if defined(_ARCH_PWR8)
return (uint32x4_p)VecRotateLeft((uint64x2_p)val);
#else
// C=0, 32, or 64 needs special handling. That is S32 and S64 below.
enum {BR=(C>=32), S64=C&63, S32=C&31};
// Get the low bits, shift them to high bits
uint32x4_p t1 = VecShiftLeft<S32>(val);
// Get the high bits, shift them to low bits
uint32x4_p t2 = VecShiftRight<32-S32>(val);
if (S64 == 0)
{
const uint8x16_p m = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
return VecPermute(val, m);
}
else if (S64 == 32)
{
const uint8x16_p m = {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
return VecPermute(val, m);
}
else if (BR) // Big rotate amount?
{
const uint8x16_p m = {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
t1 = VecPermute(t1, m);
}
else
{
const uint8x16_p m = {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
t2 = VecPermute(t2, m);
}
return vec_or(t1, t2);
#endif
}
// Specializations. C=8 is used by Speck128.
template<>
inline uint32x4_p VecRotateLeft64<8>(const uint32x4_p val)
{
const uint8x16_p m = { 1,2,3,4, 5,6,7,0, 9,10,11,12, 13,14,15,8 };
return VecPermute(val, m);
}
#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Rotate a 64-bit packed vector left
/// \tparam C rotate bit count
/// \param val the vector
/// \returns vector
/// \details VecRotateLeft64() rotates each element in a packed vector by bit count.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 8.3
template <unsigned int C>
inline uint64x2_p VecRotateLeft64(const uint64x2_p val)
{
return VecRotateLeft<C>(val);
}
#endif
/// \brief Rotate a 64-bit packed vector right
/// \tparam C rotate bit count
/// \param val the vector
/// \returns vector
/// \details VecRotateRight64() rotates each element in a packed vector by bit count.
/// \details val is rotated as if uint64x2_p.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 8.3
template <unsigned int C>
inline uint32x4_p VecRotateRight64(const uint32x4_p val)
{
#if defined(_ARCH_PWR8)
return (uint32x4_p)VecRotateRight((uint64x2_p)val);
#else
// C=0, 32, or 64 needs special handling. That is S32 and S64 below.
enum {BR=(C>=32), S64=C&63, S32=C&31};
// Get the low bits, shift them to high bits
uint32x4_p t1 = VecShiftRight<S32>(val);
// Get the high bits, shift them to low bits
uint32x4_p t2 = VecShiftLeft<32-S32>(val);
if (S64 == 0)
{
const uint8x16_p m = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
return VecPermute(val, m);
}
else if (S64 == 32)
{
const uint8x16_p m = {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
return VecPermute(val, m);
}
else if (BR) // Big rotate amount?
{
const uint8x16_p m = {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
t1 = VecPermute(t1, m);
}
else
{
const uint8x16_p m = {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11};
t2 = VecPermute(t2, m);
}
return vec_or(t1, t2);
#endif
}
// Specializations. C=8 is used by Speck128.
template<>
inline uint32x4_p VecRotateRight64<8>(const uint32x4_p val)
{
const uint8x16_p m = { 7,0,1,2, 3,4,5,6, 15,8,9,10, 11,12,13,14 };
return VecPermute(val, m);
}
#if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Rotate a 64-bit packed vector right
/// \tparam C rotate bit count
/// \param val the vector
/// \returns vector
/// \details VecRotateRight64() rotates each element in a packed vector by bit count.
/// \par Wraps
/// vec_rl
/// \since Crypto++ 8.3
template <unsigned int C>
inline uint64x2_p VecRotateRight64(const uint64x2_p val)
{
return VecRotateRight<C>(val);
}
#endif
/// \brief AND two vectors
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecAnd64() returns a new vector from vec1 and vec2. The return vector
/// is the same type as vec1.
/// \details VecAnd64() is a convenience function that simply performs a VecAnd().
/// \par Wraps
/// vec_and
/// \since Crypto++ 8.3
template <class T1, class T2>
inline T1 VecAnd64(const T1 vec1, const T2 vec2)
{
return (T1)vec_and(vec1, (T1)vec2);
}
/// \brief OR two vectors
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecOr64() returns a new vector from vec1 and vec2. The return vector
/// is the same type as vec1.
/// \details VecOr64() is a convenience function that simply performs a VecOr().
/// \par Wraps
/// vec_or
/// \since Crypto++ 8.3
template <class T1, class T2>
inline T1 VecOr64(const T1 vec1, const T2 vec2)
{
return (T1)vec_or(vec1, (T1)vec2);
}
/// \brief XOR two vectors
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \details VecXor64() returns a new vector from vec1 and vec2. The return vector
/// is the same type as vec1.
/// \details VecXor64() is a convenience function that simply performs a VecXor().
/// \par Wraps
/// vec_xor
/// \since Crypto++ 8.3
template <class T1, class T2>
inline T1 VecXor64(const T1 vec1, const T2 vec2)
{
return (T1)vec_xor(vec1, (T1)vec2);
}
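// The 64-bit helpers above exist so 64-bit ARX ciphers can run in 32-bit
// environments. A Speck128-style round as an illustrative sketch; the C=8
// specialization of VecRotateRight64() serves exactly this pattern:
//
//   inline void Speck128Round(uint32x4_p& x, uint32x4_p& y, const uint32x4_p k)
//   {
//       x = VecAdd64(VecRotateRight64<8>(x), y);
//       x = VecXor64(x, k);
//       y = VecXor64(VecRotateLeft64<3>(y), x);
//   }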
//@}
/// \name OTHER OPERATIONS
//@{
/// \brief Merge two vectors
/// \tparam T vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \par Wraps
/// vec_mergel
/// \since Crypto++ 8.1
template <class T>
inline T VecMergeLow(const T vec1, const T vec2)
{
return vec_mergel(vec1, vec2);
}
/// \brief Merge two vectors
/// \tparam T vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns vector
/// \par Wraps
/// vec_mergeh
/// \since Crypto++ 8.1
template <class T>
inline T VecMergeHigh(const T vec1, const T vec2)
{
return vec_mergeh(vec1, vec2);
}
/// \brief Extract a dword from a vector
/// \tparam T vector type
/// \param val the vector
/// \returns vector created from low dword
/// \details VecGetLow() extracts the low dword from a vector. The low dword
/// is composed of the least significant bits and occupies bytes 8 through 15
/// when viewed as a big endian array. The return vector is the same type as
/// the original vector and padded with 0's in the most significant bit positions.
/// \par Wraps
/// vec_sld
/// \since Crypto++ 7.0
template <class T>
inline T VecGetLow(const T val)
{
#if defined(CRYPTOPP_BIG_ENDIAN) && (defined(__VSX__) || defined(_ARCH_PWR8))
const T zero = {0};
return (T)VecMergeLow((uint64x2_p)zero, (uint64x2_p)val);
#else
return VecShiftRightOctet<8>(VecShiftLeftOctet<8>(val));
#endif
}
/// \brief Extract a dword from a vector
/// \tparam T vector type
/// \param val the vector
/// \returns vector created from high dword
/// \details VecGetHigh() extracts the high dword from a vector. The high dword
/// is composed of the most significant bits and occupies bytes 0 through 7
/// when viewed as a big endian array. The return vector is the same type as
/// the original vector and padded with 0's in the most significant bit positions.
/// \par Wraps
/// vec_sld
/// \since Crypto++ 7.0
template <class T>
inline T VecGetHigh(const T val)
{
#if defined(CRYPTOPP_BIG_ENDIAN) && (defined(__VSX__) || defined(_ARCH_PWR8))
const T zero = {0};
return (T)VecMergeHigh((uint64x2_p)zero, (uint64x2_p)val);
#else
return VecShiftRightOctet<8>(val);
#endif
}
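// Extraction sketch (illustrative): with x holding bytes {0,1,...,15}, the
// low and high dwords land in bytes 8..15 with zero padding above them:
//
//   const uint8x16_p x = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
//   const uint8x16_p lo = VecGetLow(x);   // {0,...,0, 8,9,10,11,12,13,14,15}
//   const uint8x16_p hi = VecGetHigh(x);  // {0,...,0, 0,1,2,3,4,5,6,7}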
/// \brief Exchange high and low double words
/// \tparam T vector type
/// \param vec the vector
/// \returns vector
/// \par Wraps
/// vec_sld
/// \since Crypto++ 7.0
template <class T>
inline T VecSwapWords(const T vec)
{
return (T)vec_sld((uint8x16_p)vec, (uint8x16_p)vec, 8);
}
//@}
/// \name COMPARISON
//@{
/// \brief Compare two vectors
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns true if vec1 equals vec2, false otherwise
/// \details VecEqual() performs a bitwise compare. The vector element types do
/// not matter.
/// \par Wraps
/// vec_all_eq
/// \since Crypto++ 8.0
template <class T1, class T2>
inline bool VecEqual(const T1 vec1, const T2 vec2)
{
return 1 == vec_all_eq((uint32x4_p)vec1, (uint32x4_p)vec2);
}
/// \brief Compare two vectors
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param vec1 the first vector
/// \param vec2 the second vector
/// \returns true if vec1 does not equal vec2, false otherwise
/// \details VecNotEqual() performs a bitwise compare. The vector element types do
/// not matter.
/// \par Wraps
/// vec_all_eq
/// \since Crypto++ 8.0
template <class T1, class T2>
inline bool VecNotEqual(const T1 vec1, const T2 vec2)
{
return 0 == vec_all_eq((uint32x4_p)vec1, (uint32x4_p)vec2);
}
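// Because the compare is bitwise, mixed element types are fine. An
// illustrative sketch:
//
//   const uint32x4_p a = {1, 2, 3, 4};
//   const uint8x16_p b = (uint8x16_p)a;
//   CRYPTOPP_ASSERT(VecEqual(a, b));      // same 128 bits, different types
//   CRYPTOPP_ASSERT(!VecNotEqual(a, b));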
//@}
//////////////////////// Power8 Crypto ////////////////////////
// __CRYPTO__ alone is not enough. Clang will define __CRYPTO__
// when it is not available, like with Power7. Sigh...
#if (defined(_ARCH_PWR8) && defined(__CRYPTO__)) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \name POLYNOMIAL MULTIPLICATION
//@{
/// \brief Polynomial multiplication
/// \param a the first term
/// \param b the second term
/// \returns vector product
/// \details VecPolyMultiply() performs polynomial multiplication. POWER8
/// polynomial multiplication multiplies the high and low terms, and then
/// XOR's the high and low products. That is, the result is ah*bh XOR
/// al*bl. This behavior differs from Intel polynomial
/// multiplication. To obtain a single product without the XOR, set
/// one of the high or low terms to 0. For example, setting ah=0
/// results in 0*bh XOR al*bl = al*bl.
/// \par Wraps
/// __vpmsumw, __builtin_altivec_crypto_vpmsumw and __builtin_crypto_vpmsumw.
/// \since Crypto++ 8.1
inline uint32x4_p VecPolyMultiply(const uint32x4_p& a, const uint32x4_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return __vpmsumw (a, b);
#elif defined(__clang__)
return __builtin_altivec_crypto_vpmsumw (a, b);
#else
return __builtin_crypto_vpmsumw (a, b);
#endif
}
/// \brief Polynomial multiplication
/// \param a the first term
/// \param b the second term
/// \returns vector product
/// \details VecPolyMultiply() performs polynomial multiplication. POWER8
/// polynomial multiplication multiplies the high and low terms, and then
/// XOR's the high and low products. That is, the result is ah*bh XOR
/// al*bl. It is different behavior than Intel polynomial
/// multiplication. To obtain a single product without the XOR, then set
/// one of the high or low terms to 0. For example, setting ah=0
/// results in 0*bh XOR al*bl = al*bl.
/// \par Wraps
/// __vpmsumd, __builtin_altivec_crypto_vpmsumd and __builtin_crypto_vpmsumd.
/// \since Crypto++ 8.1
inline uint64x2_p VecPolyMultiply(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return __vpmsumd (a, b);
#elif defined(__clang__)
return __builtin_altivec_crypto_vpmsumd (a, b);
#else
return __builtin_crypto_vpmsumd (a, b);
#endif
}
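// To recover a single product without the XOR of the high and low terms,
// zero one term of each pair as described above. An illustrative sketch
// using the dword extractors from later in this header:
//
//   const uint64x2_p t1 = VecGetLow(a);            // {0, a_low}
//   const uint64x2_p t2 = VecGetLow(b);            // {0, b_low}
//   const uint64x2_p p = VecPolyMultiply(t1, t2);  // 0*0 XOR a_low*b_low
//
// This is the pattern the VecIntelMultiply*() functions below use.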
/// \brief Polynomial multiplication
/// \param a the first term
/// \param b the second term
/// \returns vector product
/// \details VecIntelMultiply00() performs polynomial multiplication and presents
/// the result like Intel's c = _mm_clmulepi64_si128(a, b, 0x00).
/// The 0x00 indicates the low 64-bits of a and b
/// are multiplied.
/// \note An Intel XMM register is composed of 128-bits. The leftmost bit
/// is MSB and numbered 127, while the rightmost bit is LSB and numbered 0.
/// \par Wraps
/// __vpmsumd, __builtin_altivec_crypto_vpmsumd and __builtin_crypto_vpmsumd.
/// \since Crypto++ 8.0
inline uint64x2_p VecIntelMultiply00(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
return VecSwapWords(VecPolyMultiply(VecGetHigh(a), VecGetHigh(b)));
#else
return VecPolyMultiply(VecGetHigh(a), VecGetHigh(b));
#endif
}
/// \brief Polynomial multiplication
/// \param a the first term
/// \param b the second term
/// \returns vector product
/// \details VecIntelMultiply01() performs polynomial multiplication and presents
/// the result like Intel's c = _mm_clmulepi64_si128(a, b, 0x01).
/// The 0x01 indicates the low 64-bits of a and high
/// 64-bits of b are multiplied.
/// \note An Intel XMM register is composed of 128-bits. The leftmost bit
/// is MSB and numbered 127, while the rightmost bit is LSB and numbered 0.
/// \par Wraps
/// __vpmsumd, __builtin_altivec_crypto_vpmsumd and __builtin_crypto_vpmsumd.
/// \since Crypto++ 8.0
inline uint64x2_p VecIntelMultiply01(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
return VecSwapWords(VecPolyMultiply(a, VecGetHigh(b)));
#else
return VecPolyMultiply(a, VecGetHigh(b));
#endif
}
/// \brief Polynomial multiplication
/// \param a the first term
/// \param b the second term
/// \returns vector product
/// \details VecIntelMultiply10() performs polynomial multiplication and presents
/// the result like Intel's c = _mm_clmulepi64_si128(a, b, 0x10).
/// The 0x10 indicates the high 64-bits of a and low
/// 64-bits of b are multiplied.
/// \note An Intel XMM register is composed of 128-bits. The leftmost bit
/// is MSB and numbered 127, while the rightmost bit is LSB and numbered 0.
/// \par Wraps
/// __vpmsumd, __builtin_altivec_crypto_vpmsumd and __builtin_crypto_vpmsumd.
/// \since Crypto++ 8.0
inline uint64x2_p VecIntelMultiply10(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
return VecSwapWords(VecPolyMultiply(VecGetHigh(a), b));
#else
return VecPolyMultiply(VecGetHigh(a), b);
#endif
}
/// \brief Polynomial multiplication
/// \param a the first term
/// \param b the second term
/// \returns vector product
/// \details VecIntelMultiply11() performs polynomial multiplication and presents
/// the result like Intel's c = _mm_clmulepi64_si128(a, b, 0x11).
/// The 0x11 indicates the high 64-bits of a and b
/// are multiplied.
/// \note An Intel XMM register is composed of 128-bits. The leftmost bit
/// is MSB and numbered 127, while the rightmost bit is LSB and numbered 0.
/// \par Wraps
/// __vpmsumd, __builtin_altivec_crypto_vpmsumd and __builtin_crypto_vpmsumd.
/// \since Crypto++ 8.0
inline uint64x2_p VecIntelMultiply11(const uint64x2_p& a, const uint64x2_p& b)
{
#if defined(CRYPTOPP_BIG_ENDIAN)
return VecSwapWords(VecPolyMultiply(VecGetLow(a), b));
#else
return VecPolyMultiply(VecGetLow(a), b);
#endif
}
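// The four variants tile a full 128x128 carryless multiplication, as in
// schoolbook GCM-style implementations. An illustrative sketch of the four
// partial products:
//
//   const uint64x2_p c00 = VecIntelMultiply00(a, b);  // lo(a) * lo(b)
//   const uint64x2_p c01 = VecIntelMultiply01(a, b);  // lo(a) * hi(b)
//   const uint64x2_p c10 = VecIntelMultiply10(a, b);  // hi(a) * lo(b)
//   const uint64x2_p c11 = VecIntelMultiply11(a, b);  // hi(a) * hi(b)
//
// The middle terms c01 and c10 straddle the 64-bit boundary and must be
// shifted into the outer terms to assemble the 256-bit product.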
//@}
/// \name AES ENCRYPTION
//@{
/// \brief One round of AES encryption
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param state the state vector
/// \param key the subkey vector
/// \details VecEncrypt() performs one round of AES encryption of state
/// using subkey key. The return vector is the same type as state.
/// \details VecEncrypt() is available on POWER8 and above.
/// \par Wraps
/// __vcipher, __builtin_altivec_crypto_vcipher, __builtin_crypto_vcipher
/// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
template <class T1, class T2>
inline T1 VecEncrypt(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return (T1)__vcipher((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
return (T1)__builtin_altivec_crypto_vcipher((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
return (T1)__builtin_crypto_vcipher((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
#endif
}
/// \brief Final round of AES encryption
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param state the state vector
/// \param key the subkey vector
/// \details VecEncryptLast() performs the final round of AES encryption
/// of state using subkey key. The return vector is the same type as state.
/// \details VecEncryptLast() is available on POWER8 and above.
/// \par Wraps
/// __vcipherlast, __builtin_altivec_crypto_vcipherlast, __builtin_crypto_vcipherlast
/// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
template <class T1, class T2>
inline T1 VecEncryptLast(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return (T1)__vcipherlast((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
return (T1)__builtin_altivec_crypto_vcipherlast((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
return (T1)__builtin_crypto_vcipherlast((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
#endif
}
/// \brief One round of AES decryption
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param state the state vector
/// \param key the subkey vector
/// \details VecDecrypt() performs one round of AES decryption of state
/// using subkey key. The return vector is the same type as state.
/// \details VecDecrypt() is available on POWER8 and above.
/// \par Wraps
/// __vncipher, __builtin_altivec_crypto_vncipher, __builtin_crypto_vncipher
/// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
template <class T1, class T2>
inline T1 VecDecrypt(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return (T1)__vncipher((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
return (T1)__builtin_altivec_crypto_vncipher((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
return (T1)__builtin_crypto_vncipher((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
#endif
}
/// \brief Final round of AES decryption
/// \tparam T1 vector type
/// \tparam T2 vector type
/// \param state the state vector
/// \param key the subkey vector
/// \details VecDecryptLast() performs the final round of AES decryption
/// of state using subkey key. The return vector is the same type as state.
/// \details VecDecryptLast() is available on POWER8 and above.
/// \par Wraps
/// __vncipherlast, __builtin_altivec_crypto_vncipherlast, __builtin_crypto_vncipherlast
/// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
template <class T1, class T2>
inline T1 VecDecryptLast(const T1 state, const T2 key)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return (T1)__vncipherlast((uint8x16_p)state, (uint8x16_p)key);
#elif defined(__clang__)
return (T1)__builtin_altivec_crypto_vncipherlast((uint64x2_p)state, (uint64x2_p)key);
#elif defined(__GNUC__)
return (T1)__builtin_crypto_vncipherlast((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
#endif
}
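// A full AES-128 block encryption sketch using the round primitives
// (illustrative; ptr and subkeys are assumptions, with subkeys holding the
// 11 round key vectors already in the byte order the POWER8 instructions
// expect; key scheduling and endian fix-ups are handled elsewhere, e.g.,
// rijndael_simd.cpp):
//
//   uint8x16_p block = VecLoad(ptr);
//   block = VecXor(block, subkeys[0]);           // initial AddRoundKey
//   for (unsigned int i = 1; i < 10; ++i)
//       block = VecEncrypt(block, subkeys[i]);   // rounds 1..9
//   block = VecEncryptLast(block, subkeys[10]);  // final round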
//@}
/// \name SHA DIGESTS
//@{
/// \brief SHA256 Sigma functions
/// \tparam func function
/// \tparam fmask function mask
/// \tparam T vector type
/// \param data the block to transform
/// \details VecSHA256() selects sigma0, sigma1, Sigma0, Sigma1 based on
/// func and fmask. The return vector is the same type as data.
/// \details VecSHA256() is available on POWER8 and above.
/// \par Wraps
/// __vshasigmaw, __builtin_altivec_crypto_vshasigmaw, __builtin_crypto_vshasigmaw
/// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
template <int func, int fmask, class T>
inline T VecSHA256(const T data)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return (T)__vshasigmaw((uint32x4_p)data, func, fmask);
#elif defined(__clang__)
return (T)__builtin_altivec_crypto_vshasigmaw((uint32x4_p)data, func, fmask);
#elif defined(__GNUC__)
return (T)__builtin_crypto_vshasigmaw((uint32x4_p)data, func, fmask);
#else
CRYPTOPP_ASSERT(0);
#endif
}
/// \brief SHA512 Sigma functions
/// \tparam func function
/// \tparam fmask function mask
/// \tparam T vector type
/// \param data the block to transform
/// \details VecSHA512() selects sigma0, sigma1, Sigma0, Sigma1 based on
/// func and fmask. The return vector is the same type as data.
/// \details VecSHA512() is available on POWER8 and above.
/// \par Wraps
/// __vshasigmad, __builtin_altivec_crypto_vshasigmad, __builtin_crypto_vshasigmad
/// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
template <int func, int fmask, class T>
inline T VecSHA512(const T data)
{
#if defined(__ibmxl__) || (defined(_AIX) && defined(__xlC__))
return (T)__vshasigmad((uint64x2_p)data, func, fmask);
#elif defined(__clang__)
return (T)__builtin_altivec_crypto_vshasigmad((uint64x2_p)data, func, fmask);
#elif defined(__GNUC__)
return (T)__builtin_crypto_vshasigmad((uint64x2_p)data, func, fmask);
#else
CRYPTOPP_ASSERT(0);
#endif
}
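// Hypothetical wrappers over the sigma primitives (illustrative only; the
// func/fmask pairs assume the vshasigmaw operand encoding, where func selects
// the lower or upper sigma family and fmask selects the 0 or 1 variant for
// each element):
//
//   #define VecSigma0(x)  VecSHA256<0,0>(x)     // sigma0 of each word
//   #define VecSigma1(x)  VecSHA256<0,0xf>(x)   // sigma1 of each word
//   #define VecUSigma0(x) VecSHA256<1,0>(x)     // Sigma0 of each word
//   #define VecUSigma1(x) VecSHA256<1,0xf>(x)   // Sigma1 of each word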
//@}
#endif // __CRYPTO__
#endif // _ALTIVEC_
NAMESPACE_END
#if CRYPTOPP_GCC_DIAGNOSTIC_AVAILABLE
# pragma GCC diagnostic pop
#endif
#undef CONST_V8_CAST
#undef NCONST_V8_CAST
#endif // CRYPTOPP_PPC_CRYPTO_H