mirror of
https://github.com/shadps4-emu/ext-cryptopp.git
synced 2024-11-23 09:59:42 +00:00
More Clang workarounds on PowerPC (PR #901)
This commit is contained in:
parent
249eb807bf
commit
1f3e65fc35
@ -566,6 +566,14 @@ jobs:
|
||||
- BUILD_MODE=ios
|
||||
- BUILD_JOBS=2
|
||||
- BUILD_ARCH=iPhoneSimulator
|
||||
# We should be OK with PPC64 and GCC until Travis disallows
|
||||
# access to the build machines. Clang is a different story.
|
||||
# The LLVM devs have their heads so far up their ass I don't
|
||||
# think we can un-fuck the mess they created on PowerPC. The
|
||||
# LLVM devs appear to have given up and define everything
|
||||
# from all the compilers to all the features. It does not
|
||||
# seem to matter to them the compiler does not actually
|
||||
# support what LLVM is advertising in the preprocessor.
|
||||
- os: linux
|
||||
name: Linux with GCC (all)
|
||||
arch: ppc64le
|
||||
|
@ -760,45 +760,36 @@ void BLAKE2_Compress64_NEON(const byte* input, BLAKE2b_State& state)
|
||||
|
||||
// Load 16 bytes from p as two 64-bit lanes. Delegates to the VecLoad
// wrapper, which selects the correct unaligned-load intrinsic per
// compiler (vec_xl for XLC/Clang, vec_vsx_ld for GCC) — see PR #901.
inline uint64x2_p VecLoad64(const void* p)
{
    return (uint64x2_p)VecLoad((const byte*)p);
}
|
||||
|
||||
// Load 16 bytes from p as two little-endian 64-bit lanes. On big-endian
// targets each 64-bit lane is byte-reversed after the load; on
// little-endian targets this is a plain load.
inline uint64x2_p VecLoad64LE(const void* p)
{
#if __BIG_ENDIAN__
    // Mask reverses the bytes within each 64-bit lane. All mask indices
    // are < 16, so only the first vec_perm source operand is selected.
    const uint8x16_p m = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    const uint64x2_p v = VecLoad64(p);
    return VecPermute(v, v, m);
#else
    return VecLoad64(p);
#endif
}
|
||||
|
||||
inline void VecStore64(void* p, const uint64x2_p x)
|
||||
{
|
||||
#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
|
||||
vec_xst((uint8x16_p)x,0,(uint8_t*)p);
|
||||
#else
|
||||
vec_vsx_st((uint8x16_p)x,0,(uint8_t*)p);
|
||||
#endif
|
||||
VecStore((uint8x16_p)x, (byte*)p);
|
||||
}
|
||||
|
||||
inline void VecStore64LE(void* p, const uint64x2_p x)
|
||||
{
|
||||
#if __BIG_ENDIAN__
|
||||
const uint8x16_p m = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
|
||||
VecStore64(p, VecPermute(x, x, m));
|
||||
VecStore64(p, VecPermute(x, m, m));
|
||||
#else
|
||||
VecStore64(p, x);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <unsigned int C>
|
||||
inline uint64x2_p VecShiftLeftOctet(const uint64x2_p a, const uint64x2_p b)
|
||||
inline uint64x2_p ShiftLeftOctet(const uint64x2_p a, const uint64x2_p b)
|
||||
{
|
||||
#if __BIG_ENDIAN__
|
||||
return (uint64x2_p)vec_sld((uint8x16_p)a, (uint8x16_p)b, C);
|
||||
@ -807,7 +798,7 @@ inline uint64x2_p VecShiftLeftOctet(const uint64x2_p a, const uint64x2_p b)
|
||||
#endif
|
||||
}
|
||||
|
||||
#define vec_shl_octet(a,b,c) VecShiftLeftOctet<c*8>(a, b)
|
||||
#define vec_shl_octet(a,b,c) ShiftLeftOctet<c*8>(a, b)
|
||||
|
||||
// vec_mergeh(a,b) is equivalent to VecPermute(a,b,HH_MASK); and
|
||||
// vec_mergel(a,b) is equivalent VecPermute(a,b,LL_MASK). Benchmarks
|
||||
|
Loading…
Reference in New Issue
Block a user