From bb4ddeea5f6951d13833e6a1235763257ff0a658 Mon Sep 17 00:00:00 2001
From: Michael Niedermayer
Date: Tue, 11 Sep 2012 04:55:01 +0200
Subject: [PATCH] xtea: optimize core, about 4-5% faster.

Signed-off-by: Michael Niedermayer
---
 libavutil/xtea.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 83 insertions(+), 1 deletion(-)

diff --git a/libavutil/xtea.c b/libavutil/xtea.c
index 0a5df1cec9..d9c2858a5c 100644
--- a/libavutil/xtea.c
+++ b/libavutil/xtea.c
@@ -39,12 +39,17 @@ static void xtea_crypt_ecb(AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
                            int decrypt)
 {
     uint32_t v0, v1;
-    int i;
+    uint32_t k0 = ctx->key[0];
+    uint32_t k1 = ctx->key[1];
+    uint32_t k2 = ctx->key[2];
+    uint32_t k3 = ctx->key[3];
 
     v0 = AV_RB32(src);
     v1 = AV_RB32(src + 4);
 
     if (decrypt) {
+#if CONFIG_SMALL
+        int i;
         uint32_t delta = 0x9E3779B9, sum = delta * 32;
 
         for (i = 0; i < 32; i++) {
@@ -52,7 +57,47 @@ static void xtea_crypt_ecb(AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
             sum -= delta;
             v0 -= (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + ctx->key[sum & 3]);
         }
+#else
+#define DSTEP(SUM, K0, K1) \
+        v1 -= (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (SUM + K0); \
+        v0 -= (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (SUM - 0x9E3779B9 + K1)
+
+        DSTEP(0xC6EF3720U, k2, k3);
+        DSTEP(0x28B7BD67U, k3, k2);
+        DSTEP(0x8A8043AEU, k0, k1);
+        DSTEP(0xEC48C9F5U, k1, k0);
+        DSTEP(0x4E11503CU, k2, k3);
+        DSTEP(0xAFD9D683U, k2, k2);
+        DSTEP(0x11A25CCAU, k3, k1);
+        DSTEP(0x736AE311U, k0, k0);
+        DSTEP(0xD5336958U, k1, k3);
+        DSTEP(0x36FBEF9FU, k1, k2);
+        DSTEP(0x98C475E6U, k2, k1);
+        DSTEP(0xFA8CFC2DU, k3, k0);
+        DSTEP(0x5C558274U, k0, k3);
+        DSTEP(0xBE1E08BBU, k1, k2);
+        DSTEP(0x1FE68F02U, k1, k1);
+        DSTEP(0x81AF1549U, k2, k0);
+        DSTEP(0xE3779B90U, k3, k3);
+        DSTEP(0x454021D7U, k0, k2);
+        DSTEP(0xA708A81EU, k1, k1);
+        DSTEP(0x08D12E65U, k1, k0);
+        DSTEP(0x6A99B4ACU, k2, k3);
+        DSTEP(0xCC623AF3U, k3, k2);
+        DSTEP(0x2E2AC13AU, k0, k1);
+        DSTEP(0x8FF34781U, k0, k0);
+        DSTEP(0xF1BBCDC8U, k1, k3);
+        DSTEP(0x5384540FU, k2, k2);
+        DSTEP(0xB54CDA56U, k3, k1);
+        DSTEP(0x1715609DU, k0, k0);
+        DSTEP(0x78DDE6E4U, k0, k3);
+        DSTEP(0xDAA66D2BU, k1, k2);
+        DSTEP(0x3C6EF372U, k2, k1);
+        DSTEP(0x9E3779B9U, k3, k0);
+#endif
     } else {
+#if CONFIG_SMALL
+        int i;
         uint32_t sum = 0, delta = 0x9E3779B9;
 
         for (i = 0; i < 32; i++) {
@@ -60,6 +105,43 @@ static void xtea_crypt_ecb(AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
             sum += delta;
             v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + ctx->key[(sum >> 11) & 3]);
         }
+#else
+#define ESTEP(SUM, K0, K1) \
+        v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (SUM + K0);\
+        v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (SUM + 0x9E3779B9 + K1)
+        ESTEP(0x00000000U, k0, k3);
+        ESTEP(0x9E3779B9U, k1, k2);
+        ESTEP(0x3C6EF372U, k2, k1);
+        ESTEP(0xDAA66D2BU, k3, k0);
+        ESTEP(0x78DDE6E4U, k0, k0);
+        ESTEP(0x1715609DU, k1, k3);
+        ESTEP(0xB54CDA56U, k2, k2);
+        ESTEP(0x5384540FU, k3, k1);
+        ESTEP(0xF1BBCDC8U, k0, k0);
+        ESTEP(0x8FF34781U, k1, k0);
+        ESTEP(0x2E2AC13AU, k2, k3);
+        ESTEP(0xCC623AF3U, k3, k2);
+        ESTEP(0x6A99B4ACU, k0, k1);
+        ESTEP(0x08D12E65U, k1, k1);
+        ESTEP(0xA708A81EU, k2, k0);
+        ESTEP(0x454021D7U, k3, k3);
+        ESTEP(0xE3779B90U, k0, k2);
+        ESTEP(0x81AF1549U, k1, k1);
+        ESTEP(0x1FE68F02U, k2, k1);
+        ESTEP(0xBE1E08BBU, k3, k0);
+        ESTEP(0x5C558274U, k0, k3);
+        ESTEP(0xFA8CFC2DU, k1, k2);
+        ESTEP(0x98C475E6U, k2, k1);
+        ESTEP(0x36FBEF9FU, k3, k1);
+        ESTEP(0xD5336958U, k0, k0);
+        ESTEP(0x736AE311U, k1, k3);
+        ESTEP(0x11A25CCAU, k2, k2);
+        ESTEP(0xAFD9D683U, k3, k2);
+        ESTEP(0x4E11503CU, k0, k1);
+        ESTEP(0xEC48C9F5U, k1, k0);
+        ESTEP(0x8A8043AEU, k2, k3);
+        ESTEP(0x28B7BD67U, k3, k2);
+#endif
     }
 
     AV_WB32(dst, v0);
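
Not part of the commit: a minimal standalone generator, assuming only the
CONFIG_SMALL reference loops kept above, that reprints the unrolled SUM
constants and key-index pairings so the DSTEP/ESTEP tables can be
cross-checked:

/* Sketch: regenerate the unrolled round constants from the reference loops.
 * Encryption reads key[sum & 3] before the delta update and
 * key[(sum >> 11) & 3] after it; decryption does the reverse. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t delta = 0x9E3779B9;
    uint32_t sum = 0;
    int i;

    for (i = 0; i < 32; i++) {                     /* encrypt table */
        printf("ESTEP(0x%08XU, k%u, k%u);\n", (unsigned)sum,
               (unsigned)(sum & 3), (unsigned)(((sum + delta) >> 11) & 3));
        sum += delta;
    }
    sum = delta * 32;                              /* 0xC6EF3720 */
    for (i = 0; i < 32; i++) {                     /* decrypt table */
        printf("DSTEP(0x%08XU, k%u, k%u);\n", (unsigned)sum,
               (unsigned)((sum >> 11) & 3), (unsigned)((sum - delta) & 3));
        sum -= delta;
    }
    return 0;
}

The first printed line should be ESTEP(0x00000000U, k0, k3) and the last
DSTEP(0x9E3779B9U, k3, k0), matching the tables in the patch.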