llvm/lib/Support/SHA1.cpp


//===-- SHA1.cpp - Private copy of the SHA1 implementation ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This code is taken from the public domain
// (http://oauth.googlecode.com/svn/code/c/liboauth/src/sha1.c and
// http://cvsweb.netbsd.org/bsdweb.cgi/src/common/lib/libc/hash/sha1/sha1.c?rev=1.6)
// and modified by wrapping it in a C++ interface for LLVM,
// and removing unnecessary code.
//
//===----------------------------------------------------------------------===//
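//
// Typical use (a minimal sketch; the names SomeBytes/Digest are illustrative):
//   llvm::SHA1 Hasher;
//   Hasher.update(SomeBytes);          // SomeBytes is an ArrayRef<uint8_t>
//   StringRef Digest = Hasher.final(); // 20 raw bytes, not hex-encoded
// or, for a whole buffer at once: SHA1::hash(SomeBytes).
//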
#include "llvm/Support/SHA1.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Host.h"
#include <stdint.h>
#include <string.h>

using namespace llvm;
#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
#define SHA_BIG_ENDIAN
#endif
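// Rotate a 32-bit value left by Bits.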
static uint32_t rol(uint32_t Number, int Bits) {
return (Number << Bits) | (Number >> (32 - Bits));
}
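// blk0 reads word I of the 16-word message block; blk computes the expanded
// message schedule in place, W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]),
// kept in a 16-word circular buffer.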
static uint32_t blk0(uint32_t *Buf, int I) { return Buf[I]; }
static uint32_t blk(uint32_t *Buf, int I) {
Buf[I & 15] = rol(Buf[(I + 13) & 15] ^ Buf[(I + 8) & 15] ^ Buf[(I + 2) & 15] ^
Buf[I & 15],
1);
return Buf[I & 15];
}
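// r0-r4 are the four SHA-1 round operations (FIPS 180-2, section 6.1.2):
// r0/r1 use the Ch function, r2 and r4 use Parity, and r3 uses Maj. r0 reads
// the raw message block via blk0; the others use the expanded schedule via blk.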
static void r0(uint32_t &A, uint32_t &B, uint32_t &C, uint32_t &D, uint32_t &E,
int I, uint32_t *Buf) {
E += ((B & (C ^ D)) ^ D) + blk0(Buf, I) + 0x5A827999 + rol(A, 5);
B = rol(B, 30);
}
static void r1(uint32_t &A, uint32_t &B, uint32_t &C, uint32_t &D, uint32_t &E,
int I, uint32_t *Buf) {
E += ((B & (C ^ D)) ^ D) + blk(Buf, I) + 0x5A827999 + rol(A, 5);
B = rol(B, 30);
}
static void r2(uint32_t &A, uint32_t &B, uint32_t &C, uint32_t &D, uint32_t &E,
int I, uint32_t *Buf) {
E += (B ^ C ^ D) + blk(Buf, I) + 0x6ED9EBA1 + rol(A, 5);
B = rol(B, 30);
}
static void r3(uint32_t &A, uint32_t &B, uint32_t &C, uint32_t &D, uint32_t &E,
int I, uint32_t *Buf) {
E += (((B | C) & D) | (B & C)) + blk(Buf, I) + 0x8F1BBCDC + rol(A, 5);
B = rol(B, 30);
}
static void r4(uint32_t &A, uint32_t &B, uint32_t &C, uint32_t &D, uint32_t &E,
int I, uint32_t *Buf) {
E += (B ^ C ^ D) + blk(Buf, I) + 0xCA62C1D6 + rol(A, 5);
B = rol(B, 30);
}
/* code */
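// SHA-1 round constants (also written as literals in r0-r4 above).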
#define SHA1_K0 0x5a827999
#define SHA1_K20 0x6ed9eba1
#define SHA1_K40 0x8f1bbcdc
#define SHA1_K60 0xca62c1d6
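// Initial hash values H0..H4 (FIPS 180-2, section 5.3.1).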
#define SEED_0 0x67452301
#define SEED_1 0xefcdab89
#define SEED_2 0x98badcfe
#define SEED_3 0x10325476
#define SEED_4 0xc3d2e1f0
void SHA1::init() {
InternalState.State[0] = SEED_0;
InternalState.State[1] = SEED_1;
InternalState.State[2] = SEED_2;
InternalState.State[3] = SEED_3;
InternalState.State[4] = SEED_4;
InternalState.ByteCount = 0;
InternalState.BufferOffset = 0;
}
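// Hash a single 64-byte block from Buffer and fold the result into State.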
void SHA1::hashBlock() {
uint32_t A = InternalState.State[0];
uint32_t B = InternalState.State[1];
uint32_t C = InternalState.State[2];
uint32_t D = InternalState.State[3];
uint32_t E = InternalState.State[4];
// 4 rounds of 20 operations each. Loop unrolled.
r0(A, B, C, D, E, 0, InternalState.Buffer.L);
r0(E, A, B, C, D, 1, InternalState.Buffer.L);
r0(D, E, A, B, C, 2, InternalState.Buffer.L);
r0(C, D, E, A, B, 3, InternalState.Buffer.L);
r0(B, C, D, E, A, 4, InternalState.Buffer.L);
r0(A, B, C, D, E, 5, InternalState.Buffer.L);
r0(E, A, B, C, D, 6, InternalState.Buffer.L);
r0(D, E, A, B, C, 7, InternalState.Buffer.L);
r0(C, D, E, A, B, 8, InternalState.Buffer.L);
r0(B, C, D, E, A, 9, InternalState.Buffer.L);
r0(A, B, C, D, E, 10, InternalState.Buffer.L);
r0(E, A, B, C, D, 11, InternalState.Buffer.L);
r0(D, E, A, B, C, 12, InternalState.Buffer.L);
r0(C, D, E, A, B, 13, InternalState.Buffer.L);
r0(B, C, D, E, A, 14, InternalState.Buffer.L);
r0(A, B, C, D, E, 15, InternalState.Buffer.L);
r1(E, A, B, C, D, 16, InternalState.Buffer.L);
r1(D, E, A, B, C, 17, InternalState.Buffer.L);
r1(C, D, E, A, B, 18, InternalState.Buffer.L);
r1(B, C, D, E, A, 19, InternalState.Buffer.L);
r2(A, B, C, D, E, 20, InternalState.Buffer.L);
r2(E, A, B, C, D, 21, InternalState.Buffer.L);
r2(D, E, A, B, C, 22, InternalState.Buffer.L);
r2(C, D, E, A, B, 23, InternalState.Buffer.L);
r2(B, C, D, E, A, 24, InternalState.Buffer.L);
r2(A, B, C, D, E, 25, InternalState.Buffer.L);
r2(E, A, B, C, D, 26, InternalState.Buffer.L);
r2(D, E, A, B, C, 27, InternalState.Buffer.L);
r2(C, D, E, A, B, 28, InternalState.Buffer.L);
r2(B, C, D, E, A, 29, InternalState.Buffer.L);
r2(A, B, C, D, E, 30, InternalState.Buffer.L);
r2(E, A, B, C, D, 31, InternalState.Buffer.L);
r2(D, E, A, B, C, 32, InternalState.Buffer.L);
r2(C, D, E, A, B, 33, InternalState.Buffer.L);
r2(B, C, D, E, A, 34, InternalState.Buffer.L);
r2(A, B, C, D, E, 35, InternalState.Buffer.L);
r2(E, A, B, C, D, 36, InternalState.Buffer.L);
r2(D, E, A, B, C, 37, InternalState.Buffer.L);
r2(C, D, E, A, B, 38, InternalState.Buffer.L);
r2(B, C, D, E, A, 39, InternalState.Buffer.L);
r3(A, B, C, D, E, 40, InternalState.Buffer.L);
r3(E, A, B, C, D, 41, InternalState.Buffer.L);
r3(D, E, A, B, C, 42, InternalState.Buffer.L);
r3(C, D, E, A, B, 43, InternalState.Buffer.L);
r3(B, C, D, E, A, 44, InternalState.Buffer.L);
r3(A, B, C, D, E, 45, InternalState.Buffer.L);
r3(E, A, B, C, D, 46, InternalState.Buffer.L);
r3(D, E, A, B, C, 47, InternalState.Buffer.L);
r3(C, D, E, A, B, 48, InternalState.Buffer.L);
r3(B, C, D, E, A, 49, InternalState.Buffer.L);
r3(A, B, C, D, E, 50, InternalState.Buffer.L);
r3(E, A, B, C, D, 51, InternalState.Buffer.L);
r3(D, E, A, B, C, 52, InternalState.Buffer.L);
r3(C, D, E, A, B, 53, InternalState.Buffer.L);
r3(B, C, D, E, A, 54, InternalState.Buffer.L);
r3(A, B, C, D, E, 55, InternalState.Buffer.L);
r3(E, A, B, C, D, 56, InternalState.Buffer.L);
r3(D, E, A, B, C, 57, InternalState.Buffer.L);
r3(C, D, E, A, B, 58, InternalState.Buffer.L);
r3(B, C, D, E, A, 59, InternalState.Buffer.L);
r4(A, B, C, D, E, 60, InternalState.Buffer.L);
r4(E, A, B, C, D, 61, InternalState.Buffer.L);
r4(D, E, A, B, C, 62, InternalState.Buffer.L);
r4(C, D, E, A, B, 63, InternalState.Buffer.L);
r4(B, C, D, E, A, 64, InternalState.Buffer.L);
r4(A, B, C, D, E, 65, InternalState.Buffer.L);
r4(E, A, B, C, D, 66, InternalState.Buffer.L);
r4(D, E, A, B, C, 67, InternalState.Buffer.L);
r4(C, D, E, A, B, 68, InternalState.Buffer.L);
r4(B, C, D, E, A, 69, InternalState.Buffer.L);
r4(A, B, C, D, E, 70, InternalState.Buffer.L);
r4(E, A, B, C, D, 71, InternalState.Buffer.L);
r4(D, E, A, B, C, 72, InternalState.Buffer.L);
r4(C, D, E, A, B, 73, InternalState.Buffer.L);
r4(B, C, D, E, A, 74, InternalState.Buffer.L);
r4(A, B, C, D, E, 75, InternalState.Buffer.L);
r4(E, A, B, C, D, 76, InternalState.Buffer.L);
r4(D, E, A, B, C, 77, InternalState.Buffer.L);
r4(C, D, E, A, B, 78, InternalState.Buffer.L);
r4(B, C, D, E, A, 79, InternalState.Buffer.L);
InternalState.State[0] += A;
InternalState.State[1] += B;
InternalState.State[2] += C;
InternalState.State[3] += D;
InternalState.State[4] += E;
}
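// Append one byte to the block buffer without counting it toward the message
// length. Bytes are stored so that Buffer.L reads back as big-endian words on
// little-endian hosts; a full buffer triggers hashBlock().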
void SHA1::addUncounted(uint8_t Data) {
#ifdef SHA_BIG_ENDIAN
InternalState.Buffer.C[InternalState.BufferOffset] = Data;
#else
InternalState.Buffer.C[InternalState.BufferOffset ^ 3] = Data;
#endif
InternalState.BufferOffset++;
if (InternalState.BufferOffset == BLOCK_LENGTH) {
hashBlock();
InternalState.BufferOffset = 0;
}
}
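// Append one byte of message data and count it toward the length.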
void SHA1::writebyte(uint8_t Data) {
++InternalState.ByteCount;
addUncounted(Data);
}
void SHA1::update(ArrayRef<uint8_t> Data) {
for (auto &C : Data)
writebyte(C);
}
void SHA1::pad() {
// Implement SHA-1 padding (fips180-2 5.1.1)
// Pad with 0x80 followed by 0x00 until the end of the block
addUncounted(0x80);
while (InternalState.BufferOffset != 56)
addUncounted(0x00);
// Append length in the last 8 bytes
addUncounted(0); // We're only using 32 bit lengths
addUncounted(0); // But SHA-1 supports 64 bit lengths
addUncounted(0); // So zero pad the top bits
addUncounted(InternalState.ByteCount >> 29); // Shifting to multiply by 8
addUncounted(InternalState.ByteCount >> 21); // as SHA-1 supports bitstreams
addUncounted(InternalState.ByteCount >> 13); // as well as bytes.
addUncounted(InternalState.ByteCount >> 5);
addUncounted(InternalState.ByteCount << 3);
}
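// Finalize the digest: pad the message, then serialize State into HashResult
// in big-endian byte order.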
StringRef SHA1::final() {
// Pad to complete the last block
pad();
#ifdef SHA_BIG_ENDIAN
// Just copy the current state
for (int i = 0; i < 5; i++) {
HashResult[i] = InternalState.State[i];
}
#else
// Swap byte order back
for (int i = 0; i < 5; i++) {
HashResult[i] = (((InternalState.State[i]) << 24) & 0xff000000) |
(((InternalState.State[i]) << 8) & 0x00ff0000) |
(((InternalState.State[i]) >> 8) & 0x0000ff00) |
(((InternalState.State[i]) >> 24) & 0x000000ff);
}
#endif
// Return pointer to hash (20 bytes)
return StringRef((char *)HashResult, HASH_LENGTH);
}
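// Return the digest of the data seen so far without disturbing the ongoing
// hash: save InternalState, run final(), then restore it.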
StringRef SHA1::result() {
auto StateToRestore = InternalState;
auto Hash = final();
// Restore the state
InternalState = StateToRestore;
// Return pointer to hash (20 bytes)
return Hash;
}
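// One-shot convenience wrapper: hash a whole buffer and return the 20-byte
// digest as an array.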
std::array<uint8_t, 20> SHA1::hash(ArrayRef<uint8_t> Data) {
SHA1 Hash;
Hash.update(Data);
StringRef S = Hash.final();
std::array<uint8_t, 20> Arr;
memcpy(Arr.data(), S.data(), S.size());
return Arr;
}