llvm/unittests/DebugInfo/PDB/HashTableTest.cpp
Chandler Carruth 6b547686c5 Update the file headers across all of the LLVM projects in the monorepo
to reflect the new license.

We understand that people may be surprised that we're moving the header
entirely to discuss the new license. We checked this carefully with the
Foundation's lawyer and we believe this is the correct approach.

Essentially, all code in the project is now made available by the LLVM
project under our new license, so you will see that the license headers
include that license only. Some of our contributors have contributed
code under our old license, and accordingly, we have retained a copy of
our old license notice in the top-level files in each project and
repository.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@351636 91177308-0d34-0410-b5e6-96231b3b80d8
2019-01-19 08:50:56 +00:00

260 lines
7.5 KiB
C++

//===- llvm/unittest/DebugInfo/PDB/HashTableTest.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/DebugInfo/PDB/Native/HashTable.h"
#include "llvm/DebugInfo/PDB/Native/Hash.h"
#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Testing/Support/Error.h"
#include "gtest/gtest.h"
#include <algorithm>
#include <vector>
using namespace llvm;
using namespace llvm::pdb;
using namespace llvm::support;
namespace {
// Test-only subclass that re-exports HashTable's protected internals so the
// serialization tests can compare bucket storage and the present/deleted
// bookkeeping directly.
class HashTableInternals : public HashTable<uint32_t> {
public:
  using HashTable::Buckets;
  using HashTable::Deleted;
  using HashTable::Present;
};
} // namespace
TEST(HashTableTest, TestSimple) {
  // A default-constructed table is empty but already has buckets allocated.
  HashTableInternals HT;
  EXPECT_EQ(0u, HT.size());
  EXPECT_GT(HT.capacity(), 0u);

  // After a single insertion the key must be findable and yield its value.
  HT.set_as(3u, 7);
  EXPECT_EQ(1u, HT.size());
  ASSERT_NE(HT.end(), HT.find_as(3u));
  EXPECT_EQ(7u, HT.get(3u));
}
TEST(HashTableTest, TestCollision) {
  HashTableInternals Table;
  EXPECT_EQ(0u, Table.size());
  EXPECT_GT(Table.capacity(), 0u);

  // We use knowledge of the hash table's implementation details (a uint32_t
  // key is presumed to hash to itself and be bucketed modulo the capacity) to
  // add a second key that is equivalent to the first modulo the capacity, so
  // the two genuinely collide and exercise probing.
  // NOTE(review): the previous `N2 = 2 * N1` equals N1 + capacity() + 1,
  // which is congruent to N1 + 1 -- the *next* bucket -- so no collision was
  // actually exercised, contradicting the comment above.
  uint32_t N1 = Table.capacity() + 1;
  uint32_t N2 = N1 + Table.capacity();

  Table.set_as(N1, 7);
  Table.set_as(N2, 12);

  // Both entries must be present and retrievable despite sharing a bucket.
  EXPECT_EQ(2u, Table.size());
  ASSERT_NE(Table.end(), Table.find_as(N1));
  ASSERT_NE(Table.end(), Table.find_as(N2));
  EXPECT_EQ(7u, Table.get(N1));
  EXPECT_EQ(12u, Table.get(N2));
}
// NOTE(review): despite the name, nothing is ever removed in this test --
// presumably HashTable's removal support was dropped at some point and the
// body was cut down to insert/lookup only.  Confirm against the HashTable API
// and either rename the test or restore a removal step.
TEST(HashTableTest, TestRemove) {
  HashTableInternals Table;
  EXPECT_EQ(0u, Table.size());
  EXPECT_GT(Table.capacity(), 0u);
  // Insert two distinct keys...
  Table.set_as(1u, 2);
  Table.set_as(3u, 4);
  EXPECT_EQ(2u, Table.size());
  // ...and verify both are findable and map to their stored values.
  ASSERT_NE(Table.end(), Table.find_as(1u));
  ASSERT_NE(Table.end(), Table.find_as(3u));
  EXPECT_EQ(2u, Table.get(1u));
  EXPECT_EQ(4u, Table.get(3u));
}
TEST(HashTableTest, TestCollisionAfterMultipleProbes) {
  HashTableInternals Table;
  EXPECT_EQ(0u, Table.size());
  EXPECT_GT(Table.capacity(), 0u);

  // Probing walks forward to the first available slot, and a slot may already
  // be occupied by an item with a *different* hash value.  Verify that lookup
  // still succeeds when a probe has to step over such an entry.
  uint32_t First = Table.capacity() + 1;
  uint32_t Second = First + 1;
  uint32_t Third = 2 * First;

  Table.set_as(First, 7);
  Table.set_as(Second, 11);
  Table.set_as(Third, 13);

  // All three entries must remain individually retrievable.
  EXPECT_EQ(3u, Table.size());
  ASSERT_NE(Table.end(), Table.find_as(First));
  ASSERT_NE(Table.end(), Table.find_as(Second));
  ASSERT_NE(Table.end(), Table.find_as(Third));
  EXPECT_EQ(7u, Table.get(First));
  EXPECT_EQ(11u, Table.get(Second));
  EXPECT_EQ(13u, Table.get(Third));
}
TEST(HashTableTest, Grow) {
  // Insert exactly `capacity` items so that, independent of the load factor,
  // at least one grow is guaranteed to trigger.  Then verify that the size is
  // unchanged, the capacity is larger, and every original item is still in
  // the table with its original value.
  HashTableInternals HT;
  uint32_t InitialCapacity = HT.capacity();
  for (uint32_t I = 0; I < InitialCapacity; ++I)
    HT.set_as(InitialCapacity + I * 2 + 1, I * 2 + 3);

  EXPECT_EQ(InitialCapacity, HT.size());
  EXPECT_GT(HT.capacity(), InitialCapacity);
  for (uint32_t I = 0; I < InitialCapacity; ++I) {
    uint32_t Key = InitialCapacity + I * 2 + 1;
    ASSERT_NE(HT.end(), HT.find_as(Key));
    EXPECT_EQ(I * 2 + 3, HT.get(Key));
  }
}
TEST(HashTableTest, Serialization) {
  // Fill past the initial capacity so the serialized form covers a grown
  // table, then round-trip it through a byte stream.
  HashTableInternals Original;
  uint32_t Capacity = Original.capacity();
  for (uint32_t I = 0; I < Capacity; ++I)
    Original.set_as(Capacity + I * 2 + 1, I * 2 + 3);

  std::vector<uint8_t> Buffer(Original.calculateSerializedLength());
  MutableBinaryByteStream Stream(Buffer, little);
  BinaryStreamWriter Writer(Stream);
  EXPECT_THAT_ERROR(Original.commit(Writer), Succeeded());
  // Exactly the predicted number of bytes must have been written...
  EXPECT_EQ(Buffer.size(), Writer.getOffset());

  HashTableInternals RoundTripped;
  BinaryStreamReader Reader(Stream);
  EXPECT_THAT_ERROR(RoundTripped.load(Reader), Succeeded());
  // ...and exactly that many read back.
  EXPECT_EQ(Buffer.size(), Reader.getOffset());

  // The reloaded table must be structurally identical to the original.
  EXPECT_EQ(Original.size(), RoundTripped.size());
  EXPECT_EQ(Original.capacity(), RoundTripped.capacity());
  EXPECT_EQ(Original.Buckets, RoundTripped.Buckets);
  EXPECT_EQ(Original.Present, RoundTripped.Present);
  EXPECT_EQ(Original.Deleted, RoundTripped.Deleted);
}
TEST(HashTableTest, NamedStreamMap) {
  std::vector<StringRef> Streams = {"One", "Two", "Three", "Four",
                                    "Five", "Six", "Seven"};
  // Expected index for each name, fixed before any reordering below.
  StringMap<uint32_t> ExpectedIndices;
  for (uint32_t I = 0; I < Streams.size(); ++I)
    ExpectedIndices[Streams[I]] = I + 1;

  // To verify the hash table actually works, insertion order must not matter.
  // std::next_permutation only enumerates the permutations that follow the
  // current sequence lexicographically, so sort first to genuinely cover all
  // 7! orders.  (ExpectedIndices is keyed by name, so sorting the vector does
  // not change any expected value.)
  std::sort(Streams.begin(), Streams.end());
  do {
    NamedStreamMap NSM;
    for (StringRef S : Streams)
      NSM.set(S, ExpectedIndices[S]);
    EXPECT_EQ(Streams.size(), NSM.size());

    uint32_t N;
    EXPECT_TRUE(NSM.get("One", N));
    EXPECT_EQ(1U, N);
    EXPECT_TRUE(NSM.get("Two", N));
    EXPECT_EQ(2U, N);
    EXPECT_TRUE(NSM.get("Three", N));
    EXPECT_EQ(3U, N);
    EXPECT_TRUE(NSM.get("Four", N));
    EXPECT_EQ(4U, N);
    EXPECT_TRUE(NSM.get("Five", N));
    EXPECT_EQ(5U, N);
    EXPECT_TRUE(NSM.get("Six", N));
    EXPECT_EQ(6U, N);
    EXPECT_TRUE(NSM.get("Seven", N));
    EXPECT_EQ(7U, N);
  } while (std::next_permutation(Streams.begin(), Streams.end()));
}
namespace {
// Plain aggregate value type used to exercise HashTable<T> with a value type
// that requires custom hash traits (the PdbHashTraits<FooBar> specialization
// later in this file) instead of the default integer handling.
struct FooBar {
  uint32_t X;
  uint32_t Y;
};
} // namespace
namespace llvm {
namespace pdb {
// Hash traits allowing HashTable<FooBar> to be keyed by strings.  Lookup keys
// are interned into a private buffer of NUL-terminated strings and addressed
// by their byte offset; offset 0 holds a single NUL so that storage key 0
// decodes to the empty string.
template <> struct PdbHashTraits<FooBar> {
  std::vector<char> Buffer;

  PdbHashTraits() { Buffer.push_back(0); }

  // Hash a lookup key with the PDB V1 string hash.
  uint32_t hashLookupKey(StringRef S) const {
    return llvm::pdb::hashStringV1(S);
  }

  // Decode a storage offset back into the interned string; out-of-range
  // offsets decode to the empty string.
  StringRef storageKeyToLookupKey(uint32_t N) const {
    return N < Buffer.size() ? StringRef(Buffer.data() + N) : StringRef();
  }

  // Intern the string (with a trailing NUL) and return its offset.
  uint32_t lookupKeyToStorageKey(StringRef S) {
    uint32_t Offset = Buffer.size();
    Buffer.insert(Buffer.end(), S.begin(), S.end());
    Buffer.push_back('\0');
    return Offset;
  }
};
} // namespace pdb
} // namespace llvm
TEST(HashTableTest, NonTrivialValueType) {
  // Round-trip a table whose values are structs and whose keys are strings
  // (via the PdbHashTraits<FooBar> specialization above).
  HashTable<FooBar> Table;
  uint32_t Capacity = Table.capacity();
  for (uint32_t Idx = 0; Idx < Capacity; ++Idx) {
    FooBar Value;
    Value.X = Idx;
    Value.Y = Idx + 1;
    Table.set_as(utostr(Idx), Value);
  }

  std::vector<uint8_t> Buffer(Table.calculateSerializedLength());
  MutableBinaryByteStream Stream(Buffer, little);
  BinaryStreamWriter Writer(Stream);
  EXPECT_THAT_ERROR(Table.commit(Writer), Succeeded());
  // Exactly the predicted number of bytes must have been written...
  EXPECT_EQ(Buffer.size(), Writer.getOffset());

  HashTable<FooBar> Table2;
  BinaryStreamReader Reader(Stream);
  EXPECT_THAT_ERROR(Table2.load(Reader), Succeeded());
  // ...and exactly that many read back.
  EXPECT_EQ(Buffer.size(), Reader.getOffset());

  EXPECT_EQ(Table.size(), Table2.size());
  EXPECT_EQ(Table.capacity(), Table2.capacity());
  // Bucket-level comparisons stay disabled here: unlike HashTableInternals,
  // plain HashTable<FooBar> does not expose Buckets/Present/Deleted.
  // EXPECT_EQ(Table.Buckets, Table2.Buckets);
  // EXPECT_EQ(Table.Present, Table2.Present);
  // EXPECT_EQ(Table.Deleted, Table2.Deleted);
}