executor: Protect the coverage buffer

Add functions to protect and unprotect the coverage buffer. The buffer is
protected from being written to while tracing. When the trace data is
sorted, we need to make the buffer read/write, but we can return it to
read-only once the sort has completed.

Leave the first page read/write, as we need to clear the length field.
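For context: the first entry of the kcov shared buffer holds the number of
PCs recorded so far, and clearing that counter between runs is the only
write the executor needs while tracing. The kernel records PCs through its
own mapping of the pages, so read-protecting the user-side mapping does not
interfere with collection; it only catches stray writes from the executor
itself. A minimal sketch of that reset write (illustrative names, not code
from this commit):

// Illustrative sketch, not part of this commit: the one write that
// forces page zero of the coverage buffer to stay read/write.
typedef unsigned long long uint64;

struct sketch_cover_t {
	char* data; // start of the mmap'd kcov buffer
};

static void sketch_cover_reset(sketch_cover_t* cov)
{
	// Entry 0 is the trace length; clearing it restarts collection.
	// The store lands in the first page, so the remainder of the
	// buffer can stay PROT_READ while the kernel traces.
	*(uint64*)cov->data = 0;
}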
Andrew Turner, 2019-06-03 13:19:51 +00:00 (committed by Dmitry Vyukov)
commit bfb4a51e30, parent ad87cdf3c7
4 changed files with 41 additions and 0 deletions


@@ -387,8 +387,10 @@ int main(int argc, char** argv)
 	for (int i = 0; i < kMaxThreads; i++) {
 		threads[i].cov.fd = kCoverFd + i;
 		cover_open(&threads[i].cov, false);
+		cover_protect(&threads[i].cov);
 	}
 	cover_open(&extra_cov, true);
+	cover_protect(&extra_cov);
 	if (flag_extra_cover) {
 		// Don't enable comps because we don't use them in the fuzzer yet.
 		cover_enable(&extra_cov, false, true);
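Note the ordering above: cover_open() establishes the mmap'd buffer first,
so cover_protect() has a valid mapping to mprotect(); protection is applied
once at startup, before any tracing begins.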
@@ -846,8 +848,10 @@ void write_coverage_signal(cover_t* cov, uint32* signal_count_pos, uint32* cover
 	uint32 cover_size = cov->size;
 	if (flag_dedup_cover) {
 		cover_data_t* end = cover_data + cover_size;
+		cover_unprotect(cov);
 		std::sort(cover_data, end);
 		cover_size = std::unique(cover_data, end) - cover_data;
+		cover_protect(cov);
 	}
 	// Truncate PCs to uint32 assuming that they fit into 32-bits.
 	// True for x86_64 and arm64 without KASLR.
@@ -932,8 +936,10 @@ void write_call_output(thread_t* th, bool finished)
 	kcov_comparison_t* end = start + ncomps;
 	if ((char*)end > th->cov.data_end)
 		fail("too many comparisons %u", ncomps);
+	cover_unprotect(&th->cov);
 	std::sort(start, end);
 	ncomps = std::unique(start, end) - start;
+	cover_protect(&th->cov);
 	uint32 comps_size = 0;
 	for (uint32 i = 0; i < ncomps; ++i) {
 		if (start[i].ignore())
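Both call sites follow the same pattern: std::sort and std::unique rewrite
the buffer in place, which is exactly the write access the protection
forbids, hence the unprotect/protect bracket around the dedup. A standalone
sketch of that step (hypothetical PC values, not the executor's types):

// Standalone sketch of the sort-and-dedup step that needs the buffer
// writable. The PC values here are hypothetical.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main()
{
	uint64_t pcs[] = {0x4010, 0x4000, 0x4010, 0x4020, 0x4000};
	uint64_t* end = pcs + sizeof(pcs) / sizeof(pcs[0]);
	std::sort(pcs, end); // in-place writes: buffer must be read/write
	uint64_t n = std::unique(pcs, end) - pcs;
	printf("%llu unique PCs\n", (unsigned long long)n);
	return 0;
}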


@@ -80,6 +80,25 @@ static void cover_open(cover_t* cov, bool extra)
 	cov->data_end = cov->data + mmap_alloc_size;
 }
 
+static void cover_protect(cover_t* cov)
+{
+#if GOOS_freebsd
+	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
+	long page_size = sysconf(_SC_PAGESIZE);
+	if (page_size > 0)
+		mprotect(cov->data + page_size, mmap_alloc_size - page_size,
+		    PROT_READ);
+#endif
+}
+
+static void cover_unprotect(cover_t* cov)
+{
+#if GOOS_freebsd
+	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
+	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+#endif
+}
+
 static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
 	int kcov_mode = collect_comps ? KCOV_MODE_TRACE_CMP : KCOV_MODE_TRACE_PC;
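A note on the FreeBSD implementation above: mprotect() requires a
page-aligned start address, and mmap() returns page-aligned memory, so
cov->data + page_size is the first address that can be protected while
leaving page zero (which holds the length field) writable. A standalone
sketch of the same technique, with arbitrary sizes:

// Sketch: protect everything after the first page of an mmap'd region,
// mirroring cover_protect() above. The region size here is arbitrary.
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
	long page = sysconf(_SC_PAGESIZE);
	size_t size = 16 * (size_t)page; // stand-in for kCoverSize * KCOV_ENTRY_SIZE
	char* buf = (char*)mmap(NULL, size, PROT_READ | PROT_WRITE,
				MAP_ANON | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 1; // first page stays writable (length field lives here)
	// mmap returns page-aligned memory, so buf + page is also aligned,
	// satisfying mprotect's alignment requirement.
	if (mprotect(buf + page, size - page, PROT_READ))
		return 1;
	// buf[page] = 1; // would now fault with SIGSEGV
	mprotect(buf, size, PROT_READ | PROT_WRITE); // the unprotect step
	buf[page] = 1; // fine again after unprotecting
	printf("ok\n");
	return 0;
}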


@@ -75,6 +75,14 @@ static void cover_open(cover_t* cov, bool extra)
 	cov->data_end = cov->data + mmap_alloc_size;
 }
 
+static void cover_protect(cover_t* cov)
+{
+}
+
+static void cover_unprotect(cover_t* cov)
+{
+}
+
 static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
 	int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;


@@ -17,7 +17,15 @@ static void cover_collect(cover_t* cov)
 {
 }
 
+static void cover_protect(cover_t* cov)
+{
+}
+
 #if SYZ_EXECUTOR_USES_SHMEM
+static void cover_unprotect(cover_t* cov)
+{
+}
+
 static bool cover_check(uint32 pc)
 {
 	return true;
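The stubs above keep the call sites in executor.cc unconditional: only the
FreeBSD header does real work, while the other targets compile the calls
away. What the protection buys on FreeBSD is that a stray write into the
coverage buffer now faults immediately instead of silently corrupting trace
data. A standalone demo of that failure mode (not executor code):

// Demo: a write into a PROT_READ region faults with SIGSEGV. This is
// the bug class the protection is meant to surface loudly.
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf jmp_env;

static void on_segv(int sig)
{
	(void)sig;
	siglongjmp(jmp_env, 1);
}

int main()
{
	long page = sysconf(_SC_PAGESIZE);
	char* buf = (char*)mmap(NULL, (size_t)page, PROT_READ | PROT_WRITE,
				MAP_ANON | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	mprotect(buf, (size_t)page, PROT_READ); // the "cover_protect" step
	signal(SIGSEGV, on_segv);
	if (sigsetjmp(jmp_env, 1) == 0) {
		buf[0] = 1; // faults: the page is now read-only
		printf("write succeeded (unexpected)\n");
	} else {
		printf("write faulted, as the protection intends\n");
	}
	return 0;
}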