executor: collect coverage from unfinished syscalls

Write out coverage from syscalls that have not finished
by the end of program execution. Also detect when a syscall
was blocked during execution, even if it eventually finished.
This is helpful for fallback coverage.

Fixes #580
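
In short: the executor now reports a per-call flags bitmask instead of the old executed/fault_injected booleans, and it emits an output record (with whatever coverage was collected so far) even for calls that never finish. Below is a minimal, self-contained Go sketch of how a consumer might interpret the new flags; the constants mirror the ones this commit adds to pkg/ipc, while the describe helper and its messages are illustrative only, not part of the commit.

package main

import "fmt"

// Mirrors the CallFlags added to pkg/ipc in this commit.
type CallFlags uint32

const (
	CallExecuted      CallFlags = 1 << iota // was started at all
	CallFinished                            // finished executing (rather than blocked forever)
	CallBlocked                             // finished but blocked during execution
	CallFaultInjected                       // fault was injected into this call
)

// describe is a hypothetical helper: it classifies a call result the way
// fuzzer-side code can after this commit.
func describe(flags CallFlags, errno int) string {
	switch {
	case flags&CallExecuted == 0:
		return "never started"
	case flags&CallFinished == 0:
		// Unfinished calls now still produce an output record with the
		// coverage collected so far; errno is meaningless here (the
		// executor writes a placeholder value).
		return "started but did not finish (coverage still reported)"
	case flags&CallBlocked != 0:
		return fmt.Sprintf("finished with errno=%d after blocking", errno)
	default:
		return fmt.Sprintf("finished with errno=%d", errno)
	}
}

func main() {
	fmt.Println(describe(CallExecuted, 999))
	fmt.Println(describe(CallExecuted|CallFinished|CallBlocked, 0))
}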
Dmitry Vyukov 2018-07-29 19:46:05 +02:00
parent 0824d7a1bf
commit 1a381291a3
4 changed files with 221 additions and 152 deletions


@@ -184,7 +184,7 @@ struct thread_t {
 	uint64* copyout_pos;
 	uint64 copyout_index;
 	bool colliding;
-	bool handled;
+	bool executing;
 	int call_index;
 	int call_num;
 	int num_args;
@@ -195,7 +195,8 @@ struct thread_t {
 	cover_t cov;
 };

-thread_t threads[kMaxThreads];
+static thread_t threads[kMaxThreads];
+static thread_t* last_scheduled;

 struct res_t {
 	bool executed;
@@ -233,12 +234,18 @@ struct execute_reply {
 	uint32 status;
 };

+// call_reply.flags
+const uint32 call_flag_executed = 1 << 0;
+const uint32 call_flag_finished = 1 << 1;
+const uint32 call_flag_blocked = 1 << 2;
+const uint32 call_flag_fault_injected = 1 << 3;
+
 struct call_reply {
 	execute_reply header;
 	uint32 call_index;
 	uint32 call_num;
 	uint32 reserrno;
-	uint32 fault_injected;
+	uint32 flags;
 	uint32 signal_size;
 	uint32 cover_size;
 	uint32 comps_size;
@@ -269,6 +276,8 @@ struct kcov_comparison_t {
 static thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos);
 static void handle_completion(thread_t* th);
+static void copyout_call_results(thread_t* th);
+static void write_call_output(thread_t* th, bool finished);
 static void execute_call(thread_t* th);
 static void thread_create(thread_t* th, int id);
 static void* worker_thread(void* arg);
@@ -625,38 +634,23 @@ retry:
 			args[i] = read_arg(&input_pos);
 		for (uint64 i = num_args; i < 6; i++)
 			args[i] = 0;
-		thread_t* th = schedule_call(call_index++, call_num, colliding, copyout_index, num_args, args, input_pos);
+		thread_t* th = schedule_call(call_index++, call_num, colliding, copyout_index,
+					     num_args, args, input_pos);
 		if (colliding && (call_index % 2) == 0) {
 			// Don't wait for every other call.
 			// We already have results from the previous execution.
 		} else if (flag_threaded) {
 			// Wait for call completion.
-			// Note: sys knows about this 25ms timeout when it generates
-			// timespec/timeval values.
-			const uint64 timeout_ms = flag_debug ? 3000 : 25;
+			// Note: sys knows about this 25ms timeout when it generates timespec/timeval values.
+			const uint64 timeout_ms = flag_debug ? 1000 : 25;
 			if (event_timedwait(&th->done, timeout_ms))
 				handle_completion(th);
 			// Check if any of previous calls have completed.
-			// Give them some additional time, because they could have been
-			// just unblocked by the current call.
-			if (running < 0)
-				fail("running = %d", running);
-			if (running > 0) {
-				bool last = read_input(&input_pos, true) == instr_eof;
-				uint64 wait = last ? 100 : 2;
-				uint64 wait_start = current_time_ms();
-				uint64 wait_end = wait_start + wait;
-				if (!colliding && wait_end < start + 800)
-					wait_end = start + 800;
-				while (running > 0 && current_time_ms() <= wait_end) {
-					sleep_ms(1);
-					for (int i = 0; i < kMaxThreads; i++) {
-						th = &threads[i];
-						if (!th->handled && event_isset(&th->done))
-							handle_completion(th);
-					}
-				}
-			}
+			for (int i = 0; i < kMaxThreads; i++) {
+				th = &threads[i];
+				if (th->executing && event_isset(&th->done))
+					handle_completion(th);
+			}
 		} else {
 			// Execute directly.
@@ -669,6 +663,33 @@ retry:
 		}
 	}

+	if (!colliding && !collide && running > 0) {
+		// Give unfinished syscalls some additional time.
+		uint64 wait = 100;
+		uint64 wait_start = current_time_ms();
+		uint64 wait_end = wait_start + wait;
+		if (wait_end < start + 800)
+			wait_end = start + 800;
+		while (running > 0 && current_time_ms() <= wait_end) {
+			sleep_ms(1);
+			for (int i = 0; i < kMaxThreads; i++) {
+				thread_t* th = &threads[i];
+				if (th->executing && event_isset(&th->done))
+					handle_completion(th);
+			}
+		}
+		// Write output coverage for unfinished calls.
+		if (flag_cover && running > 0) {
+			for (int i = 0; i < kMaxThreads; i++) {
+				thread_t* th = &threads[i];
+				if (th->executing) {
+					cover_collect(&th->cov);
+					write_call_output(th, false);
+				}
+			}
+		}
+	}
+
 	if (flag_collide && !flag_inject_fault && !colliding && !collide) {
 		debug("enabling collider\n");
 		collide = colliding = true;
@@ -685,7 +706,7 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
 		if (!th->created)
 			thread_create(th, i);
 		if (event_isset(&th->done)) {
-			if (!th->handled)
+			if (th->executing)
 				handle_completion(th);
 			break;
 		}
@@ -694,14 +715,15 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
 		exitf("out of threads");
 	thread_t* th = &threads[i];
 	debug("scheduling call %d [%s] on thread %d\n", call_index, syscalls[call_num].name, th->id);
-	if (event_isset(&th->ready) || !event_isset(&th->done) || !th->handled)
-		fail("bad thread state in schedule: ready=%d done=%d handled=%d",
-		     event_isset(&th->ready), event_isset(&th->done), th->handled);
+	if (event_isset(&th->ready) || !event_isset(&th->done) || th->executing)
+		fail("bad thread state in schedule: ready=%d done=%d executing=%d",
+		     event_isset(&th->ready), event_isset(&th->done), th->executing);
+	last_scheduled = th;
 	th->colliding = colliding;
 	th->copyout_pos = pos;
 	th->copyout_index = copyout_index;
 	event_reset(&th->done);
-	th->handled = false;
+	th->executing = true;
 	th->call_index = call_index;
 	th->call_num = call_num;
 	th->num_args = num_args;
@@ -757,105 +779,124 @@ void write_coverage_signal(thread_t* th, uint32* signal_count_pos, uint32* cover
 void handle_completion(thread_t* th)
 {
 	debug("completion of call %d [%s] on thread %d\n", th->call_index, syscalls[th->call_num].name, th->id);
-	if (event_isset(&th->ready) || !event_isset(&th->done) || th->handled)
-		fail("bad thread state in completion: ready=%d done=%d handled=%d",
-		     event_isset(&th->ready), event_isset(&th->done), th->handled);
-	if (th->res != (long)-1) {
-		if (th->copyout_index != no_copyout) {
-			if (th->copyout_index >= kMaxCommands)
-				fail("result idx %lld overflows kMaxCommands", th->copyout_index);
-			results[th->copyout_index].executed = true;
-			results[th->copyout_index].val = th->res;
-		}
-		for (bool done = false; !done;) {
-			uint64 instr = read_input(&th->copyout_pos);
-			switch (instr) {
-			case instr_copyout: {
-				uint64 index = read_input(&th->copyout_pos);
-				if (index >= kMaxCommands)
-					fail("result idx %lld overflows kMaxCommands", index);
-				char* addr = (char*)read_input(&th->copyout_pos);
-				uint64 size = read_input(&th->copyout_pos);
-				uint64 val = 0;
-				if (copyout(addr, size, &val)) {
-					results[index].executed = true;
-					results[index].val = val;
-				}
-				debug("copyout 0x%llx from %p\n", val, addr);
-				break;
-			}
-			default:
-				done = true;
-				break;
-			}
-		}
-	}
-	if (!collide && !th->colliding) {
-		uint32 reserrno = th->res != -1 ? 0 : th->reserrno;
-#if SYZ_EXECUTOR_USES_SHMEM
-		write_output(th->call_index);
-		write_output(th->call_num);
-		write_output(reserrno);
-		write_output(th->fault_injected);
-		uint32* signal_count_pos = write_output(0); // filled in later
-		uint32* cover_count_pos = write_output(0); // filled in later
-		uint32* comps_count_pos = write_output(0); // filled in later
-		if (flag_collect_comps) {
-			// Collect only the comparisons
-			uint32 ncomps = th->cov.size;
-			kcov_comparison_t* start = (kcov_comparison_t*)(th->cov.data + sizeof(uint64));
-			kcov_comparison_t* end = start + ncomps;
-			if ((char*)end > th->cov.data_end)
-				fail("too many comparisons %u", ncomps);
-			std::sort(start, end);
-			ncomps = std::unique(start, end) - start;
-			uint32 comps_size = 0;
-			for (uint32 i = 0; i < ncomps; ++i) {
-				if (start[i].ignore())
-					continue;
-				comps_size++;
-				start[i].write();
-			}
-			// Write out number of comparisons.
-			*comps_count_pos = comps_size;
-		} else if (flag_cover) {
-			if (is_kernel_64_bit)
-				write_coverage_signal<uint64>(th, signal_count_pos, cover_count_pos);
-			else
-				write_coverage_signal<uint32>(th, signal_count_pos, cover_count_pos);
-		}
-		debug("out #%u: index=%u num=%u errno=%d sig=%u cover=%u comps=%u\n",
-		      completed, th->call_index, th->call_num, reserrno,
-		      *signal_count_pos, *cover_count_pos, *comps_count_pos);
-		completed++;
-		write_completed(completed);
-#else
-		call_reply reply;
-		reply.header.magic = kOutMagic;
-		reply.header.done = 0;
-		reply.header.status = 0;
-		reply.call_index = th->call_index;
-		reply.call_num = th->call_num;
-		reply.reserrno = reserrno;
-		reply.fault_injected = th->fault_injected;
-		reply.signal_size = 0;
-		reply.cover_size = 0;
-		reply.comps_size = 0;
-		if (write(kOutPipeFd, &reply, sizeof(reply)) != sizeof(reply))
-			fail("control pipe call write failed");
-		debug("out: index=%u num=%u errno=%d\n", th->call_index, th->call_num, reserrno);
-#endif
-	}
-	th->handled = true;
+	if (event_isset(&th->ready) || !event_isset(&th->done) || !th->executing)
+		fail("bad thread state in completion: ready=%d done=%d executing=%d",
+		     event_isset(&th->ready), event_isset(&th->done), th->executing);
+	if (th->res != (long)-1)
+		copyout_call_results(th);
+	if (!collide && !th->colliding)
+		write_call_output(th, true);
+	th->executing = false;
 	running--;
+	if (running < 0)
+		fail("running = %d", running);
 }

+void copyout_call_results(thread_t* th)
+{
+	if (th->copyout_index != no_copyout) {
+		if (th->copyout_index >= kMaxCommands)
+			fail("result idx %lld overflows kMaxCommands", th->copyout_index);
+		results[th->copyout_index].executed = true;
+		results[th->copyout_index].val = th->res;
+	}
+	for (bool done = false; !done;) {
+		uint64 instr = read_input(&th->copyout_pos);
+		switch (instr) {
+		case instr_copyout: {
+			uint64 index = read_input(&th->copyout_pos);
+			if (index >= kMaxCommands)
+				fail("result idx %lld overflows kMaxCommands", index);
+			char* addr = (char*)read_input(&th->copyout_pos);
+			uint64 size = read_input(&th->copyout_pos);
+			uint64 val = 0;
+			if (copyout(addr, size, &val)) {
+				results[index].executed = true;
+				results[index].val = val;
+			}
+			debug("copyout 0x%llx from %p\n", val, addr);
+			break;
+		}
+		default:
+			done = true;
+			break;
+		}
+	}
+}
+
+void write_call_output(thread_t* th, bool finished)
+{
+	uint32 reserrno = 999;
+	uint32 call_flags = call_flag_executed;
+	const bool blocked = th != last_scheduled;
+	if (finished) {
+		reserrno = th->res != -1 ? 0 : th->reserrno;
+		call_flags |= call_flag_finished |
+			      (blocked ? call_flag_blocked : 0) |
+			      (th->fault_injected ? call_flag_fault_injected : 0);
+	}
+#if SYZ_EXECUTOR_USES_SHMEM
+	write_output(th->call_index);
+	write_output(th->call_num);
+	write_output(reserrno);
+	write_output(call_flags);
+	uint32* signal_count_pos = write_output(0); // filled in later
+	uint32* cover_count_pos = write_output(0); // filled in later
+	uint32* comps_count_pos = write_output(0); // filled in later
+	if (flag_collect_comps) {
+		// Collect only the comparisons
+		uint32 ncomps = th->cov.size;
+		kcov_comparison_t* start = (kcov_comparison_t*)(th->cov.data + sizeof(uint64));
+		kcov_comparison_t* end = start + ncomps;
+		if ((char*)end > th->cov.data_end)
+			fail("too many comparisons %u", ncomps);
+		std::sort(start, end);
+		ncomps = std::unique(start, end) - start;
+		uint32 comps_size = 0;
+		for (uint32 i = 0; i < ncomps; ++i) {
+			if (start[i].ignore())
+				continue;
+			comps_size++;
+			start[i].write();
+		}
+		// Write out number of comparisons.
+		*comps_count_pos = comps_size;
+	} else if (flag_cover) {
+		if (is_kernel_64_bit)
+			write_coverage_signal<uint64>(th, signal_count_pos, cover_count_pos);
+		else
+			write_coverage_signal<uint32>(th, signal_count_pos, cover_count_pos);
+	}
+	debug("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%u\n",
+	      completed, th->call_index, th->call_num, reserrno, finished, blocked,
+	      *signal_count_pos, *cover_count_pos, *comps_count_pos);
+	completed++;
+	write_completed(completed);
+#else
+	call_reply reply;
+	reply.header.magic = kOutMagic;
+	reply.header.done = 0;
+	reply.header.status = 0;
+	reply.call_index = th->call_index;
+	reply.call_num = th->call_num;
+	reply.reserrno = reserrno;
+	reply.flags = call_flags;
+	reply.signal_size = 0;
+	reply.cover_size = 0;
+	reply.comps_size = 0;
+	if (write(kOutPipeFd, &reply, sizeof(reply)) != sizeof(reply))
+		fail("control pipe call write failed");
+	debug("out: index=%u num=%u errno=%d finished=%d blocked=%d\n",
+	      th->call_index, th->call_num, reserrno, finished, blocked);
+#endif
+}
+
 void thread_create(thread_t* th, int id)
 {
 	th->created = true;
 	th->id = id;
-	th->handled = true;
+	th->executing = false;
 	event_init(&th->ready);
 	event_init(&th->done);
 	event_set(&th->done);
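
The timing policy above is the heart of the change: each call still gets its ~25ms completion timeout, but once the whole program has been issued, still-running calls get at least 100ms of extra grace, with total execution padded to 800ms, after which their coverage is collected and flushed with finished=false. A rough Go sketch of that deadline computation, with the times taken from the diff; the polling loop is schematic, not the real executor:

package main

import (
	"fmt"
	"time"
)

// unfinishedDeadline mirrors the wait_end computation in the new executor
// block above: at least 100ms of extra grace, but never ending before the
// program has been running for 800ms in total.
func unfinishedDeadline(start time.Time) time.Time {
	waitEnd := time.Now().Add(100 * time.Millisecond)
	if floor := start.Add(800 * time.Millisecond); waitEnd.Before(floor) {
		waitEnd = floor
	}
	return waitEnd
}

func main() {
	start := time.Now()
	// ... all calls issued, each waited on for ~25ms ...
	running := 2 // pretend two calls are still blocked in the kernel
	for deadline := unfinishedDeadline(start); running > 0 && time.Now().Before(deadline); {
		time.Sleep(time.Millisecond) // the C++ loop polls for completions here
		running--                    // stand-in for handle_completion
	}
	fmt.Println("still running:", running, "- collect and flush their coverage now")
}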


@@ -152,14 +152,22 @@ func DefaultConfig(target *prog.Target) (*Config, *ExecOpts, error) {
 	return c, opts, nil
 }

+type CallFlags uint32
+
+const (
+	CallExecuted      CallFlags = 1 << iota // was started at all
+	CallFinished                            // finished executing (rather than blocked forever)
+	CallBlocked                             // finished but blocked during execution
+	CallFaultInjected                       // fault was injected into this call
+)
+
 type CallInfo struct {
-	Executed bool
-	Signal   []uint32 // feedback signal, filled if FlagSignal is set
-	Cover    []uint32 // per-call coverage, filled if FlagSignal is set and cover == true,
+	Flags  CallFlags
+	Signal []uint32 // feedback signal, filled if FlagSignal is set
+	Cover  []uint32 // per-call coverage, filled if FlagSignal is set and cover == true,
 	//if dedup == false, then cov effectively contains a trace, otherwise duplicates are removed
-	Comps         prog.CompMap // per-call comparison operands
-	Errno         int          // call errno (0 if the call was successful)
-	FaultInjected bool
+	Comps prog.CompMap // per-call comparison operands
+	Errno int          // call errno (0 if the call was successful)
 }

 type Env struct {
@@ -326,7 +334,15 @@ func (env *Env) Exec(opts *ExecOpts, p *prog.Prog) (output []byte, info []CallIn
 func addFallbackSignal(p *prog.Prog, info []CallInfo) {
 	callInfos := make([]prog.CallInfo, len(info))
 	for i, inf := range info {
-		callInfos[i].Executed = inf.Executed
+		if inf.Flags&CallExecuted != 0 {
+			callInfos[i].Flags |= prog.CallExecuted
+		}
+		if inf.Flags&CallFinished != 0 {
+			callInfos[i].Flags |= prog.CallFinished
+		}
+		if inf.Flags&CallBlocked != 0 {
+			callInfos[i].Flags |= prog.CallBlocked
+		}
 		callInfos[i].Errno = inf.Errno
 	}
 	p.FallbackSignal(callInfos)
@@ -385,9 +401,9 @@ func (env *Env) readOutCoverage(p *prog.Prog) (info []CallInfo, err0 error) {
 		return buf.String()
 	}
 	for i := uint32(0); i < ncmd; i++ {
-		var callIndex, callNum, errno, faultInjected, signalSize, coverSize, compsSize uint32
+		var callIndex, callNum, errno, callFlags, signalSize, coverSize, compsSize uint32
 		if !readOut(&callIndex) || !readOut(&callNum) || !readOut(&errno) ||
-			!readOut(&faultInjected) || !readOut(&signalSize) ||
+			!readOut(&callFlags) || !readOut(&signalSize) ||
 			!readOut(&coverSize) || !readOut(&compsSize) {
 			err0 = fmt.Errorf("executor %v: failed to read output coverage", env.pid)
 			return
@@ -398,7 +414,6 @@ func (env *Env) readOutCoverage(p *prog.Prog) (info []CallInfo, err0 error) {
 			return
 		}
 		c := p.Calls[callIndex]
-		info[callIndex].Executed = true
 		if num := c.Meta.ID; uint32(num) != callNum {
 			err0 = fmt.Errorf("executor %v: failed to read output coverage:"+
 				" record %v call %v: expect syscall %v, got %v, executed %v (cov: %v)",
@@ -411,7 +426,7 @@ func (env *Env) readOutCoverage(p *prog.Prog) (info []CallInfo, err0 error) {
 			return
 		}
 		info[callIndex].Errno = int(errno)
-		info[callIndex].FaultInjected = faultInjected != 0
+		info[callIndex].Flags = CallFlags(callFlags)
 		if signalSize > uint32(len(out)) {
 			err0 = fmt.Errorf("executor %v: failed to read output signal: record %v, call %v, signalsize=%v coversize=%v",
 				env.pid, i, callIndex, signalSize, coverSize)
@@ -529,13 +544,13 @@ type executeReply struct {
 // gometalinter complains about unused fields
 // nolint
 type callReply struct {
-	callIndex     uint32
-	callNum       uint32
-	errno         uint32
-	faultInjected uint32
-	signalSize    uint32
-	coverSize     uint32
-	compsSize     uint32
+	callIndex  uint32
+	callNum    uint32
+	errno      uint32
+	flags      uint32 // see CallFlags
+	signalSize uint32
+	coverSize  uint32
+	compsSize  uint32
 	// signal/cover/comps follow
 }
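
For the shared-memory path, each per-call record now begins with the same seven uint32 words matched by callReply above (minus the execute_reply header), with flags in the slot that used to hold faultInjected. A hedged sketch of decoding one record header from a byte buffer; the real readOutCoverage reads words in place rather than via encoding/binary, and the struct and field names here are mine:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
)

// callRecordHeader mirrors the per-call words the executor writes:
// index, num, errno, flags, then the three payload sizes.
type callRecordHeader struct {
	CallIndex, CallNum, Errno, Flags uint32
	SignalSize, CoverSize, CompsSize uint32
}

const callFlagExecuted = 1 << 0 // call_flag_executed in the executor

func main() {
	// Fake record for an unfinished call: only "executed" is set and
	// errno holds the executor's 999 placeholder.
	buf := new(bytes.Buffer)
	rec := callRecordHeader{CallIndex: 3, CallNum: 42, Errno: 999,
		Flags: callFlagExecuted, CoverSize: 2}
	if err := binary.Write(buf, binary.LittleEndian, rec); err != nil {
		log.Fatal(err)
	}
	var hdr callRecordHeader
	if err := binary.Read(buf, binary.LittleEndian, &hdr); err != nil {
		log.Fatal(err)
	}
	finished := hdr.Flags&(1<<1) != 0 // call_flag_finished
	fmt.Printf("call %v: finished=%v errno=%v cover=%v words\n",
		hdr.CallIndex, finished, hdr.Errno, hdr.CoverSize)
}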


@@ -171,28 +171,41 @@ func RequiredFeatures(p *Prog) (bitmasks, csums bool) {
 	return
 }

+type CallFlags int
+
+const (
+	CallExecuted CallFlags = 1 << iota // was started at all
+	CallFinished                       // finished executing (rather than blocked forever)
+	CallBlocked                        // finished but blocked during execution
+)
+
 type CallInfo struct {
-	Executed bool
-	Errno    int
-	Signal   []uint32
+	Flags  CallFlags
+	Errno  int
+	Signal []uint32
 }

 const (
 	fallbackSignalErrno = iota
+	fallbackSignalErrnoBlocked
 	fallbackSignalCtor
 	fallbackSignalFlags
-	fallbackCallMask = 0x3fff
+	fallbackCallMask = 0x1fff
 )

 func (p *Prog) FallbackSignal(info []CallInfo) {
 	resources := make(map[*ResultArg]*Call)
 	for i, c := range p.Calls {
 		inf := &info[i]
-		if !inf.Executed {
+		if inf.Flags&CallExecuted == 0 {
 			continue
 		}
 		id := c.Meta.ID
-		inf.Signal = append(inf.Signal, encodeFallbackSignal(fallbackSignalErrno, id, inf.Errno))
+		typ := fallbackSignalErrno
+		if inf.Flags&CallFinished != 0 && inf.Flags&CallBlocked != 0 {
+			typ = fallbackSignalErrnoBlocked
+		}
+		inf.Signal = append(inf.Signal, encodeFallbackSignal(typ, id, inf.Errno))
 		if inf.Errno != 0 {
 			continue
 		}
@@ -261,7 +274,7 @@ func (p *Prog) FallbackSignal(info []CallInfo) {
 func DecodeFallbackSignal(s uint32) (callID, errno int) {
 	typ, id, aux := decodeFallbackSignal(s)
 	switch typ {
-	case fallbackSignalErrno:
+	case fallbackSignalErrno, fallbackSignalErrnoBlocked:
 		return id, aux
 	case fallbackSignalCtor, fallbackSignalFlags:
 		return id, 0
@@ -271,15 +284,15 @@ func DecodeFallbackSignal(s uint32) (callID, errno int) {
 }

 func encodeFallbackSignal(typ, id, aux int) uint32 {
-	if typ & ^3 != 0 {
+	if typ & ^7 != 0 {
 		panic(fmt.Sprintf("bad fallback signal type %v", typ))
 	}
 	if id & ^fallbackCallMask != 0 {
 		panic(fmt.Sprintf("bad call id in fallback signal %v", id))
 	}
-	return uint32(typ) | uint32(id&0x3fff)<<2 | uint32(aux)<<16
+	return uint32(typ) | uint32(id&fallbackCallMask)<<3 | uint32(aux)<<16
 }

 func decodeFallbackSignal(s uint32) (typ, id, aux int) {
-	return int(s & 3), int((s >> 2) & fallbackCallMask), int(s >> 16)
+	return int(s & 7), int((s >> 3) & fallbackCallMask), int(s >> 16)
 }
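
The renumbering above is why the call mask shrinks from 0x3fff to 0x1fff: with fallbackSignalErrnoBlocked added, the signal type field grows from 2 to 3 bits, the shift for the call ID moves from 2 to 3 (leaving it 13 bits), and the aux value (errno) keeps the top 16 bits. A standalone round-trip of the packing, copied from the functions above:

package main

import "fmt"

const (
	fallbackSignalErrno = iota
	fallbackSignalErrnoBlocked
	fallbackSignalCtor
	fallbackSignalFlags
	fallbackCallMask = 0x1fff
)

// Same packing as encodeFallbackSignal above: 3 bits of type,
// 13 bits of call ID, 16 bits of aux (errno).
func encode(typ, id, aux int) uint32 {
	return uint32(typ) | uint32(id&fallbackCallMask)<<3 | uint32(aux)<<16
}

func decode(s uint32) (typ, id, aux int) {
	return int(s & 7), int((s >> 3) & fallbackCallMask), int(s >> 16)
}

func main() {
	s := encode(fallbackSignalErrnoBlocked, 5, 11) // call 5 blocked, errno 11
	fmt.Printf("signal=%#x\n", s)                  // 0x1 | 5<<3 | 11<<16 = 0xb0029
	typ, id, aux := decode(s)
	fmt.Println(typ, id, aux) // 1 5 11
}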


@@ -207,7 +207,7 @@ func (proc *Proc) failCall(p *prog.Prog, call int) {
 		opts.FaultCall = call
 		opts.FaultNth = nth
 		info := proc.executeRaw(&opts, p, StatSmash)
-		if info != nil && len(info) > call && !info[call].FaultInjected {
+		if info != nil && len(info) > call && info[call].Flags&ipc.CallFaultInjected == 0 {
			break
 		}
 	}
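
Finally, the fuzzer's fault-injection loop now keys off the flags word instead of the old FaultInjected boolean: once an execution reports that no fault was actually injected, nth has exceeded the number of fault sites in the call and the loop stops. A tiny sketch of just that condition (keepInjecting is a hypothetical helper, not syz-fuzzer code):

package main

import "fmt"

type CallFlags uint32

const (
	CallExecuted CallFlags = 1 << iota
	CallFinished
	CallBlocked
	CallFaultInjected // mirrors ipc.CallFaultInjected
)

// keepInjecting reproduces the updated failCall condition: continue raising
// FaultNth only while executions report that a fault was actually injected.
func keepInjecting(flags CallFlags) bool {
	return flags&CallFaultInjected != 0
}

func main() {
	for nth, flags := range []CallFlags{
		CallExecuted | CallFinished | CallFaultInjected,
		CallExecuted | CallFinished, // nth exceeded the call's fault sites
	} {
		fmt.Printf("nth=%d keep injecting=%v\n", nth+1, keepInjecting(flags))
	}
}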