Mirror of https://github.com/reactos/syzkaller.git (synced 2024-11-23 11:29:46 +00:00)
executor: refactor extra cover handling
One observation is that checking for extra coverage is very fast (effectively a memory load), so we can simplify the code by removing th->extra_cover and always checking for it. As a bonus, we may grab some coverage that we would otherwise miss.

Don't sleep for 500 ms at the end if colliding; we are not going to use the extra coverage in that case anyway.

Check for extra coverage at the end every 100 ms, so that we are not killed on timeout before writing any of it.

Make the 500 ms sleep at the end parametrizable. Enable it for syz_usb syscalls, so we keep the same behavior for USB, but this also allows gathering extra coverage for other subsystems. Some subsystems don't have a good way to detect whether we will get any extra coverage, and sleeping for 500 ms for all programs slows down fuzzing too much. So we check for extra coverage at the end of every program (it is cheap anyway), but sleep only for USB programs. This allows collecting extra coverage for vhost, and maybe for wireguard in the future.

Update #806
This commit is contained in:
parent 72bfa6f2b7
commit ed8812ac86
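As context for the diff below, here is a minimal standalone C++ sketch of the polling pattern the commit message describes: instead of one long sleep followed by a single coverage flush, sleep in short intervals and flush after each one, so coverage gathered so far is not lost if the process is killed on timeout. This is an illustrative sketch, not the executor's real code; flush_extra_coverage() and poll_extra_cover() are hypothetical stand-ins for the executor's write_extra_output() and its end-of-program loop.

// Hedged sketch of the periodic extra-coverage flush; names below are
// hypothetical stand-ins, not the real executor functions.
#include <chrono>
#include <cstdio>
#include <thread>

static void flush_extra_coverage()
{
	// Placeholder: in the executor this would scan the extra-coverage
	// buffer (a cheap memory read) and write out any new PCs.
	std::printf("flushed extra coverage\n");
}

static void poll_extra_cover(int total_timeout_ms)
{
	const int kSleepMs = 100; // matches the 100 ms interval from the commit message
	for (int i = 0; i < total_timeout_ms / kSleepMs; i++) {
		std::this_thread::sleep_for(std::chrono::milliseconds(kSleepMs));
		flush_extra_coverage();
	}
}

int main()
{
	// A program containing syz_usb calls would use 500 here; other
	// programs use 0 and skip the loop, so fuzzing throughput is unaffected.
	int prog_extra_cover_timeout = 500;
	poll_extra_cover(prog_extra_cover_timeout);
	return 0;
}

If the process is killed partway through the loop, everything flushed in earlier iterations has already been written, which is the whole point of polling instead of sleeping once for the full duration.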
@@ -199,7 +199,6 @@ struct thread_t {
 	uint32 reserrno;
 	bool fault_injected;
 	cover_t cov;
-	bool extra_cover;
 };
 
 static thread_t threads[kMaxThreads];
@@ -288,7 +287,7 @@ struct feature_t {
 	void (*setup)();
 };
 
-static thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos, bool extra_cover);
+static thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos);
 static void handle_completion(thread_t* th);
 static void copyout_call_results(thread_t* th);
 static void write_call_output(thread_t* th, bool finished);
@@ -591,8 +590,8 @@ retry:
 	}
 
 	int call_index = 0;
-	bool prog_extra_cover = false;
 	int prog_extra_timeout = 0;
+	int prog_extra_cover_timeout = 0;
 	for (;;) {
 		uint64 call_num = read_input(&input_pos);
 		if (call_num == instr_eof)
@@ -687,14 +686,11 @@ retry:
 		// Normal syscall.
 		if (call_num >= ARRAY_SIZE(syscalls))
 			fail("invalid command number %llu", call_num);
-		bool call_extra_cover = false;
 		// call_extra_timeout must match timeout in pkg/csource/csource.go.
 		int call_extra_timeout = 0;
 		// TODO: find a way to tune timeout values.
-		if (strncmp(syscalls[call_num].name, "syz_usb", strlen("syz_usb")) == 0) {
-			prog_extra_cover = true;
-			call_extra_cover = true;
-		}
+		if (strncmp(syscalls[call_num].name, "syz_usb", strlen("syz_usb")) == 0)
+			prog_extra_cover_timeout = 500;
 		if (strncmp(syscalls[call_num].name, "syz_usb_connect", strlen("syz_usb_connect")) == 0) {
 			prog_extra_timeout = 2000;
 			call_extra_timeout = 2000;
@@ -721,7 +717,7 @@ retry:
 		for (uint64 i = num_args; i < kMaxArgs; i++)
 			args[i] = 0;
 		thread_t* th = schedule_call(call_index++, call_num, colliding, copyout_index,
-					     num_args, args, input_pos, call_extra_cover);
+					     num_args, args, input_pos);
 
 		if (colliding && (call_index % 2) == 0) {
 			// Don't wait for every other call.
@@ -779,8 +775,6 @@ retry:
 					write_call_output(th, false);
 				}
 			}
-			if (prog_extra_cover)
-				write_extra_output();
 		}
 	}
 
@@ -788,10 +782,16 @@ retry:
 	close_fds();
#endif
 
-	if (prog_extra_cover) {
-		sleep_ms(500);
-		if (!colliding && !collide)
+	if (!colliding && !collide) {
 		write_extra_output();
+		// Check for new extra coverage in small intervals to avoid situation
+		// that we were killed on timeout before we write any.
+		// Check for extra coverage is very cheap, effectively a memory load.
+		const int kSleepMs = 100;
+		for (int i = 0; i < prog_extra_cover_timeout / kSleepMs; i++) {
+			sleep_ms(kSleepMs);
+			write_extra_output();
+		}
 	}
 
 	if (flag_collide && !flag_fault && !colliding && !collide) {
@@ -801,7 +801,7 @@ retry:
 	}
 }
 
-thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos, bool extra_cover)
+thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 copyout_index, uint64 num_args, uint64* args, uint64* pos)
{
 	// Find a spare thread to execute the call.
 	int i;
@@ -832,7 +832,6 @@ thread_t* schedule_call(int call_index, int call_num, bool colliding, uint64 cop
 	th->num_args = num_args;
 	for (int i = 0; i < kMaxArgs; i++)
 		th->args[i] = args[i];
-	th->extra_cover = extra_cover;
 	event_set(&th->ready);
 	running++;
 	return th;
@@ -891,8 +890,7 @@ void handle_completion(thread_t* th)
 		copyout_call_results(th);
 		if (!collide && !th->colliding) {
 			write_call_output(th, true);
-			if (th->extra_cover)
-				write_extra_output();
+			write_extra_output();
 		}
 		th->executing = false;
 		running--;