Mirror of https://github.com/reactos/syzkaller.git (synced 2024-11-23 03:19:51 +00:00)
tools/syz-crush: Add feature to run C-program
usage: syz-crush -config=config.json creprog.c

The produced output is the same as what syz-manager produces. A new directory
called reproduced_with is added that lists all the reproducers that triggered
the crash.

Example:

~/workdir/crashes $ tree .
.
├── 2a8fff76c6267ecfeaf3530a602ed48afbc4a151
│   ├── description
│   ├── log0
│   ├── log1
│   ├── log2
│   ├── report0
│   ├── report1
│   ├── report2
│   └── reproduced_with
│       └── 17a6ed226769660f3e5dad7b22b13466b938e290.c
├── 2b6b921457afe8cebd0a96ca40f8569e6ee95174
│   ├── description
│   ├── log0
│   ├── log1
│   ├── log2
│   ├── log3
│   ├── log4
│   ├── log5
│   ├── report0
│   ├── report1
│   ├── report2
│   ├── report3
│   ├── report4
│   ├── report5
│   └── reproduced_with
│       ├── 9fb2f5b37bf4428382334e336bbbb201634c7c4c.c
│       └── ab27002b46d19cafb1ebb8b040f0a3b0f8f88974.c

Signed-off-by: Jukka Kaartinen <jukka.kaartinen@unikie.com>
This commit is contained in:
parent 8e3ab94116
commit f56b572638
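Note: the 40-character directory names in the tree above come from storeCrash
(in the diff below), which names each crash directory after
hash.Hash(res.Report.Title).String(). Assuming pkg/hash is a thin SHA-1
wrapper, which the 40-hex-digit names suggest, a minimal standalone sketch of
that naming scheme:

package main

import (
    "crypto/sha1"
    "encoding/hex"
    "fmt"
)

// crashDirName mimics hash.Hash(title).String(): the hex SHA-1 digest
// of the crash report title becomes the per-crash directory name.
func crashDirName(title string) string {
    sum := sha1.Sum([]byte(title))
    return hex.EncodeToString(sum[:])
}

func main() {
    // Hypothetical crash title, for illustration only.
    fmt.Println(crashDirName("WARNING in corrupted"))
}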
AUTHORS

@@ -38,3 +38,4 @@ JinWoo Lee
 Andrew Turner
 Ethercflow
 Collabora
+Bayerische Motoren Werke Aktiengesellschaft (BMW AG)
@@ -61,3 +61,4 @@ Collabora
 Ricardo Cañuelo
 Dipanjan Das
 Daimeng Wang
+Jukka Kaartinen
Makefile

@@ -95,7 +95,7 @@ endif
     manager runtest fuzzer executor \
     ci hub \
     execprog mutate prog2c trace2syz stress repro upgrade db \
-    usbgen symbolize \
+    usbgen symbolize crush \
     bin/syz-extract bin/syz-fmt \
     extract generate generate_go generate_sys \
     format format_go format_cpp format_sys \
@@ -173,6 +173,9 @@ mutate: descriptions
 prog2c: descriptions
     GOOS=$(HOSTOS) GOARCH=$(HOSTARCH) $(HOSTGO) build $(GOHOSTFLAGS) -o ./bin/syz-prog2c github.com/google/syzkaller/tools/syz-prog2c

+crush: descriptions
+    GOOS=$(HOSTOS) GOARCH=$(HOSTARCH) $(HOSTGO) build $(GOHOSTFLAGS) -o ./bin/syz-crush github.com/google/syzkaller/tools/syz-crush
+
 stress: descriptions
     GOOS=$(TARGETGOOS) GOARCH=$(TARGETGOARCH) $(GO) build $(GOTARGETFLAGS) -o ./bin/$(TARGETOS)_$(TARGETVMARCH)/syz-stress$(EXE) github.com/google/syzkaller/tools/syz-stress
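With this rule in place, make crush builds the host binary at ./bin/syz-crush,
following the same pattern as the neighboring prog2c and stress targets.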
tools/syz-crush/crush.go

@@ -8,11 +8,15 @@ package main

 import (
     "flag"
+    "fmt"
     "io/ioutil"
     "os"
-    "sync"
+    "path/filepath"
     "sync/atomic"
     "time"

+    "github.com/google/syzkaller/pkg/csource"
+    "github.com/google/syzkaller/pkg/hash"
     "github.com/google/syzkaller/pkg/instance"
     "github.com/google/syzkaller/pkg/log"
     "github.com/google/syzkaller/pkg/mgrconfig"
@@ -24,107 +28,252 @@ import (
 )

 var (
     flagConfig      = flag.String("config", "", "configuration file")
+    flagRestartTime = flag.Duration("restartTime", 0, "restartPeriod how long to run the test.")
+    flagInfinite    = flag.Bool("infinite", true, "by default test is run for ever. -infinite=false to stop on crash")
 )
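Note: the two new flags shape the run loop below: restartTime bounds each VM
run, and -infinite=false stops after the first pass instead of restarting
forever. An illustrative invocation (the flag values here are examples, not
defaults):

    syz-crush -config=config.json -restartTime=30m -infinite=false creprog.c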
+
+type CrashReport struct {
+    vmIndex int
+    Report  *report.Report
+}
+
+type FileType int
+
+const (
+    LogFile FileType = iota
+    CProg
+)
+
+func getType(fileName string) FileType {
+    extension := filepath.Ext(fileName)
+
+    switch extension {
+    case ".c":
+        return CProg
+    case ".txt", ".log":
+        return LogFile
+    default:
+        log.Logf(0, "assuming logfile type")
+        return LogFile
+    }
+}
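Note: getType dispatches purely on the file extension and falls back to
treating unknown extensions as execution logs. A standalone sketch of the same
dispatch (the file names are made up for illustration):

package main

import (
    "fmt"
    "path/filepath"
)

// classify mirrors getType's dispatch: ".c" means a C reproducer,
// anything else is treated as a syzkaller execution log.
func classify(fileName string) string {
    switch filepath.Ext(fileName) {
    case ".c":
        return "CProg"
    case ".txt", ".log":
        return "LogFile"
    default:
        return "LogFile (assumed)"
    }
}

func main() {
    for _, f := range []string{"creprog.c", "execution.log", "repro"} {
        fmt.Printf("%-14s -> %s\n", f, classify(f))
    }
}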
+
 func main() {
     flag.Parse()
     cfg, err := mgrconfig.LoadFile(*flagConfig)
     if err != nil {
         log.Fatalf("%v", err)
-        os.Exit(1)
     }
     if len(flag.Args()) != 1 {
-        log.Fatalf("usage: syz-crush -config=config.file execution.log")
+        log.Fatalf("usage: syz-crush -config=config.file <execution.log|creprog.c>")
     }
-    if _, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch); err != nil {
+
+    if err := osutil.MkdirAll(cfg.Workdir); err != nil {
+        log.Fatalf("failed to create tmp dir: %v", err)
+    }
+
+    if *flagInfinite {
+        log.Logf(0, "running infinitely and restarting VM every %v", *flagRestartTime)
+    } else {
+        log.Logf(0, "running until crash is found or till %v", *flagRestartTime)
+    }
+
+    target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
+    if err != nil {
         log.Fatalf("%v", err)
     }
+
     vmPool, err := vm.Create(cfg, false)
     if err != nil {
         log.Fatalf("%v", err)
     }
+
     reporter, err := report.NewReporter(cfg)
     if err != nil {
         log.Fatalf("%v", err)
     }
-
-    log.Logf(0, "booting test machines...")
-    var shutdown uint32
-    var wg sync.WaitGroup
-    wg.Add(vmPool.Count() + 1)
-    for i := 0; i < vmPool.Count(); i++ {
-        i := i
-        go func() {
-            defer wg.Done()
-            for {
-                runInstance(cfg, reporter, vmPool, i)
-                if atomic.LoadUint32(&shutdown) != 0 {
-                    break
-                }
-            }
-        }()
-    }
+
+    reproduceMe := flag.Args()[0]
+    baseName := filepath.Base(reproduceMe)
+
+    runType := getType(reproduceMe)
+    if runType == CProg {
+        execprog, err := ioutil.ReadFile(reproduceMe)
+        if err != nil {
+            log.Fatalf("error reading source file from '%s'", reproduceMe)
+        }
+
+        cfg.SyzExecprogBin, err = csource.BuildNoWarn(target, execprog)
+        if err != nil {
+            log.Fatalf("failed to build source file: %v", err)
+        }
+
+        log.Logf(0, "compiled csource %v to cprog: %v", reproduceMe, cfg.SyzExecprogBin)
+    } else {
+        log.Logf(0, "reproducing from logfile: %v", reproduceMe)
+    }
+
+    restartPeriod := *flagRestartTime
+    if restartPeriod == 0 {
+        // Set default restart period to 1h
+        restartPeriod = time.Hour
+    }
+    log.Logf(0, "restartTime set to: %v", *flagRestartTime)
+
+    log.Logf(0, "booting test machines... %v", vmPool.Count())
+    runDone := make(chan *CrashReport, vmPool.Count())
+    var shutdown uint32
+    var runningWorkers uint32
+
+    for i := 0; i < vmPool.Count(); i++ {
+        atomic.AddUint32(&runningWorkers, 1)
+        go func(index int) {
+            for {
+                runDone <- runInstance(target, cfg, reporter, vmPool, index, *flagRestartTime,
+                    runType)
+                if atomic.LoadUint32(&shutdown) != 0 || !*flagInfinite {
+                    atomic.AddUint32(&runningWorkers, ^uint32(0))
+
+                    // If this is the last worker then we can close the channel
+                    if atomic.LoadUint32(&runningWorkers) == 0 {
+                        log.Logf(0, "vm-%v: closing channel", index)
+                        close(runDone)
+                    }
+                    break
+                } else {
+                    log.Logf(0, "vm-%v: restarting", index)
+                }
+            }
+            log.Logf(0, "vm-%v: done", index)
+        }(i)
+    }
+
+    log.Logf(0, "restart/timeout set to: %v", *flagRestartTime)
     shutdownC := make(chan struct{})
     osutil.HandleInterrupts(shutdownC)
     go func() {
         <-shutdownC
-        wg.Done()
         atomic.StoreUint32(&shutdown, 1)
     }()
-    wg.Wait()
+
+    var count int
+    var crashes int
+    for res := range runDone {
+        count++
+        crashes += storeCrash(res, cfg, baseName)
+        log.Logf(0, "instances executed: %v", count)
+    }
+
+    log.Logf(0, "all done. reproduced %v crashes. reproduce rate %.2f%%", crashes, float64(crashes)/float64(count)*100.0)
 }
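Note: the worker scheme above counts running goroutines in runningWorkers so
that the last worker to finish closes runDone, which lets the main goroutine's
range loop terminate. The decrement uses atomic.AddUint32(&runningWorkers,
^uint32(0)): adding an all-ones value wraps around, which is the documented
sync/atomic idiom for subtracting 1 from an unsigned counter. A self-contained
sketch of the same pattern, with trivial stand-in workers:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    const workers = 4
    results := make(chan int, workers)
    var running uint32

    for i := 0; i < workers; i++ {
        atomic.AddUint32(&running, 1)
        go func(index int) {
            // Stand-in for runInstance: produce one result and stop,
            // as if -infinite=false were set.
            results <- index * index
            // ^uint32(0) is all ones, so adding it decrements by 1.
            if atomic.AddUint32(&running, ^uint32(0)) == 0 {
                // Last worker out closes the channel, unblocking the range below.
                close(results)
            }
        }(i)
    }

    count := 0
    for res := range results {
        count++
        fmt.Println("result:", res)
    }
    fmt.Println("instances executed:", count)
}

Checking the return value of AddUint32, as the sketch does, makes the
last-worker decision atomic; a separate Load after the Add could in principle
let two workers both observe zero.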

-func runInstance(cfg *mgrconfig.Config, reporter report.Reporter, vmPool *vm.Pool, index int) {
+func storeCrash(res *CrashReport, cfg *mgrconfig.Config, baseName string) int {
+    log.Logf(0, "storing results...")
+    if res == nil || res.Report == nil {
+        log.Logf(0, "nothing to store")
+        return 0
+    }
+
+    log.Logf(0, "loop: instance %v finished, crash=%v", res.vmIndex, res.Report.Title)
+
+    crashdir := filepath.Join(cfg.Workdir, "crashes")
+    osutil.MkdirAll(crashdir)
+
+    sig := hash.Hash([]byte(res.Report.Title))
+    id := sig.String()
+    dir := filepath.Join(crashdir, id)
+    log.Logf(0, "vm-%v: crashed: %v, saving to %v", res.vmIndex, res.Report.Title, dir)
+
+    osutil.MkdirAll(dir)
+    if err := osutil.WriteFile(filepath.Join(dir, "description"), []byte(res.Report.Title+"\n")); err != nil {
+        log.Logf(0, "failed to write crash: %v", err)
+    }
+    // Save up to 100 reports. If we already have 100, overwrite the oldest one.
+    // Newer reports are generally more useful. Overwriting is also needed
+    // to be able to understand if a particular bug still happens or already fixed.
+    oldestI := 0
+    var oldestTime time.Time
+    for i := 0; i < 100; i++ {
+        info, err := os.Stat(filepath.Join(dir, fmt.Sprintf("log%v", i)))
+        if err != nil {
+            oldestI = i
+            break
+        }
+        if oldestTime.IsZero() || info.ModTime().Before(oldestTime) {
+            oldestI = i
+            oldestTime = info.ModTime()
+        }
+    }
+    osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("log%v", oldestI)), res.Report.Output)
+    if len(cfg.Tag) > 0 {
+        osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("tag%v", oldestI)), []byte(cfg.Tag))
+    }
+    if len(res.Report.Report) > 0 {
+        osutil.WriteFile(filepath.Join(dir, fmt.Sprintf("report%v", oldestI)), res.Report.Report)
+    }
+
+    reproducedWithdir := filepath.Join(dir, "reproduced_with")
+    osutil.MkdirAll(reproducedWithdir)
+    if err := osutil.WriteFile(filepath.Join(reproducedWithdir, baseName), []byte(baseName+"\n")); err != nil {
+        log.Logf(0, "failed to write reproducer: %v", err)
+    }
+
+    return 1
+}
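Note: storeCrash keeps at most 100 numbered logs per crash directory and, once
full, overwrites the least recently modified one, so it stays possible to tell
whether a bug still reproduces. A standalone sketch of the index-selection
logic:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "time"
)

// oldestLogIndex returns the first free index among log0..log99, or,
// if all 100 exist, the index of the least recently modified log.
func oldestLogIndex(dir string) int {
    oldestI := 0
    var oldestTime time.Time
    for i := 0; i < 100; i++ {
        info, err := os.Stat(filepath.Join(dir, fmt.Sprintf("log%v", i)))
        if err != nil {
            return i // first gap: nothing to overwrite yet
        }
        if oldestTime.IsZero() || info.ModTime().Before(oldestTime) {
            oldestI = i
            oldestTime = info.ModTime()
        }
    }
    return oldestI
}

func main() {
    // Prints 0 for a directory that contains no log files yet.
    fmt.Println(oldestLogIndex("."))
}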
+
+func runInstance(target *prog.Target, cfg *mgrconfig.Config, reporter report.Reporter,
+    vmPool *vm.Pool, index int, timeout time.Duration, runType FileType) *CrashReport {
     inst, err := vmPool.Create(index)
     if err != nil {
         log.Logf(0, "failed to create instance: %v", err)
-        return
+        return nil
     }
     defer inst.Close()

     execprogBin, err := inst.Copy(cfg.SyzExecprogBin)
     if err != nil {
         log.Logf(0, "failed to copy execprog: %v", err)
-        return
+        return nil
     }
-    // If SyzExecutorCmd is provided, it means that syz-executor is already in
-    // the image, so no need to copy it.
-    executorCmd := targets.Get(cfg.TargetOS, cfg.TargetArch).SyzExecutorCmd
-    if executorCmd == "" {
-        executorCmd, err = inst.Copy(cfg.SyzExecutorBin)
-        if err != nil {
-            log.Logf(0, "failed to copy executor: %v", err)
-            return
-        }
-    }
-    logFile, err := inst.Copy(flag.Args()[0])
-    if err != nil {
-        log.Logf(0, "failed to copy log: %v", err)
-        return
-    }

-    cmd := instance.ExecprogCmd(execprogBin, executorCmd, cfg.TargetOS, cfg.TargetArch, cfg.Sandbox,
-        true, true, true, cfg.Procs, -1, -1, logFile)
-    outc, errc, err := inst.Run(time.Hour, nil, cmd)
+    cmd := ""
+    if runType == LogFile {
+        // If SyzExecutorCmd is provided, it means that syz-executor is already in
+        // the image, so no need to copy it.
+        executorCmd := targets.Get(cfg.TargetOS, cfg.TargetArch).SyzExecutorCmd
+        if executorCmd == "" {
+            executorCmd, err = inst.Copy(cfg.SyzExecutorBin)
+            if err != nil {
+                log.Logf(0, "failed to copy executor: %v", err)
+                return nil
+            }
+        }
+        logFile, err := inst.Copy(flag.Args()[0])
+        if err != nil {
+            log.Logf(0, "failed to copy log: %v", err)
+            return nil
+        }
+
+        cmd = instance.ExecprogCmd(execprogBin, executorCmd, cfg.TargetOS, cfg.TargetArch, cfg.Sandbox,
+            true, true, true, cfg.Procs, -1, -1, logFile)
+    } else {
+        cmd = execprogBin
+    }
+
+    outc, errc, err := inst.Run(timeout, nil, cmd)
     if err != nil {
         log.Logf(0, "failed to run execprog: %v", err)
-        return
+        return nil
     }

     log.Logf(0, "vm-%v: crushing...", index)
     rep := inst.MonitorExecution(outc, errc, reporter, vm.ExitTimeout)
     if rep == nil {
         // This is the only "OK" outcome.
         log.Logf(0, "vm-%v: running long enough, restarting", index)
     } else {
-        f, err := ioutil.TempFile(".", "syz-crush")
-        if err != nil {
-            log.Logf(0, "failed to create temp file: %v", err)
-            return
-        }
-        defer f.Close()
-        log.Logf(0, "vm-%v: crashed: %v, saving to %v", index, rep.Title, f.Name())
-        f.Write(rep.Output)
+        log.Logf(0, "vm-%v: running long enough, stopping", index)
     }

+    return &CrashReport{vmIndex: index, Report: rep}
 }
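Note on the branch above: a compiled C reproducer is a self-contained binary,
so the CProg path simply runs execprogBin inside the VM; a syzkaller log has to
be replayed through syz-execprog, which is why only the LogFile path copies
syz-executor (unless SyzExecutorCmd says it is already in the image) and the
log file into the VM before building the command line.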