pkg/repro: provide stats even for failed repro

Provide stats and a log even for failed repros and save them in the manager.
The log is particularly useful for failed repros: currently there is no
visibility into why bugs fail to reproduce.
Dmitry Vyukov 2018-07-04 11:05:48 +02:00
parent ea88000dd9
commit 538df42ec7
5 changed files with 52 additions and 43 deletions
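For illustration only (not part of this commit): a minimal sketch of the new call pattern, using a hypothetical helper runRepro and assuming the usual setup of cfg, reporter, vmPool and vmIndexes. It shows the three-value return of repro.Run introduced below; stats are returned even when no reproducer is found.

package reproexample // illustrative sketch, not part of the syzkaller tree

import (
	"github.com/google/syzkaller/pkg/log"
	"github.com/google/syzkaller/pkg/mgrconfig"
	"github.com/google/syzkaller/pkg/report"
	"github.com/google/syzkaller/pkg/repro"
	"github.com/google/syzkaller/vm"
)

// runRepro (hypothetical) shows how a caller handles the new signature:
// repro.Run now returns *repro.Stats alongside *repro.Result, so timings
// and the repro log are available even when res is nil.
func runRepro(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter,
	vmPool *vm.Pool, vmIndexes []int) {
	res, stats, err := repro.Run(crashLog, cfg, reporter, vmPool, vmIndexes)
	if err != nil {
		log.Logf(0, "reproduction failed: %v", err)
	}
	if stats != nil {
		// Present for both successful and failed attempts.
		log.Logf(0, "prog minimization took %v, repro log:\n%s",
			stats.MinimizeProgTime, stats.Log)
	}
	if res != nil {
		log.Logf(0, "reproduced crash: %v", res.Report.Title)
	}
}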


@@ -20,6 +20,16 @@ import (
"github.com/google/syzkaller/vm"
)
type Result struct {
Prog *prog.Prog
Duration time.Duration
Opts csource.Options
CRepro bool
// Information about the final (non-symbolized) crash that we reproduced.
// Can be different from what we started reproducing.
Report *report.Report
}
type Stats struct {
Log []byte
ExtractProgTime time.Duration
@@ -29,24 +39,13 @@ type Stats struct {
SimplifyCTime time.Duration
}
type Result struct {
Prog *prog.Prog
Duration time.Duration
Opts csource.Options
CRepro bool
Stats Stats
// Information about the final (non-symbolized) crash that we reproduced.
// Can be different from what we started reproducing.
Report *report.Report
}
type context struct {
cfg *mgrconfig.Config
reporter report.Reporter
crashTitle string
instances chan *instance
bootRequests chan int
stats Stats
stats *Stats
report *report.Report
}
@@ -58,17 +57,17 @@ type instance struct {
}
func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPool *vm.Pool,
vmIndexes []int) (*Result, error) {
vmIndexes []int) (*Result, *Stats, error) {
if len(vmIndexes) == 0 {
return nil, fmt.Errorf("no VMs provided")
return nil, nil, fmt.Errorf("no VMs provided")
}
target, err := prog.GetTarget(cfg.TargetOS, cfg.TargetArch)
if err != nil {
return nil, err
return nil, nil, err
}
entries := target.ParseLog(crashLog)
if len(entries) == 0 {
return nil, fmt.Errorf("crash log does not contain any programs")
return nil, nil, fmt.Errorf("crash log does not contain any programs")
}
crashStart := len(crashLog) // assuming VM hanged
crashTitle := "hang"
@@ -83,6 +82,7 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPoo
crashTitle: crashTitle,
instances: make(chan *instance, len(vmIndexes)),
bootRequests: make(chan int, len(vmIndexes)),
stats: new(Stats),
}
ctx.reproLog(0, "%v programs, %v VMs", len(entries), len(vmIndexes))
var wg sync.WaitGroup
@@ -144,7 +144,7 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPoo
res, err := ctx.repro(entries, crashStart)
if err != nil {
return nil, err
return nil, nil, err
}
if res != nil {
ctx.reproLog(3, "repro crashed as (corrupted=%v):\n%s",
@@ -158,20 +158,19 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, reporter report.Reporter, vmPoo
_, err = ctx.testProg(res.Prog, res.Duration, res.Opts)
}
if err != nil {
return nil, err
return nil, nil, err
}
}
ctx.reproLog(3, "final repro crashed as (corrupted=%v):\n%s",
ctx.report.Corrupted, ctx.report.Report)
res.Report = ctx.report
res.Stats = ctx.stats
}
close(ctx.bootRequests)
for inst := range ctx.instances {
inst.Close()
}
return res, err
return res, ctx.stats, nil
}
func (ctx *context) repro(entries []*prog.LogEntry, crashStart int) (*Result, error) {
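For reference, a reconstruction (not verbatim from the diff) of how the Stats type reads after this change; the hunk above elides the middle fields, so their names and order are inferred from the uses in syz-manager and syz-repro further down.

type Stats struct {
	Log              []byte        // reproduction log, kept even on failure
	ExtractProgTime  time.Duration
	MinimizeProgTime time.Duration
	SimplifyProgTime time.Duration
	ExtractCTime     time.Duration
	SimplifyCTime    time.Duration
}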


@@ -24,7 +24,9 @@ func initTest(t *testing.T) (*rand.Rand, int) {
}
func TestBisect(t *testing.T) {
ctx := &context{}
ctx := &context{
stats: new(Stats),
}
rd, iters := initTest(t)
for n := 0; n < iters; n++ {


@@ -330,8 +330,6 @@ func (mgr *Manager) httpReport(w http.ResponseWriter, r *http.Request) {
prog, _ := ioutil.ReadFile(filepath.Join(mgr.crashdir, crashID, "repro.prog"))
cprog, _ := ioutil.ReadFile(filepath.Join(mgr.crashdir, crashID, "repro.cprog"))
rep, _ := ioutil.ReadFile(filepath.Join(mgr.crashdir, crashID, "repro.report"))
log, _ := ioutil.ReadFile(filepath.Join(mgr.crashdir, crashID, "repro.stats.log"))
stats, _ := ioutil.ReadFile(filepath.Join(mgr.crashdir, crashID, "repro.stats"))
commitDesc := ""
if len(tag) != 0 {
@@ -349,12 +347,6 @@ func (mgr *Manager) httpReport(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "C reproducer:\n%s\n\n", cprog)
}
}
if len(stats) > 0 {
fmt.Fprintf(w, "Reproducing stats:\n%s\n\n", stats)
}
if len(log) > 0 {
fmt.Fprintf(w, "Reproducing log:\n%s\n\n", log)
}
}
func (mgr *Manager) httpRawCover(w http.ResponseWriter, r *http.Request) {


@@ -301,6 +301,7 @@ type ReproResult struct {
instances []int
title0 string
res *repro.Result
stats *repro.Stats
err error
hub bool // repro came from hub
}
@@ -385,8 +386,8 @@ func (mgr *Manager) vmLoop() {
atomic.AddUint32(&mgr.numReproducing, 1)
log.Logf(1, "loop: starting repro of '%v' on instances %+v", crash.Title, vmIndexes)
go func() {
res, err := repro.Run(crash.Output, mgr.cfg, mgr.reporter, mgr.vmPool, vmIndexes)
reproDone <- &ReproResult{vmIndexes, crash.Title, res, err, crash.hub}
res, stats, err := repro.Run(crash.Output, mgr.cfg, mgr.reporter, mgr.vmPool, vmIndexes)
reproDone <- &ReproResult{vmIndexes, crash.Title, res, stats, err, crash.hub}
}()
}
for !canRepro() && len(instances) != 0 {
@@ -444,10 +445,10 @@ func (mgr *Manager) vmLoop() {
reproInstances -= instancesPerRepro
if res.res == nil {
if !res.hub {
mgr.saveFailedRepro(res.title0)
mgr.saveFailedRepro(res.title0, res.stats)
}
} else {
mgr.saveRepro(res.res, res.hub)
mgr.saveRepro(res.res, res.stats, res.hub)
}
case <-shutdown:
log.Logf(1, "loop: shutting down...")
@@ -714,7 +715,7 @@ func (mgr *Manager) needRepro(crash *Crash) bool {
return false
}
func (mgr *Manager) saveFailedRepro(desc string) {
func (mgr *Manager) saveFailedRepro(desc string, stats *repro.Stats) {
if mgr.dash != nil {
cid := &dashapi.CrashID{
BuildID: mgr.cfg.Tag,
@@ -722,6 +723,8 @@ func (mgr *Manager) saveFailedRepro(desc string) {
}
if err := mgr.dash.ReportFailedRepro(cid); err != nil {
log.Logf(0, "failed to report failed repro to dashboard: %v", err)
} else {
return
}
}
dir := filepath.Join(mgr.crashdir, hash.String([]byte(desc)))
@@ -729,13 +732,13 @@ func (mgr *Manager) saveFailedRepro(desc string) {
for i := 0; i < maxReproAttempts; i++ {
name := filepath.Join(dir, fmt.Sprintf("repro%v", i))
if !osutil.IsExist(name) {
osutil.WriteFile(name, nil)
saveReproStats(name, stats)
break
}
}
}
func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
func (mgr *Manager) saveRepro(res *repro.Result, stats *repro.Stats, hub bool) {
rep := res.Report
if err := mgr.reporter.Symbolize(rep); err != nil {
log.Logf(0, "failed to symbolize repro: %v", err)
@@ -810,12 +813,18 @@ func (mgr *Manager) saveRepro(res *repro.Result, hub bool) {
if len(cprogText) > 0 {
osutil.WriteFile(filepath.Join(dir, "repro.cprog"), cprogText)
}
osutil.WriteFile(filepath.Join(dir, "repro.stats.log"), res.Stats.Log)
stats := fmt.Sprintf("Extracting prog: %s\nMinimizing prog: %s\nSimplifying prog options: %s\n"+
"Extracting C: %s\nSimplifying C: %s\n",
res.Stats.ExtractProgTime, res.Stats.MinimizeProgTime, res.Stats.SimplifyProgTime,
res.Stats.ExtractCTime, res.Stats.SimplifyCTime)
osutil.WriteFile(filepath.Join(dir, "repro.stats"), []byte(stats))
saveReproStats(filepath.Join(dir, "repro.stats"), stats)
}
func saveReproStats(filename string, stats *repro.Stats) {
text := ""
if stats != nil {
text = fmt.Sprintf("Extracting prog: %v\nMinimizing prog: %v\n"+
"Simplifying prog options: %v\nExtracting C: %v\nSimplifying C: %v\n\n\n%s",
stats.ExtractProgTime, stats.MinimizeProgTime,
stats.SimplifyProgTime, stats.ExtractCTime, stats.SimplifyCTime, stats.Log)
}
osutil.WriteFile(filename, []byte(text))
}
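For illustration, with hypothetical timings the repro.stats file written by saveReproStats would look roughly like this (per the format string above, the captured repro log, when present, follows after two blank lines):

Extracting prog: 12m34s
Minimizing prog: 21m7s
Simplifying prog options: 4m12s
Extracting C: 1m30s
Simplifying C: 55s


<repro log, if any, is appended here>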
func (mgr *Manager) minimizeCorpus() {


@@ -63,10 +63,17 @@ func main() {
}
osutil.HandleInterrupts(vm.Shutdown)
res, err := repro.Run(data, cfg, reporter, vmPool, vmIndexes)
res, stats, err := repro.Run(data, cfg, reporter, vmPool, vmIndexes)
if err != nil {
log.Logf(0, "reproduction failed: %v", err)
}
if stats != nil {
fmt.Printf("Extracting prog: %v\n", stats.ExtractProgTime)
fmt.Printf("Minimizing prog: %v\n", stats.MinimizeProgTime)
fmt.Printf("Simplifying prog options: %v\n", stats.SimplifyProgTime)
fmt.Printf("Extracting C: %v\n", stats.ExtractCTime)
fmt.Printf("Simplifying C: %v\n", stats.SimplifyCTime)
}
if res == nil {
return
}