syzkaller/prog/analysis.go

// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

// Conservative resource-related analysis of programs.
// The analysis figures out what file descriptors are [potentially] opened
// at a particular point in the program, what pages are [potentially] mapped,
// what files were already referenced in calls, etc.
package prog

import (
	"fmt"
)

const (
	maxPages = 4 << 10
)
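
// state tracks the resources, file names, strings, and mapped pages
// produced by the calls analyzed so far.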
type state struct {
	target    *Target
	ct        *ChoiceTable
	files     map[string]bool
	resources map[string][]Arg
	strings   map[string]bool
	pages     [maxPages]bool
}

// analyze analyzes the program p up to but not including call c.
func analyze(ct *ChoiceTable, p *Prog, c *Call) *state {
	s := newState(p.Target, ct)
	for _, c1 := range p.Calls {
		if c1 == c {
			break
		}
		s.analyze(c1)
	}
	return s
}
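
// newState returns an empty state for the given target and choice table.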
func newState(target *Target, ct *ChoiceTable) *state {
	s := &state{
		target:    target,
		ct:        ct,
		files:     make(map[string]bool),
		resources: make(map[string][]Arg),
		strings:   make(map[string]bool),
	}
	return s
}
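
// analyze records the resources, strings, and file names referenced by the
// arguments of call c, and marks pages as mapped or unmapped based on
// s.target.AnalyzeMmap(c).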
func (s *state) analyze(c *Call) {
	ForeachArg(c, func(arg Arg, _ *ArgCtx) {
		switch typ := arg.Type().(type) {
		case *ResourceType:
			if typ.Dir() != DirIn {
				s.resources[typ.Desc.Name] = append(s.resources[typ.Desc.Name], arg)
				// TODO: negate PIDs and add them as well (that's process groups).
			}
		case *BufferType:
			a := arg.(*DataArg)
			if typ.Dir() != DirOut && len(a.Data()) != 0 {
				switch typ.Kind {
				case BufferString:
					s.strings[string(a.Data())] = true
				case BufferFilename:
					s.files[string(a.Data())] = true
				}
			}
		}
	})
	start, npages, mapped := s.target.AnalyzeMmap(c)
	if npages != 0 {
		if start+npages > uint64(len(s.pages)) {
			panic(fmt.Sprintf("address is out of bounds: page=%v len=%v bound=%v",
				start, npages, len(s.pages)))
		}
		for i := uint64(0); i < npages; i++ {
			s.pages[start+i] = mapped
		}
	}
}
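
// ArgCtx describes the position of an argument during a ForeachArg/ForeachSubArg walk.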
type ArgCtx struct {
	Parent *[]Arg      // GroupArg.Inner (for structs) or Call.Args containing this arg
	Base   *PointerArg // pointer to the base of the heap object containing this arg
	Offset uint64      // offset of this arg from the base
	Stop   bool        // if set by the callback, subargs of this arg are not visited
}
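
// ForeachSubArg invokes f for arg and, recursively, for all of its sub-arguments.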
func ForeachSubArg(arg Arg, f func(Arg, *ArgCtx)) {
	foreachArgImpl(arg, ArgCtx{}, f)
}
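
// ForeachArg invokes f for the return value (if any) and every argument of
// call c, recursing into sub-arguments.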
func ForeachArg(c *Call, f func(Arg, *ArgCtx)) {
	ctx := ArgCtx{}
	if c.Ret != nil {
		foreachArgImpl(c.Ret, ctx, f)
	}
	ctx.Parent = &c.Args
	for _, arg := range c.Args {
		foreachArgImpl(arg, ctx, f)
	}
}
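
// foreachArgImpl walks arg depth-first, keeping Parent, Base, and Offset in
// ctx up to date and stopping descent when the callback sets ctx.Stop.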
func foreachArgImpl(arg Arg, ctx ArgCtx, f func(Arg, *ArgCtx)) {
	f(arg, &ctx)
	if ctx.Stop {
		return
	}
	switch a := arg.(type) {
	case *GroupArg:
		if _, ok := a.Type().(*StructType); ok {
			ctx.Parent = &a.Inner
		}
		var totalSize uint64
		for _, arg1 := range a.Inner {
			foreachArgImpl(arg1, ctx, f)
			if !arg1.Type().BitfieldMiddle() {
				size := arg1.Size()
				ctx.Offset += size
				totalSize += size
			}
		}
		if totalSize > a.Size() {
			panic(fmt.Sprintf("bad group arg size %v, should be <= %v for %+v",
				totalSize, a.Size(), a))
		}
	case *PointerArg:
		if a.Res != nil {
			ctx.Base = a
			ctx.Offset = 0
			foreachArgImpl(a.Res, ctx, f)
		}
	case *UnionArg:
		foreachArgImpl(a.Option, ctx, f)
	}
}
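
// RequiredFeatures reports whether program p uses bitfields and/or checksums.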
func RequiredFeatures(p *Prog) (bitmasks, csums bool) {
	for _, c := range p.Calls {
		ForeachArg(c, func(arg Arg, _ *ArgCtx) {
			if a, ok := arg.(*ConstArg); ok {
				if a.Type().BitfieldOffset() != 0 || a.Type().BitfieldLength() != 0 {
					bitmasks = true
				}
			}
			if _, ok := arg.Type().(*CsumType); ok {
				csums = true
			}
		})
	}
	return
}