all: speed up tests
Mark tests as parallel where it makes sense.
Speed up sys.TransitivelyEnabledCalls.
Execution time is now:

ok  github.com/google/syzkaller/config      0.172s
ok  github.com/google/syzkaller/cover       0.060s
ok  github.com/google/syzkaller/csource     3.081s
ok  github.com/google/syzkaller/db          0.395s
ok  github.com/google/syzkaller/executor    0.060s
ok  github.com/google/syzkaller/fileutil    0.106s
ok  github.com/google/syzkaller/host        1.530s
ok  github.com/google/syzkaller/ifuzz       0.491s
ok  github.com/google/syzkaller/ipc         1.374s
ok  github.com/google/syzkaller/log         0.014s
ok  github.com/google/syzkaller/prog        2.604s
ok  github.com/google/syzkaller/report      0.045s
ok  github.com/google/syzkaller/symbolizer  0.062s
ok  github.com/google/syzkaller/sys         0.365s
ok  github.com/google/syzkaller/syz-dash    0.014s
ok  github.com/google/syzkaller/syz-hub/state  0.427s
ok  github.com/google/syzkaller/vm          0.052s

However, the main time is still taken by rebuilding the sys package.

Fixes #182
This commit is contained in:
parent 220dc49106
commit 0fcd5fd3dd
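Most of the hunks below apply one Go testing idiom: a shared init helper calls t.Parallel(), so every test that starts with initTest(t) is allowed to run concurrently with the other parallel tests in its package. A minimal, self-contained sketch of that idiom follows; the seed, iteration counts, and test bodies are illustrative, not taken from the diff.

package parallelsketch

import (
    "math/rand"
    "testing"
)

// initTest marks the calling test as parallel and hands it its own
// deterministic rand.Source, so parallel tests never share one source
// (rand.Source is not safe for concurrent use).
func initTest(t *testing.T) (rand.Source, int) {
    t.Parallel()
    iters := 10
    if testing.Short() {
        iters = 1
    }
    return rand.NewSource(0), iters
}

func TestOne(t *testing.T) {
    rs, iters := initTest(t) // runs concurrently with TestTwo
    r := rand.New(rs)
    for i := 0; i < iters; i++ {
        _ = r.Intn(100)
    }
}

func TestTwo(t *testing.T) {
    rs, iters := initTest(t)
    r := rand.New(rs)
    for i := 0; i < iters; i++ {
        _ = r.Intn(100)
    }
}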
@@ -15,6 +15,7 @@ import (
 )
 
 func initTest(t *testing.T) (rand.Source, int) {
+    t.Parallel()
     iters := 10
     if testing.Short() {
         iters = 1
@@ -70,11 +71,12 @@ func TestSyz(t *testing.T) {
 }
 
 func Test(t *testing.T) {
-    rs, iters := initTest(t)
+    rs, _ := initTest(t)
     syzProg := prog.GenerateAllSyzProg(rs)
     t.Logf("syz program:\n%s\n", syzProg.Serialize())
     for i, opts := range allOptionsPermutations() {
         t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+            rs, iters := initTest(t)
             t.Logf("opts: %+v", opts)
             for i := 0; i < iters; i++ {
                 p := prog.Generate(rs, 10, nil)
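The second hunk above follows from the same idiom: t.Parallel() has to be called by the test that should run in parallel, so each t.Run subtest calls initTest(t) itself and receives its own rand.Source, while the outer test keeps only the source it needs for shared setup. A rough sketch of that shape, with invented subtest bodies and helper names:

package subtestsketch

import (
    "fmt"
    "math/rand"
    "testing"
)

func initTest(t *testing.T) (rand.Source, int) {
    t.Parallel()
    return rand.NewSource(0), 10
}

func TestPermutations(t *testing.T) {
    rs, _ := initTest(t) // outer test only needs rs for the shared setup
    shared := rand.New(rs).Int()
    for i, opt := range []string{"a", "b", "c"} {
        opt := opt // capture the loop variable for the parallel closure
        t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
            rs, iters := initTest(t) // subtest goes parallel with its own source
            r := rand.New(rs)
            for j := 0; j < iters; j++ {
                _ = shared + len(opt) + r.Intn(10)
            }
        })
    }
}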
@@ -11,6 +11,7 @@ import (
 )
 
 func TestLog(t *testing.T) {
+    t.Parallel()
     // Dump for manual inspection.
     supp, err := DetectSupportedSyscalls()
     if err != nil {
@@ -40,6 +41,7 @@ func TestLog(t *testing.T) {
 }
 
 func TestSupportedSyscalls(t *testing.T) {
+    t.Parallel()
     supp, err := DetectSupportedSyscalls()
     if err != nil {
         t.Skipf("skipping: %v", err)
@@ -38,6 +38,7 @@ func buildProgram(t *testing.T, src string) string {
 }
 
 func initTest(t *testing.T) (rand.Source, int) {
+    t.Parallel()
     iters := 100
     if testing.Short() {
         iters = 10
@@ -76,11 +77,12 @@ func TestEmptyProg(t *testing.T) {
 }
 
 func TestExecute(t *testing.T) {
-    rs, iters := initTest(t)
-    flags := []uint64{0, FlagThreaded, FlagThreaded | FlagCollide}

     bin := buildExecutor(t)
     defer os.Remove(bin)
+
+    rs, iters := initTest(t)
+    flags := []uint64{0, FlagThreaded, FlagThreaded | FlagCollide}
     for _, flag := range flags {
         t.Logf("testing flags 0x%x\n", flag)
         cfg := Config{
@@ -382,8 +382,6 @@ func (r *randGen) randPageAddr(s *state, typ sys.Type, npages uintptr, data *Arg
         }
         starts = append(starts, i)
     }
-    *poolPtr = starts
-    pageStartPool.Put(poolPtr)
     var page uintptr
     if len(starts) != 0 {
         page = starts[r.rand(len(starts))]
@@ -393,6 +391,8 @@ func (r *randGen) randPageAddr(s *state, typ sys.Type, npages uintptr, data *Arg
     if !vma {
         npages = 0
     }
+    *poolPtr = starts
+    pageStartPool.Put(poolPtr)
     return pointerArg(typ, page, 0, npages, data)
 }
 
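The two randPageAddr hunks only move the sync.Pool bookkeeping: the pooled slice pointer must not be returned to the pool while starts is still being read, because a concurrent caller could Get the same pointer and reuse the backing array. A generic sketch of that discipline, under the assumption stated in the comments (names and types here are illustrative, not syzkaller's):

package poolsketch

import (
    "math/rand"
    "sync"
)

// Pool of reusable index slices, mirroring the pageStartPool idea.
var startsPool = sync.Pool{New: func() interface{} { return new([]int) }}

// pickFree picks a random index in [0, n) for which busy(i) is false,
// or -1 if there is none.
func pickFree(r *rand.Rand, busy func(int) bool, n int) int {
    poolPtr := startsPool.Get().(*[]int)
    starts := (*poolPtr)[:0]
    for i := 0; i < n; i++ {
        if !busy(i) {
            starts = append(starts, i)
        }
    }
    idx := -1
    if len(starts) != 0 {
        idx = starts[r.Intn(len(starts))]
    }
    // Return the buffer to the pool only after the last read of starts;
    // putting it back earlier would let a concurrent caller clobber the
    // backing array while it is still being indexed.
    *poolPtr = starts
    startsPool.Put(poolPtr)
    return idx
}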
sys/decl.go
@@ -521,14 +521,26 @@ func TransitivelyEnabledCalls(enabled map[*Call]bool) map[*Call]bool {
     for c := range enabled {
         supported[c] = true
     }
+    inputResources := make(map[*Call][]*ResourceType)
+    ctors := make(map[string][]*Call)
+    for c := range supported {
+        inputs := c.InputResources()
+        inputResources[c] = inputs
+        for _, res := range inputs {
+            if _, ok := ctors[res.Desc.Name]; ok {
+                continue
+            }
+            ctors[res.Desc.Name] = resourceCtors(res.Desc.Kind, true)
+        }
+    }
     for {
         n := len(supported)
         haveGettime := supported[CallMap["clock_gettime"]]
         for c := range supported {
             canCreate := true
-            for _, res := range c.InputResources() {
+            for _, res := range inputResources[c] {
                 noctors := true
-                for _, ctor := range resourceCtors(res.Desc.Kind, true) {
+                for _, ctor := range ctors[res.Desc.Name] {
                     if supported[ctor] {
                         noctors = false
                         break
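The decl.go change is a straightforward memoization: the fixpoint loop used to call c.InputResources() and resourceCtors() on every pass, and now both are computed once up front and looked up from maps inside the loop. A self-contained sketch of the same transformation on a toy dependency graph; the types and names below are invented for illustration and are not syzkaller's API.

package fixpointsketch

// call depends on resources; a resource is usable once some supported
// call constructs it. Toy stand-ins for syzkaller's Call/ResourceType.
type call struct {
    name   string
    inputs []string // resources this call consumes
    makes  []string // resources this call constructs
}

// transitivelyEnabled keeps only calls whose every input resource has at
// least one supported constructor, iterating to a fixed point.
func transitivelyEnabled(calls []*call) map[*call]bool {
    supported := make(map[*call]bool)
    for _, c := range calls {
        supported[c] = true
    }
    // Precompute constructors per resource once, instead of rescanning
    // all calls on every pass of the loop below.
    ctors := make(map[string][]*call)
    for _, c := range calls {
        for _, res := range c.makes {
            ctors[res] = append(ctors[res], c)
        }
    }
    for {
        n := len(supported)
        for c := range supported {
            for _, res := range c.inputs {
                ok := false
                for _, ctor := range ctors[res] {
                    if supported[ctor] {
                        ok = true
                        break
                    }
                }
                if !ok {
                    delete(supported, c)
                    break
                }
            }
        }
        if n == len(supported) {
            return supported
        }
    }
}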
@@ -8,6 +8,7 @@ import (
 )
 
 func TestTransitivelyEnabledCalls(t *testing.T) {
+    t.Parallel()
     calls := make(map[*Call]bool)
     for _, c := range Calls {
         calls[c] = true
@@ -37,6 +38,7 @@ func TestTransitivelyEnabledCalls(t *testing.T) {
 }
 
 func TestClockGettime(t *testing.T) {
+    t.Parallel()
     calls := make(map[*Call]bool)
     for _, c := range Calls {
         calls[c] = true
@@ -13,6 +13,7 @@ import (
 type OutputMerger struct {
     Output chan []byte
     Err    chan error
+    teeMu  sync.Mutex
     tee    io.Writer
     wg     sync.WaitGroup
 }
@@ -42,7 +43,9 @@ func (merger *OutputMerger) Add(name string, r io.ReadCloser) {
             if pos := bytes.LastIndexByte(pending, '\n'); pos != -1 {
                 out := pending[:pos+1]
                 if merger.tee != nil {
+                    merger.teeMu.Lock()
                     merger.tee.Write(out)
+                    merger.teeMu.Unlock()
                 }
                 select {
                 case merger.Output <- append([]byte{}, out...):
@@ -56,7 +59,9 @@ func (merger *OutputMerger) Add(name string, r io.ReadCloser) {
             if len(pending) != 0 {
                 pending = append(pending, '\n')
                 if merger.tee != nil {
+                    merger.teeMu.Lock()
                     merger.tee.Write(pending)
+                    merger.teeMu.Unlock()
                 }
                 select {
                 case merger.Output <- pending:
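The merger hunks add a mutex around the shared tee writer: Add spawns one goroutine per merged stream, and without teeMu those goroutines would call Write on the same io.Writer concurrently. A reduced sketch of the pattern; the struct, field, and method names below are illustrative stand-ins, not the OutputMerger API.

package mergersketch

import (
    "bufio"
    "io"
    "sync"
)

// merger fans several readers into one channel and, optionally, tees
// every line into a shared writer that is guarded by a mutex.
type merger struct {
    output chan []byte
    teeMu  sync.Mutex
    tee    io.Writer
    wg     sync.WaitGroup
}

// add starts one goroutine per stream; all of them may touch m.tee,
// so the Write calls are serialized with m.teeMu.
func (m *merger) add(r io.ReadCloser) {
    m.wg.Add(1)
    go func() {
        defer m.wg.Done()
        defer r.Close()
        scanner := bufio.NewScanner(r)
        for scanner.Scan() {
            line := scanner.Text() + "\n"
            if m.tee != nil {
                m.teeMu.Lock() // several add goroutines share one tee
                io.WriteString(m.tee, line)
                m.teeMu.Unlock()
            }
            m.output <- []byte(line)
        }
    }()
}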