// Copyright 2017 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

// +build aetest

package dash

import (
	"fmt"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/google/syzkaller/dashboard/dashapi"
)

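// init registers the dashboard mocks and installs the test config defined below
// before any test in this package runs.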
func init() {
	initMocks()
	installConfig(testConfig)
}

// Config used in tests.
var testConfig = &GlobalConfig{
	AccessLevel: AccessPublic,
	AuthDomain:  "@syzkaller.com",
	Clients: map[string]string{
		"reporting": "reportingkeyreportingkeyreportingkey",
	},
	EmailBlacklist: []string{
		"\"Bar\" <BlackListed@Domain.com>",
	},
	Namespaces: map[string]*Config{
		"test1": {
			AccessLevel: AccessAdmin,
			Key:         "test1keytest1keytest1key",
			Clients: map[string]string{
				client1: key1,
			},
			Reporting: []Reporting{
				{
					Name:       "reporting1",
					DailyLimit: 3,
					Config: &TestConfig{
						Index: 1,
					},
					Filter: func(bug *Bug) FilterResult {
						if strings.HasPrefix(bug.Title, "skip without repro") &&
							bug.ReproLevel != dashapi.ReproLevelNone {
							return FilterSkip
						}
						return FilterReport
					},
				},
				{
					Name:       "reporting2",
					DailyLimit: 3,
					Config: &TestConfig{
						Index: 2,
					},
				},
			},
		},
		"test2": {
			AccessLevel: AccessAdmin,
			Key:         "test2keytest2keytest2key",
			Clients: map[string]string{
				client2: key2,
			},
			Managers: map[string]ConfigManager{
				"restricted-manager": {
					RestrictedTestingRepo:   "git://restricted.git/restricted.git",
					RestrictedTestingReason: "you should test only on restricted.git",
				},
			},
			Reporting: []Reporting{
				{
					Name:       "reporting1",
					DailyLimit: 5,
					Config: &EmailConfig{
						Email:      "test@syzkaller.com",
						Moderation: true,
					},
				},
				{
					Name:       "reporting2",
					DailyLimit: 3,
					Config: &EmailConfig{
						Email:              "bugs@syzkaller.com",
						DefaultMaintainers: []string{"default@maintainers.com"},
						MailMaintainers:    true,
					},
				},
				{
					Name:       "reporting3",
					DailyLimit: 3,
					Config: &EmailConfig{
						Email:              "bugs@syzkaller.com",
						DefaultMaintainers: []string{"default@maintainers.com"},
						MailMaintainers:    true,
					},
				},
			},
		},
		// Namespaces for access level testing.
		"access-admin": {
			AccessLevel: AccessAdmin,
			Key:         "adminkeyadminkeyadminkey",
			Clients: map[string]string{
				clientAdmin: keyAdmin,
			},
			Reporting: []Reporting{
				{
					Name:   "access-admin-reporting1",
					Config: &TestConfig{Index: 1},
				},
				{
					Name:   "access-admin-reporting2",
					Config: &TestConfig{Index: 2},
				},
			},
		},
		"access-user": {
			AccessLevel: AccessUser,
			Key:         "userkeyuserkeyuserkey",
			Clients: map[string]string{
				clientUser: keyUser,
			},
			Reporting: []Reporting{
				{
					AccessLevel: AccessAdmin,
					Name:        "access-admin-reporting1",
					Config:      &TestConfig{Index: 1},
				},
				{
					Name:   "access-user-reporting2",
					Config: &TestConfig{Index: 2},
				},
			},
		},
		"access-public": {
			AccessLevel: AccessPublic,
			Key:         "publickeypublickeypublickey",
			Clients: map[string]string{
				clientPublic: keyPublic,
			},
			Reporting: []Reporting{
				{
					AccessLevel: AccessUser,
					Name:        "access-user-reporting1",
					Config:      &TestConfig{Index: 1},
				},
				{
					Name:   "access-public-reporting2",
					Config: &TestConfig{Index: 2},
				},
			},
		},
	},
	KernelRepos: map[string]KernelRepo{
		"repo10/branch10": {
			Alias: "repo10alias",
		},
	},
}

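// Client names and API keys referenced by the namespace configs above.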
const (
	client1      = "client1"
	client2      = "client2"
	key1         = "client1keyclient1keyclient1key"
	key2         = "client2keyclient2keyclient2key"
	clientAdmin  = "client-admin"
	keyAdmin     = "clientadminkeyclientadminkey"
	clientUser   = "client-user"
	keyUser      = "clientuserkeyclientuserkey"
	clientPublic = "client-public"
	keyPublic    = "clientpublickeyclientpublickey"
)

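// TestConfig is a minimal reporting config implementation used by the test
// namespaces; Index identifies the reporting stage in the configs above.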
type TestConfig struct {
	Index int
}

func (cfg *TestConfig) Type() string {
	return "test"
}

func (cfg *TestConfig) NeedMaintainers() bool {
	return false
}

func (cfg *TestConfig) Validate() error {
	return nil
}

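// testBuild returns a build whose fields are all deterministically derived from id.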
func testBuild(id int) *dashapi.Build {
	return &dashapi.Build{
		Manager:           fmt.Sprintf("manager%v", id),
		ID:                fmt.Sprintf("build%v", id),
		SyzkallerCommit:   fmt.Sprintf("syzkaller_commit%v", id),
		CompilerID:        fmt.Sprintf("compiler%v", id),
		KernelRepo:        fmt.Sprintf("repo%v", id),
		KernelBranch:      fmt.Sprintf("branch%v", id),
		KernelCommit:      strings.Repeat(fmt.Sprint(id), 40)[:40],
		KernelCommitTitle: fmt.Sprintf("kernel_commit_title%v", id),
		KernelCommitDate:  buildCommitDate,
		KernelConfig:      []byte(fmt.Sprintf("config%v", id)),
	}
}

var buildCommitDate = time.Date(1, 2, 3, 4, 5, 6, 0, time.UTC)

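// testCrash returns a crash attributed to the given build; testCrashWithRepro
// additionally attaches syz and C reproducers.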
func testCrash(build *dashapi.Build, id int) *dashapi.Crash {
	return &dashapi.Crash{
		BuildID: build.ID,
		Title:   fmt.Sprintf("title%v", id),
		Log:     []byte(fmt.Sprintf("log%v", id)),
		Report:  []byte(fmt.Sprintf("report%v", id)),
	}
}

func testCrashWithRepro(build *dashapi.Build, id int) *dashapi.Crash {
	crash := testCrash(build, id)
	crash.ReproOpts = []byte(fmt.Sprintf("repro opts %v", id))
	crash.ReproSyz = []byte(fmt.Sprintf("syncfs(%v)", id))
	crash.ReproC = []byte(fmt.Sprintf("int main() { return %v; }", id))
	return crash
}

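// testCrashID extracts the (build ID, title) pair that identifies an already-reported crash.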
func testCrashID(crash *dashapi.Crash) *dashapi.CrashID {
	return &dashapi.CrashID{
		BuildID: crash.BuildID,
		Title:   crash.Title,
	}
}

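// TestApp is a smoke test that walks through the main dashboard API calls:
// client authorization, build upload, crash reporting, failed repro reporting
// and bug polling/updating.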
func TestApp(t *testing.T) {
	c := NewCtx(t)
	defer c.Close()

	c.expectOK(c.GET("/"))

	apiClient1 := c.makeClient(client1, key1, false)
	apiClient2 := c.makeClient(client2, key2, false)
	c.expectFail("unknown api method", apiClient1.Query("unsupported_method", nil, nil))
	c.client.LogError("name", "msg %s", "arg")

	build := testBuild(1)
	c.client.UploadBuild(build)
	// Uploading the same build must be OK.
	c.client.UploadBuild(build)

	// Some bad combinations of client/key.
	c.expectFail("unauthorized", c.makeClient(client1, "", false).Query("upload_build", build, nil))
	c.expectFail("unauthorized", c.makeClient("unknown", key1, false).Query("upload_build", build, nil))
	c.expectFail("unauthorized", c.makeClient(client1, key2, false).Query("upload_build", build, nil))

	crash1 := testCrash(build, 1)
	c.client.ReportCrash(crash1)

	// Test that namespace isolation works.
	c.expectFail("unknown build", apiClient2.Query("report_crash", crash1, nil))

	crash2 := testCrashWithRepro(build, 2)
	c.client.ReportCrash(crash2)

	// Provoke purgeOldCrashes.
	for i := 0; i < 30; i++ {
		crash := testCrash(build, 3)
		crash.Log = []byte(fmt.Sprintf("log%v", i))
		crash.Report = []byte(fmt.Sprintf("report%v", i))
		c.client.ReportCrash(crash)
	}

	cid := &dashapi.CrashID{
		BuildID: "build1",
		Title:   "title1",
	}
	c.client.ReportFailedRepro(cid)

	c.client.ReportingPollBugs("test")

	c.client.ReportingUpdate(&dashapi.BugUpdate{
		ID:         "id",
		Status:     dashapi.BugStatusOpen,
		ReproLevel: dashapi.ReproLevelC,
	})
}

// Test purging of old crashes for bugs with lots of crashes.
func TestPurgeOldCrashes(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	c := NewCtx(t)
	defer c.Close()

	build := testBuild(1)
	c.client.UploadBuild(build)

	// First, send 3 crashes that are reported. These need to be preserved regardless.
	crash := testCrash(build, 1)
	crash.ReproOpts = []byte("no repro")
	c.client.ReportCrash(crash)
	rep := c.client.pollBug()

	crash.ReproSyz = []byte("getpid()")
	crash.ReproOpts = []byte("syz repro")
	c.client.ReportCrash(crash)
	c.client.pollBug()

	crash.ReproC = []byte("int main() {}")
	crash.ReproOpts = []byte("C repro")
	c.client.ReportCrash(crash)
	c.client.pollBug()

	// Now report lots of crashes with and without repros. Some of the older ones should be purged.
	const totalReported = 3 * maxCrashes
	for i := 0; i < totalReported; i++ {
		c.advanceTime(2 * time.Hour) // This ensures that crashes are saved.
		crash.ReproSyz = nil
		crash.ReproC = nil
		crash.ReproOpts = []byte(fmt.Sprintf("%v", i))
		c.client.ReportCrash(crash)

		crash.ReproSyz = []byte("syz repro")
		crash.ReproC = []byte("C repro")
		crash.ReproOpts = []byte(fmt.Sprintf("%v", i))
		c.client.ReportCrash(crash)
	}
	bug, _, _ := c.loadBug(rep.ID)
	crashes, _, err := queryCrashesForBug(c.ctx, bug.key(c.ctx), 10*totalReported)
	if err != nil {
		c.t.Fatal(err)
	}
	// First, count how many crashes of different types we have.
	// We should get all 3 reported crashes + some with repros and some without repros.
	reported, norepro, repro := 0, 0, 0
	for _, crash := range crashes {
		if !crash.Reported.IsZero() {
			reported++
		} else if crash.ReproSyz == 0 {
			norepro++
		} else {
			repro++
		}
	}
	c.t.Logf("got reported=%v, norepro=%v, repro=%v, maxCrashes=%v",
		reported, norepro, repro, maxCrashes)
	if reported != 3 ||
		norepro < maxCrashes || norepro > maxCrashes+10 ||
		repro < maxCrashes || repro > maxCrashes+10 {
		c.t.Fatalf("bad purged crashes")
	}
	// Then, check that the latest crashes were preserved.
	for _, crash := range crashes {
		if !crash.Reported.IsZero() {
			continue
		}
		idx, err := strconv.Atoi(string(crash.ReproOpts))
		if err != nil {
			c.t.Fatal(err)
		}
		count := norepro
		if crash.ReproSyz != 0 {
			count = repro
		}
		if idx < totalReported-count {
			c.t.Errorf("preserved bad crash repro=%v: %v", crash.ReproC != 0, idx)
		}
	}
}