Mirror of https://github.com/reactos/syzkaller.git (synced 2024-11-23 11:29:46 +00:00)
9133037195
Two virtual wireless devices are instantiated during network device initialization. A new flag (-wifi) controls whether these virtual wifi devices are instantiated and configured during proc initialization.

Two new pseudo-syscalls are also added:

1. syz_80211_inject_frame(mac_addr, packet, packet_len) -- injects an arbitrary packet into the wireless stack, as if it originated from the device identified by mac_addr.

2. syz_80211_join_ibss(interface_name, ssid, ssid_len, mode) -- puts a network interface into IBSS mode and joins an IBSS network. Arguments:
   1) interface_name -- null-terminated string that identifies a wireless interface
   2) ssid, ssid_len -- SSID of the IBSS network to join
   3) mode -- mode of operation (see below)

Modes of operation:
JOIN_IBSS_NO_SCAN (0x0) -- no channel scan is performed; syz_80211_join_ibss waits until the interface reaches IF_OPER_UP.
JOIN_IBSS_BG_SCAN (0x1) -- a channel scan is performed (takes ~9 seconds); syz_80211_join_ibss does not wait for IF_OPER_UP.
JOIN_IBSS_BG_NO_SCAN (0x2) -- no channel scan is performed; syz_80211_join_ibss does not wait for IF_OPER_UP.

Local testing confirmed that these syscalls can set up an operating network and inject packets into mac80211.
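For reference, the three modes as Go constants -- a minimal sketch derived only from the description above (the constant names follow the commit text; the authoritative definitions presumably live in the executor sources, not in this file):

	// Modes of operation of syz_80211_join_ibss (values from the commit message).
	const (
		JOIN_IBSS_NO_SCAN    = 0x0 // no channel scan; wait until the interface reaches IF_OPER_UP
		JOIN_IBSS_BG_SCAN    = 0x1 // channel scan (~9 seconds); do not wait for IF_OPER_UP
		JOIN_IBSS_BG_NO_SCAN = 0x2 // no channel scan; do not wait for IF_OPER_UP
	)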
563 lines
15 KiB
Go
// Copyright 2015 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

package main

import (
	"flag"
	"fmt"
	"math/rand"
	"net/http"
	_ "net/http/pprof"
	"os"
	"runtime"
	"runtime/debug"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/google/syzkaller/pkg/csource"
	"github.com/google/syzkaller/pkg/hash"
	"github.com/google/syzkaller/pkg/host"
	"github.com/google/syzkaller/pkg/ipc"
	"github.com/google/syzkaller/pkg/ipc/ipcconfig"
	"github.com/google/syzkaller/pkg/log"
	"github.com/google/syzkaller/pkg/osutil"
	"github.com/google/syzkaller/pkg/rpctype"
	"github.com/google/syzkaller/pkg/signal"
	"github.com/google/syzkaller/prog"
	_ "github.com/google/syzkaller/sys"
)
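
// Fuzzer holds the shared state of a syz-fuzzer process: the local corpus,
// coverage signal, work queue, and the RPC connection to the manager.
// It is shared by all Proc goroutines.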
type Fuzzer struct {
	name              string
	outputType        OutputType
	config            *ipc.Config
	execOpts          *ipc.ExecOpts
	procs             []*Proc
	gate              *ipc.Gate
	workQueue         *WorkQueue
	needPoll          chan struct{}
	choiceTable       *prog.ChoiceTable
	stats             [StatCount]uint64
	manager           *rpctype.RPCClient
	target            *prog.Target
	triagedCandidates uint32

	faultInjectionEnabled    bool
	comparisonTracingEnabled bool

	corpusMu     sync.RWMutex
	corpus       []*prog.Prog
	corpusHashes map[hash.Sig]struct{}
	corpusPrios  []int64
	sumPrios     int64

	signalMu     sync.RWMutex
	corpusSignal signal.Signal // signal of inputs in corpus
	maxSignal    signal.Signal // max signal ever observed including flakes
	newSignal    signal.Signal // diff of maxSignal since last sync with master

	logMu sync.Mutex
}

type FuzzerSnapshot struct {
	corpus      []*prog.Prog
	corpusPrios []int64
	sumPrios    int64
}

type Stat int

const (
	StatGenerate Stat = iota
	StatFuzz
	StatCandidate
	StatTriage
	StatMinimize
	StatSmash
	StatHint
	StatSeed
	StatCount
)

var statNames = [StatCount]string{
	StatGenerate:  "exec gen",
	StatFuzz:      "exec fuzz",
	StatCandidate: "exec candidate",
	StatTriage:    "exec triage",
	StatMinimize:  "exec minimize",
	StatSmash:     "exec smash",
	StatHint:      "exec hints",
	StatSeed:      "exec seeds",
}

type OutputType int

const (
	OutputNone OutputType = iota
	OutputStdout
	OutputDmesg
	OutputFile
)
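
// createIPCConfig enables the optional ipc flags that correspond to the
// features detected on the target machine (extra coverage, tun, net devices,
// devlink PCI, vhci and wifi emulation), plus the always-on flags.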
func createIPCConfig(features *host.Features, config *ipc.Config) {
	if features[host.FeatureExtraCoverage].Enabled {
		config.Flags |= ipc.FlagExtraCover
	}
	if features[host.FeatureNetInjection].Enabled {
		config.Flags |= ipc.FlagEnableTun
	}
	if features[host.FeatureNetDevices].Enabled {
		config.Flags |= ipc.FlagEnableNetDev
	}
	config.Flags |= ipc.FlagEnableNetReset
	config.Flags |= ipc.FlagEnableCgroups
	config.Flags |= ipc.FlagEnableCloseFds
	if features[host.FeatureDevlinkPCI].Enabled {
		config.Flags |= ipc.FlagEnableDevlinkPCI
	}
	if features[host.FeatureVhciInjection].Enabled {
		config.Flags |= ipc.FlagEnableVhciInjection
	}
	if features[host.FeatureWifiEmulation].Enabled {
		config.Flags |= ipc.FlagEnableWifi
	}
}
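
// main parses flags, dials the manager, performs the machine check (or reuses
// the manager's cached result), sets up features, and starts the Proc loops
// followed by the poll loop.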
// nolint: funlen
func main() {
	debug.SetGCPercent(50)

	var (
		flagName    = flag.String("name", "test", "unique name for manager")
		flagOS      = flag.String("os", runtime.GOOS, "target OS")
		flagArch    = flag.String("arch", runtime.GOARCH, "target arch")
		flagManager = flag.String("manager", "", "manager rpc address")
		flagProcs   = flag.Int("procs", 1, "number of parallel test processes")
		flagOutput  = flag.String("output", "stdout", "write programs to none/stdout/dmesg/file")
		flagPprof   = flag.String("pprof", "", "address to serve pprof profiles")
		flagTest    = flag.Bool("test", false, "enable image testing mode")      // used by syz-ci
		flagRunTest = flag.Bool("runtest", false, "enable program testing mode") // used by pkg/runtest
	)
	flag.Parse()
	outputType := parseOutputType(*flagOutput)
	log.Logf(0, "fuzzer started")

	target, err := prog.GetTarget(*flagOS, *flagArch)
	if err != nil {
		log.Fatalf("%v", err)
	}

	config, execOpts, err := ipcconfig.Default(target)
	if err != nil {
		log.Fatalf("failed to create default ipc config: %v", err)
	}
	sandbox := ipc.FlagsToSandbox(config.Flags)
	shutdown := make(chan struct{})
	osutil.HandleInterrupts(shutdown)
	go func() {
		// Handles graceful preemption on GCE.
		<-shutdown
		log.Logf(0, "SYZ-FUZZER: PREEMPTED")
		os.Exit(1)
	}()

	checkArgs := &checkArgs{
		target:      target,
		sandbox:     sandbox,
		ipcConfig:   config,
		ipcExecOpts: execOpts,
	}
	if *flagTest {
		testImage(*flagManager, checkArgs)
		return
	}

	if *flagPprof != "" {
		go func() {
			err := http.ListenAndServe(*flagPprof, nil)
			log.Fatalf("failed to serve pprof profiles: %v", err)
		}()
	} else {
		runtime.MemProfileRate = 0
	}

	machineInfo, err := host.CollectMachineInfo()
	if err != nil {
		log.Fatalf("failed to collect machine information: %v", err)
	}

	log.Logf(0, "dialing manager at %v", *flagManager)
	manager, err := rpctype.NewRPCClient(*flagManager)
	if err != nil {
		log.Fatalf("failed to connect to manager: %v ", err)
	}
	log.Logf(1, "connecting to manager...")
	a := &rpctype.ConnectArgs{
		Name:        *flagName,
		MachineInfo: machineInfo,
	}
	r := &rpctype.ConnectRes{}
	if err := manager.Call("Manager.Connect", a, r); err != nil {
		log.Fatalf("failed to connect to manager: %v ", err)
	}
	featureFlags, err := csource.ParseFeaturesFlags("none", "none", true)
	if err != nil {
		log.Fatal(err)
	}
	if r.CheckResult == nil {
		checkArgs.gitRevision = r.GitRevision
		checkArgs.targetRevision = r.TargetRevision
		checkArgs.enabledCalls = r.EnabledCalls
		checkArgs.allSandboxes = r.AllSandboxes
		checkArgs.featureFlags = featureFlags
		r.CheckResult, err = checkMachine(checkArgs)
		if err != nil {
			if r.CheckResult == nil {
				r.CheckResult = new(rpctype.CheckArgs)
			}
			r.CheckResult.Error = err.Error()
		}
		r.CheckResult.Name = *flagName
		if err := manager.Call("Manager.Check", r.CheckResult, nil); err != nil {
			log.Fatalf("Manager.Check call failed: %v", err)
		}
		if r.CheckResult.Error != "" {
			log.Fatalf("%v", r.CheckResult.Error)
		}
	} else {
		if err = host.Setup(target, r.CheckResult.Features, featureFlags, config.Executor); err != nil {
			log.Fatal(err)
		}
	}
	log.Logf(0, "syscalls: %v", len(r.CheckResult.EnabledCalls[sandbox]))
	for _, feat := range r.CheckResult.Features.Supported() {
		log.Logf(0, "%v: %v", feat.Name, feat.Reason)
	}
	createIPCConfig(r.CheckResult.Features, config)

	if *flagRunTest {
		runTest(target, manager, *flagName, config.Executor)
		return
	}

	needPoll := make(chan struct{}, 1)
	needPoll <- struct{}{}
	fuzzer := &Fuzzer{
		name:                     *flagName,
		outputType:               outputType,
		config:                   config,
		execOpts:                 execOpts,
		workQueue:                newWorkQueue(*flagProcs, needPoll),
		needPoll:                 needPoll,
		manager:                  manager,
		target:                   target,
		faultInjectionEnabled:    r.CheckResult.Features[host.FeatureFault].Enabled,
		comparisonTracingEnabled: r.CheckResult.Features[host.FeatureComparisons].Enabled,
		corpusHashes:             make(map[hash.Sig]struct{}),
	}
	gateCallback := fuzzer.useBugFrames(r, *flagProcs)
	fuzzer.gate = ipc.NewGate(2**flagProcs, gateCallback)

	for i := 0; fuzzer.poll(i == 0, nil); i++ {
	}
	calls := make(map[*prog.Syscall]bool)
	for _, id := range r.CheckResult.EnabledCalls[sandbox] {
		calls[target.Syscalls[id]] = true
	}
	fuzzer.choiceTable = target.BuildChoiceTable(fuzzer.corpus, calls)

	for pid := 0; pid < *flagProcs; pid++ {
		proc, err := newProc(fuzzer, pid)
		if err != nil {
			log.Fatalf("failed to create proc: %v", err)
		}
		fuzzer.procs = append(fuzzer.procs, proc)
		go proc.loop()
	}

	fuzzer.pollLoop()
}

// Returns gateCallback for leak checking if enabled.
func (fuzzer *Fuzzer) useBugFrames(r *rpctype.ConnectRes, flagProcs int) func() {
	var gateCallback func()

	if r.CheckResult.Features[host.FeatureLeak].Enabled {
		gateCallback = func() { fuzzer.gateCallback(r.MemoryLeakFrames) }
	}

	if r.CheckResult.Features[host.FeatureKCSAN].Enabled && len(r.DataRaceFrames) != 0 {
		fuzzer.filterDataRaceFrames(r.DataRaceFrames)
	}

	return gateCallback
}

func (fuzzer *Fuzzer) gateCallback(leakFrames []string) {
	// Leak checking is very slow, so we don't do it while triaging the corpus
	// (otherwise it takes forever). When we have presumably triaged the corpus
	// (triagedCandidates == 1), we run leak checking but ignore the result
	// to flush any previous leaks. After that (triagedCandidates == 2)
	// we do actual leak checking and report leaks.
	triagedCandidates := atomic.LoadUint32(&fuzzer.triagedCandidates)
	if triagedCandidates == 0 {
		return
	}
	args := append([]string{"leak"}, leakFrames...)
	output, err := osutil.RunCmd(10*time.Minute, "", fuzzer.config.Executor, args...)
	if err != nil && triagedCandidates == 2 {
		// If we exit right away, dying executors will dump lots of garbage to console.
		os.Stdout.Write(output)
		fmt.Printf("BUG: leak checking failed")
		time.Sleep(time.Hour)
		os.Exit(1)
	}
	if triagedCandidates == 1 {
		atomic.StoreUint32(&fuzzer.triagedCandidates, 2)
	}
}
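
// filterDataRaceFrames tells the executor to suppress the known data race
// frames by running its setup_kcsan_filterlist command.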
func (fuzzer *Fuzzer) filterDataRaceFrames(frames []string) {
	args := append([]string{"setup_kcsan_filterlist"}, frames...)
	output, err := osutil.RunCmd(10*time.Minute, "", fuzzer.config.Executor, args...)
	if err != nil {
		log.Fatalf("failed to set KCSAN filterlist: %v", err)
	}
	log.Logf(0, "%s", output)
}
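
// pollLoop periodically syncs with the manager: it prints a keep-alive line,
// uploads accumulated stats and new max signal, and polls for new inputs and
// candidates (every 10 seconds, or sooner when the work queue asks for more
// candidates via needPoll).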
func (fuzzer *Fuzzer) pollLoop() {
	var execTotal uint64
	var lastPoll time.Time
	var lastPrint time.Time
	ticker := time.NewTicker(3 * time.Second).C
	for {
		poll := false
		select {
		case <-ticker:
		case <-fuzzer.needPoll:
			poll = true
		}
		if fuzzer.outputType != OutputStdout && time.Since(lastPrint) > 10*time.Second {
			// Keep-alive for manager.
			log.Logf(0, "alive, executed %v", execTotal)
			lastPrint = time.Now()
		}
		if poll || time.Since(lastPoll) > 10*time.Second {
			needCandidates := fuzzer.workQueue.wantCandidates()
			if poll && !needCandidates {
				continue
			}
			stats := make(map[string]uint64)
			for _, proc := range fuzzer.procs {
				stats["exec total"] += atomic.SwapUint64(&proc.env.StatExecs, 0)
				stats["executor restarts"] += atomic.SwapUint64(&proc.env.StatRestarts, 0)
			}
			for stat := Stat(0); stat < StatCount; stat++ {
				v := atomic.SwapUint64(&fuzzer.stats[stat], 0)
				stats[statNames[stat]] = v
				execTotal += v
			}
			if !fuzzer.poll(needCandidates, stats) {
				lastPoll = time.Now()
			}
		}
	}
}
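
// poll makes one Manager.Poll RPC: it uploads stats and newly discovered
// signal, then merges the returned max signal and adds the returned inputs
// and candidates. It reports whether the manager returned anything new, so
// the caller can keep polling while the manager still has work to hand out.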
func (fuzzer *Fuzzer) poll(needCandidates bool, stats map[string]uint64) bool {
	a := &rpctype.PollArgs{
		Name:           fuzzer.name,
		NeedCandidates: needCandidates,
		MaxSignal:      fuzzer.grabNewSignal().Serialize(),
		Stats:          stats,
	}
	r := &rpctype.PollRes{}
	if err := fuzzer.manager.Call("Manager.Poll", a, r); err != nil {
		log.Fatalf("Manager.Poll call failed: %v", err)
	}
	maxSignal := r.MaxSignal.Deserialize()
	log.Logf(1, "poll: candidates=%v inputs=%v signal=%v",
		len(r.Candidates), len(r.NewInputs), maxSignal.Len())
	fuzzer.addMaxSignal(maxSignal)
	for _, inp := range r.NewInputs {
		fuzzer.addInputFromAnotherFuzzer(inp)
	}
	for _, candidate := range r.Candidates {
		fuzzer.addCandidateInput(candidate)
	}
	if needCandidates && len(r.Candidates) == 0 && atomic.LoadUint32(&fuzzer.triagedCandidates) == 0 {
		atomic.StoreUint32(&fuzzer.triagedCandidates, 1)
	}
	return len(r.NewInputs) != 0 || len(r.Candidates) != 0 || maxSignal.Len() != 0
}

func (fuzzer *Fuzzer) sendInputToManager(inp rpctype.RPCInput) {
	a := &rpctype.NewInputArgs{
		Name:     fuzzer.name,
		RPCInput: inp,
	}
	if err := fuzzer.manager.Call("Manager.NewInput", a, nil); err != nil {
		log.Fatalf("Manager.NewInput call failed: %v", err)
	}
}
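
// addInputFromAnotherFuzzer adds an input that some other fuzzer instance
// discovered (and the manager relayed) to the local corpus.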
func (fuzzer *Fuzzer) addInputFromAnotherFuzzer(inp rpctype.RPCInput) {
	p := fuzzer.deserializeInput(inp.Prog)
	if p == nil {
		return
	}
	sig := hash.Hash(inp.Prog)
	sign := inp.Signal.Deserialize()
	fuzzer.addInputToCorpus(p, sign, sig)
}
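
// addCandidateInput enqueues a candidate program received from the manager
// for triage, preserving its minimized/smashed flags.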
func (fuzzer *Fuzzer) addCandidateInput(candidate rpctype.RPCCandidate) {
	p := fuzzer.deserializeInput(candidate.Prog)
	if p == nil {
		return
	}
	flags := ProgCandidate
	if candidate.Minimized {
		flags |= ProgMinimized
	}
	if candidate.Smashed {
		flags |= ProgSmashed
	}
	fuzzer.workQueue.enqueue(&WorkCandidate{
		p:     p,
		flags: flags,
	})
}
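
// deserializeInput parses a serialized program, dying on malformed input
// and dropping programs that exceed prog.MaxCalls.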
func (fuzzer *Fuzzer) deserializeInput(inp []byte) *prog.Prog {
	p, err := fuzzer.target.Deserialize(inp, prog.NonStrict)
	if err != nil {
		log.Fatalf("failed to deserialize prog: %v\n%s", err, inp)
	}
	if len(p.Calls) > prog.MaxCalls {
		return nil
	}
	return p
}
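
// chooseProgram picks a corpus program at random, weighted by priority:
// corpusPrios holds cumulative priorities, so a binary search over a random
// point in [0, sumPrios] selects programs proportionally to their priority.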
func (fuzzer *FuzzerSnapshot) chooseProgram(r *rand.Rand) *prog.Prog {
	randVal := r.Int63n(fuzzer.sumPrios + 1)
	idx := sort.Search(len(fuzzer.corpusPrios), func(i int) bool {
		return fuzzer.corpusPrios[i] >= randVal
	})
	return fuzzer.corpus[idx]
}
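
// addInputToCorpus appends a program to the corpus unless its hash is
// already present. The program's priority is the size of its signal
// (at least 1), and its signal is merged into corpusSignal and maxSignal.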
func (fuzzer *Fuzzer) addInputToCorpus(p *prog.Prog, sign signal.Signal, sig hash.Sig) {
	fuzzer.corpusMu.Lock()
	if _, ok := fuzzer.corpusHashes[sig]; !ok {
		fuzzer.corpus = append(fuzzer.corpus, p)
		fuzzer.corpusHashes[sig] = struct{}{}
		prio := int64(len(sign))
		if sign.Empty() {
			prio = 1
		}
		fuzzer.sumPrios += prio
		fuzzer.corpusPrios = append(fuzzer.corpusPrios, fuzzer.sumPrios)
	}
	fuzzer.corpusMu.Unlock()

	if !sign.Empty() {
		fuzzer.signalMu.Lock()
		fuzzer.corpusSignal.Merge(sign)
		fuzzer.maxSignal.Merge(sign)
		fuzzer.signalMu.Unlock()
	}
}

func (fuzzer *Fuzzer) snapshot() FuzzerSnapshot {
	fuzzer.corpusMu.RLock()
	defer fuzzer.corpusMu.RUnlock()
	return FuzzerSnapshot{fuzzer.corpus, fuzzer.corpusPrios, fuzzer.sumPrios}
}

func (fuzzer *Fuzzer) addMaxSignal(sign signal.Signal) {
	if sign.Len() == 0 {
		return
	}
	fuzzer.signalMu.Lock()
	defer fuzzer.signalMu.Unlock()
	fuzzer.maxSignal.Merge(sign)
}
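
// grabNewSignal returns the signal accumulated since the last call and
// resets it, so each piece of new signal is reported to the manager once.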
func (fuzzer *Fuzzer) grabNewSignal() signal.Signal {
	fuzzer.signalMu.Lock()
	defer fuzzer.signalMu.Unlock()
	sign := fuzzer.newSignal
	if sign.Empty() {
		return nil
	}
	fuzzer.newSignal = nil
	return sign
}

func (fuzzer *Fuzzer) corpusSignalDiff(sign signal.Signal) signal.Signal {
	fuzzer.signalMu.RLock()
	defer fuzzer.signalMu.RUnlock()
	return fuzzer.corpusSignal.Diff(sign)
}

func (fuzzer *Fuzzer) checkNewSignal(p *prog.Prog, info *ipc.ProgInfo) (calls []int, extra bool) {
	fuzzer.signalMu.RLock()
	defer fuzzer.signalMu.RUnlock()
	for i, inf := range info.Calls {
		if fuzzer.checkNewCallSignal(p, &inf, i) {
			calls = append(calls, i)
		}
	}
	extra = fuzzer.checkNewCallSignal(p, &info.Extra, -1)
	return
}
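
// checkNewCallSignal reports whether the given call produced signal not yet
// in maxSignal, and if so merges it into maxSignal and newSignal. Note the
// lock dance: the caller holds signalMu for reading, so the method briefly
// upgrades to a write lock to perform the merge.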
func (fuzzer *Fuzzer) checkNewCallSignal(p *prog.Prog, info *ipc.CallInfo, call int) bool {
	diff := fuzzer.maxSignal.DiffRaw(info.Signal, signalPrio(p, info, call))
	if diff.Empty() {
		return false
	}
	fuzzer.signalMu.RUnlock()
	fuzzer.signalMu.Lock()
	fuzzer.maxSignal.Merge(diff)
	fuzzer.newSignal.Merge(diff)
	fuzzer.signalMu.Unlock()
	fuzzer.signalMu.RLock()
	return true
}
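
// signalPrio computes the priority of a call's signal: bit 1 is set if the
// call succeeded (errno == 0), bit 0 if the call contains no squashed "any"
// arguments. Extra (call == -1) signal always has priority 0.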
func signalPrio(p *prog.Prog, info *ipc.CallInfo, call int) (prio uint8) {
	if call == -1 {
		return 0
	}
	if info.Errno == 0 {
		prio |= 1 << 1
	}
	if !p.Target.CallContainsAny(p.Calls[call]) {
		prio |= 1 << 0
	}
	return
}

func parseOutputType(str string) OutputType {
	switch str {
	case "none":
		return OutputNone
	case "stdout":
		return OutputStdout
	case "dmesg":
		return OutputDmesg
	case "file":
		return OutputFile
	default:
		log.Fatalf("-output flag must be one of none/stdout/dmesg/file")
		return OutputNone
	}
}