diff --git a/go.mod b/go.mod
index 464663b1..4061e105 100644
--- a/go.mod
+++ b/go.mod
@@ -6,19 +6,45 @@ require (
cloud.google.com/go v0.60.0 // indirect
cloud.google.com/go/storage v1.10.0
github.com/BurntSushi/toml v0.3.1 // indirect
+ github.com/Djarvur/go-err113 v0.1.0 // indirect
+ github.com/coreos/go-etcd v2.0.0+incompatible // indirect
+ github.com/cpuguy83/go-md2man v1.0.10 // indirect
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813
+ github.com/go-lintpack/lintpack v0.5.2 // indirect
+ github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.4.2 // indirect
- github.com/golangci/golangci-lint v1.27.0
- github.com/google/go-cmp v0.5.0
+ github.com/golangci/golangci-lint v1.31.0
+ github.com/golangci/misspell v0.3.5 // indirect
+ github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039 // indirect
+ github.com/google/go-cmp v0.5.2
github.com/googleapis/gax-go v1.0.4-0.20191018151119-b443e5a67ec8 // indirect
+ github.com/gostaticanalysis/analysisutil v0.2.1 // indirect
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6
+ github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
github.com/jstemmer/go-junit-report v0.9.2-0.20191008195320-984a47ca6b0a // indirect
- golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2
+ github.com/klauspost/cpuid v1.2.0 // indirect
+ github.com/magiconair/properties v1.8.3 // indirect
+ github.com/matoous/godox v0.0.0-20200801072554-4fb83dc2941e // indirect
+ github.com/mitchellh/mapstructure v1.3.3 // indirect
+ github.com/pelletier/go-toml v1.8.1 // indirect
+ github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c // indirect
+ github.com/spf13/afero v1.4.0 // indirect
+ github.com/spf13/cast v1.3.1 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/stretchr/objx v0.3.0 // indirect
+ github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b // indirect
+ github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 // indirect
+ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect
+ golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
- golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
- golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de // indirect
- golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f
+ golang.org/x/sys v0.0.0-20200915084602-288bc346aa39
+ golang.org/x/tools v0.0.0-20200915031644-64986481280e
google.golang.org/api v0.28.0
google.golang.org/appengine v1.6.6
+ gopkg.in/ini.v1 v1.61.0 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
+ mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d // indirect
+ mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 // indirect
+ sourcegraph.com/sqs/pbtypes v1.0.0 // indirect
)
diff --git a/go.sum b/go.sum
index 37f41a87..520328e4 100644
--- a/go.sum
+++ b/go.sum
@@ -23,6 +23,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -40,17 +41,30 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157 h1:hY39LwQHh+1kaovmIjOrlqnXNX6tygSRfLkkK33IkZU=
github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/Djarvur/go-err113 v0.1.0 h1:uCRZZOdMQ0TZPHYTdYpoC0bLYJKPEHPUJ8MeAa51lNU=
+github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bombsimon/wsl/v3 v3.0.0 h1:w9f49xQatuaeTJFaNP4SpiWSR5vfT6IstPtM62JjcqA=
github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/bombsimon/wsl/v3 v3.1.0 h1:E5SRssoBgtVFPcYWUOFJEcgaySgdtTNYzsSKDOY7ss8=
+github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -60,15 +74,22 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/daixiang0/gci v0.2.4 h1:BUCKk5nlK2m+kRIsoj+wb/5hazHvHeZieBKWd9Afa8Q=
+github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denis-tingajkin/go-header v0.3.1 h1:ymEpSiFjeItCy1FOP+x0M2KdCELdEAHUsNa8F+hHc6w=
+github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813 h1:NgO45/5mBLRVfiXerEFzH6ikcZ7DNRPS639xFg3ENzU=
@@ -79,11 +100,17 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-critic/go-critic v0.4.1 h1:4DTQfT1wWwLg/hzxwD9bkdhDQrdJtxe6DUTadPlrIeE=
github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g=
+github.com/go-critic/go-critic v0.5.2 h1:3RJdgf6u4NZUumoP8nzbqiiNT8e1tC2Oc7jlgqre/IA=
+github.com/go-critic/go-critic v0.5.2/go.mod h1:cc0+HvdE3lFpqLecgqMaJcvWWH77sLdBp+wLGPM1Yyo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -115,14 +142,20 @@ github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUD
github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2XQaA=
github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=
+github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b h1:ekuhfTjngPhisSjOJ0QWKpPQE8/rbknHaes6WVJj5Hw=
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
+github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -162,10 +195,14 @@ github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 h1:pe9JHs3cHHDQgO
github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8=
github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
+github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d h1:pXTK/gkVNs7Zyy7WKgLXmpQ5bHTrq5GDsp8R9Qs67g0=
+github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
github.com/golangci/golangci-lint v1.27.0 h1:VYLx63qb+XJsHdZ27PMS2w5JZacN0XG8ffUwe7yQomo=
github.com/golangci/golangci-lint v1.27.0/go.mod h1:+eZALfxIuthdrHPtfM7w/R3POJLjHDfJJw8XZl9xOng=
+github.com/golangci/golangci-lint v1.31.0 h1:+m9I3LEmxXLpymkXRPkDQGzOVBmBYm16UtDiXqZxWek=
+github.com/golangci/golangci-lint v1.31.0/go.mod h1:aMQuNCA+NDU5+4jLL5pEuFHoue0IznKE2+/GsFvvs8A=
github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI=
github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
@@ -174,10 +211,14 @@ github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29M
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk=
github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=
+github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=
github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg=
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
+github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039 h1:XQKc8IYQOeRwVs36tDrEmTgDgP88d5iEURwpmtiAlOM=
+github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -190,6 +231,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -206,17 +250,41 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
+github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.2.1 h1:OfNeM+FV1AOdvwfQY8Iuq2XTQrsc2isCVXw+l3SftP0=
+github.com/gostaticanalysis/analysisutil v0.2.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
+github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc=
+github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -226,8 +294,12 @@ github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a h1:Gmsqmapf
github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s=
github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3 h1:jNYPNLe3d8smommaoQlK7LOA5ESyUJJ+Wf79ZtA7Vp4=
github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jstemmer/go-junit-report v0.9.2-0.20191008195320-984a47ca6b0a h1:vQd3e1rv9toNsvB8kUX/aFeU+jYKC0+N1K/weBj/Deo=
@@ -235,40 +307,71 @@ github.com/jstemmer/go-junit-report v0.9.2-0.20191008195320-984a47ca6b0a/go.mod
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kyoh86/exportloopref v0.1.7 h1:u+iHuTbkbTS2D/JP7fCuZDo/t3rBVGo3Hf58Rc+lQVY=
+github.com/kyoh86/exportloopref v0.1.7/go.mod h1:h1rDl2Kdj97+Kwh4gdz3ujE7XHmH51Q0lUiZ1z4NLj8=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.3 h1:kJSsc6EXkBLgr3SphHk9w5mtjn0bjlR4JYEXKrJ45rQ=
+github.com/magiconair/properties v1.8.3/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE=
github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/matoous/godox v0.0.0-20200801072554-4fb83dc2941e h1:2U5rOmpaB96l35w+NDjMtmmrp2e6a6AJKoc4B5+7UwA=
+github.com/matoous/godox v0.0.0-20200801072554-4fb83dc2941e/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw=
@@ -276,20 +379,34 @@ github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5w
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/exhaustive v0.0.0-20200811152831-6cf413ae40e0 h1:eMV1t2NQRc3r1k3guWiv/zEeqZZP6kPvpUfy6byfL1g=
+github.com/nishanths/exhaustive v0.0.0-20200811152831-6cf413ae40e0/go.mod h1:wBEpHwM2OdmeNpdCvRPUlkEbBuaFmcK4Wv8Q7FuGW3c=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -301,56 +418,107 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
+github.com/quasilyte/go-ruleguard v0.2.0 h1:UOVMyH2EKkxIfzrULvA9n/tO+HtEhqD9mrLSWMr5FwU=
+github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
+github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c h1:+gtJ/Pwj2dgUGlZgTrNFqajGYKZQc7Piqus/S6DK9CE=
+github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.6.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryancurrah/gomodguard v1.0.4 h1:oCreMAt9GuFXDe9jW4HBpc3GjdX3R/sUEcLAGh1zPx8=
github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE=
+github.com/ryancurrah/gomodguard v1.1.0 h1:DWbye9KyMgytn8uYpuHkwf0RHqAYO6Ay/D0TbCpPtVU=
+github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM=
+github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
+github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/securego/gosec/v2 v2.3.0 h1:y/9mCF2WPDbSDpL3QDWZD3HHGrSYw0QSHnCqTfs4JPE=
github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME=
+github.com/securego/gosec/v2 v2.4.0 h1:ivAoWcY5DMs9n04Abc1VkqZBO0FL0h4ShTcVsC53lCE=
+github.com/securego/gosec/v2 v2.4.0/go.mod h1:0/Q4cjmlFDfDUj1+Fib61sc+U5IQb2w+Iv9/C3wPVko=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=
+github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
github.com/sourcegraph/go-diff v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs=
github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
+github.com/sourcegraph/go-diff v0.6.0 h1:WbN9e/jD8ujU+o0vd9IFN5AEwtfB0rn/zM/AANaClqQ=
+github.com/sourcegraph/go-diff v0.6.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.4.0 h1:jsLTaI1zwYO3vjrzHalkVcIHXTNmdQFepW4OI8H3+x8=
+github.com/spf13/afero v1.4.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk=
github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
+github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA=
+github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As=
+github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2 h1:Xr9gkxfOP0KQWXKNqmwe8vEeSUiUj4Rlee9CMVX2ZUQ=
github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
+github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U=
+github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
github.com/tetafro/godot v0.3.7 h1:+mecr7RKrUKB5UQ1gwqEMn13sDKTyDR8KNIquB9mm+8=
github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0=
+github.com/tetafro/godot v0.4.8 h1:h61+hQraWhdI6WYqMwAwZYCE5yxL6a9/Orw4REbabSU=
+github.com/tetafro/godot v0.4.8/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8=
+github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa h1:RC4maTWLKKwb7p1cnoygsbKIgNlJqSYBeAFON3Ar8As=
github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
@@ -358,18 +526,24 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ultraware/funlen v0.0.2 h1:Av96YVBwwNSe4MLR7iI/BIa3VyI7/djnto/pK3Uxbdo=
github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg=
github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs=
github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
+github.com/valyala/fasthttp v1.15.1/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
+github.com/valyala/quicktemplate v1.6.2/go.mod h1:mtEJpQtUiBV0SHhMX6RtiJtqxncgrfmjcUy5T68X8TM=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -382,11 +556,14 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190221220918-438050ddec5e/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -426,7 +603,9 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -449,8 +628,13 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -464,9 +648,12 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -480,11 +667,15 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -496,21 +687,28 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200915084602-288bc346aa39 h1:356XA7ITklAU2//sYkjFeco+dH1bCRD8XCJ9FIEsvo4=
+golang.org/x/sys v0.0.0-20200915084602-288bc346aa39/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de h1:aYKJLPSrddB2N7/6OKyFqJ337SXpo61bBuvO5p1+7iY=
golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -535,6 +733,7 @@ golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -544,6 +743,7 @@ golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20191217033636-bbbf87ae2631/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200128002243-345141a36859 h1:xIszjAtlVeHg9hhv6Zhntvwqowji1k2rrgoOhj/aaKw=
golang.org/x/tools v0.0.0-20200128002243-345141a36859/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -555,22 +755,35 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200321224714-0d839f3cf2ed/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200519015757-0d0afa43d58a/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f h1:JcoF/bowzCDI+MXu1yLqQGNO3ibqWsWq+Sk7pOT218w=
golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200701041122-1837592efa10/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200915031644-64986481280e h1:tfSNPIxC48Azhz4nLSPskz/yE9R6ftFRK8pfgfqWUAc=
+golang.org/x/tools v0.0.0-20200915031644-64986481280e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -661,6 +874,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10=
+gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
@@ -669,6 +884,12 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -679,14 +900,23 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.5 h1:nI5egYTGJakVyOryqLs1cQO5dO0ksin5XXs2pspk75k=
+honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+mvdan.cc/gofumpt v0.0.0-20200709182408-4fd085cb6d5f h1:gi7cb8HTDZ6q8VqsUpkdoFi3vxwHMneQ6+Q5Ap5hjPE=
+mvdan.cc/gofumpt v0.0.0-20200709182408-4fd085cb6d5f/go.mod h1:9VQ397fNXEnF84t90W4r4TRCQK+pg9f8ugVfyj+S26w=
+mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d h1:t8TAw9WgTLghti7RYkpPmqk4JtQ3+wcP5GgZqgWeWLQ=
+mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d/go.mod h1:bzrjFmaD6+xqohD3KYP0H2FEuxknnBmyyOxdhLdaIws=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f h1:Cq7MalBHYACRd6EesksG1Q8EoIAKOsiZviGKbOLIej4=
mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
+mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 h1:kAREL6MPwpsk1/PQPFD3Eg7WAQR5mPTWZJaBiG5LDbY=
+mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
+sourcegraph.com/sqs/pbtypes v1.0.0/go.mod h1:3AciMUv4qUuRHRHhOG4TZOB+72GdPVz5k+c648qsFS4=
diff --git a/vendor/github.com/Djarvur/go-err113/README.adoc b/vendor/github.com/Djarvur/go-err113/README.adoc
index b4c1f437..b26af403 100644
--- a/vendor/github.com/Djarvur/go-err113/README.adoc
+++ b/vendor/github.com/Djarvur/go-err113/README.adoc
@@ -23,7 +23,7 @@ So, `err113` reports every `==` and `!=` comparison for exact `error` type varia
Also, any call of `errors.New()` and `fmt.Errorf()` methods are reported except the calls used to initialise package-level variables and the `fmt.Errorf()` calls wrapping the other errors.
-Note: non-standard packages, like `github.com/pkg/errors` are ignored complitely.
+Note: non-standard packages, like `github.com/pkg/errors` are ignored completely.
== Install
@@ -71,3 +71,5 @@ Flags:
== Thanks
To link:https://github.com/quasilyte[Iskander (Alex) Sharipov] for the really useful advices.
+
+To link:https://github.com/jackwhelpton[Jack Whelpton] for the bugfix provided.
\ No newline at end of file
diff --git a/vendor/github.com/Djarvur/go-err113/comparison.go b/vendor/github.com/Djarvur/go-err113/comparison.go
index 7e7777df..0ffe2863 100644
--- a/vendor/github.com/Djarvur/go-err113/comparison.go
+++ b/vendor/github.com/Djarvur/go-err113/comparison.go
@@ -21,18 +21,7 @@ func inspectComparision(pass *analysis.Pass, n ast.Node) bool { // nolint: unpar
return true
}
- // check that both left and right hand side are not nil
- if pass.TypesInfo.Types[be.X].IsNil() || pass.TypesInfo.Types[be.Y].IsNil() {
- return true
- }
-
- // check that both left and right hand side are not io.EOF
- if isEOF(be.X, pass.TypesInfo) || isEOF(be.Y, pass.TypesInfo) {
- return true
- }
-
- // check that both left and right hand side are errors
- if !isError(be.X, pass.TypesInfo) && !isError(be.Y, pass.TypesInfo) {
+ if !areBothErrors(be.X, be.Y, pass.TypesInfo) {
return true
}
@@ -99,5 +88,24 @@ func asImportedName(ex ast.Expr, info *types.Info) (string, bool) {
return "", false
}
- return ep.Imported().Name(), true
+ return ep.Imported().Path(), true
+}
+
+func areBothErrors(x, y ast.Expr, typesInfo *types.Info) bool {
+ // check that both left and right hand side are not nil
+ if typesInfo.Types[x].IsNil() || typesInfo.Types[y].IsNil() {
+ return false
+ }
+
+ // check that both left and right hand side are not io.EOF
+ if isEOF(x, typesInfo) || isEOF(y, typesInfo) {
+ return false
+ }
+
+ // check that both left and right hand side are errors
+ if !isError(x, typesInfo) && !isError(y, typesInfo) {
+ return false
+ }
+
+ return true
}
diff --git a/vendor/github.com/Djarvur/go-err113/err113.go b/vendor/github.com/Djarvur/go-err113/err113.go
index e9d93a7a..ec4f52ac 100644
--- a/vendor/github.com/Djarvur/go-err113/err113.go
+++ b/vendor/github.com/Djarvur/go-err113/err113.go
@@ -10,7 +10,7 @@ import (
"golang.org/x/tools/go/analysis"
)
-// NewAnalyzer creates a new analysis.Analyzer instance tuned to run err113 checks
+// NewAnalyzer creates a new analysis.Analyzer instance tuned to run err113 checks.
func NewAnalyzer() *analysis.Analyzer {
return &analysis.Analyzer{
Name: "err113",
@@ -35,7 +35,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, nil
}
-// render returns the pretty-print of the given node
+// render returns the pretty-print of the given node.
func render(fset *token.FileSet, x interface{}) string {
var buf bytes.Buffer
if err := printer.Fprint(&buf, fset, x); err != nil {
diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml
new file mode 100644
index 00000000..096369d4
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - tip
+
+# Setting sudo access to false will let Travis CI use containers rather than
+# VMs to run the tests. For more details see:
+# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
+# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
+sudo: false
+
+script:
+ - make setup
+ - make test
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/06e3328629952dabe3e0
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: never # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md
new file mode 100644
index 00000000..e405c9a8
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md
@@ -0,0 +1,109 @@
+# 1.5.0 (2019-09-11)
+
+## Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+## Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+## Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+# 1.4.2 (2018-04-10)
+
+## Changed
+- #72: Updated the docs to point to vert for a console appliaction
+- #71: Update the docs on pre-release comparator handling
+
+## Fixed
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+# 1.4.1 (2018-04-02)
+
+## Fixed
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+# 1.4.0 (2017-10-04)
+
+## Changed
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+# 1.3.1 (2017-07-10)
+
+## Fixed
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+# 1.3.0 (2017-05-02)
+
+## Added
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+## Fixed
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+## Changed
+- #55: The godoc icon moved from png to svg
+
+# 1.2.3 (2017-04-03)
+
+## Fixed
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+# Release 1.2.2 (2016-12-13)
+
+## Fixed
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+# Release 1.2.1 (2016-11-28)
+
+## Fixed
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+ properly.
+
+# Release 1.2.0 (2016-11-04)
+
+## Added
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+## Fixed
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+ might not satisfy the intended compatibility. The change here ignores pre-releases
+ on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+ constraint. For example, `^1.2.3` will ignore pre-releases while
+ `^1.2.3-alpha` will include them.
+
+# Release 1.1.1 (2016-06-30)
+
+## Changed
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+# Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a versions failed a
+ constraint.
+
+# Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+# Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt
new file mode 100644
index 00000000..9ff7da9c
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile
new file mode 100644
index 00000000..a7a1b4e3
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/Makefile
@@ -0,0 +1,36 @@
+.PHONY: setup
+setup:
+ go get -u gopkg.in/alecthomas/gometalinter.v1
+ gometalinter.v1 --install
+
+.PHONY: test
+test: validate lint
+ @echo "==> Running tests"
+ go test -v
+
+.PHONY: validate
+validate:
+ @echo "==> Running static validations"
+ @gometalinter.v1 \
+ --disable-all \
+ --enable deadcode \
+ --severity deadcode:error \
+ --enable gofmt \
+ --enable gosimple \
+ --enable ineffassign \
+ --enable misspell \
+ --enable vet \
+ --tests \
+ --vendor \
+ --deadline 60s \
+ ./... || exit_code=1
+
+.PHONY: lint
+lint:
+ @echo "==> Running linters"
+ @gometalinter.v1 \
+ --disable-all \
+ --enable golint \
+ --vendor \
+ --deadline 60s \
+ ./... || :
diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md
new file mode 100644
index 00000000..1b52d2f4
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/README.md
@@ -0,0 +1,194 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+```go
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+```
+
+If there is an error the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
+package from the standard library. For example,
+
+```go
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+```
+
+## Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then joined by `||` into OR
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0, or is
+greater than or equal to 4.2.3 (a short sketch follows the list below).
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
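+An illustrative sketch (not from the upstream README) of how the comma (AND) and
+`||` (OR) grouping above evaluates:
+
+```go
+    c, _ := semver.NewConstraint(">= 1.2, < 3.0.0 || >= 4.2.3")
+    fmt.Println(c.Check(semver.MustParse("1.5.0"))) // true: first OR group matches
+    fmt.Println(c.Check(semver.MustParse("4.1.0"))) // false: neither group matches
+    fmt.Println(c.Check(semver.MustParse("4.2.3"))) // true: second OR group matches
+```
+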
+## Working With Pre-release Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons without a pre-release comparator will skip pre-release versions.
+For example, `>=1.2.3` will skip pre-releases when looking at a list of releases
+while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/))
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
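+An illustrative sketch (not from the upstream README) of the pre-release
+comparator behaviour described above:
+
+```go
+    v := semver.MustParse("1.2.4-beta.1")
+    c1, _ := semver.NewConstraint(">=1.2.3")
+    c2, _ := semver.NewConstraint(">=1.2.3-0")
+    fmt.Println(c1.Check(v)) // false: pre-releases are skipped without a pre-release comparator
+    fmt.Println(c2.Check(v)) // true: the -0 comparator opts in to pre-releases
+```
+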
+## Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
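+For instance, a short sketch (not from the upstream README):
+
+```go
+    c, _ := semver.NewConstraint("1.2 - 1.4.5") // rewritten internally to ">= 1.2, <= 1.4.5"
+    fmt.Println(c.Check(semver.MustParse("1.3.0"))) // true
+    fmt.Println(c.Check(semver.MustParse("1.5.0"))) // false
+```
+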
+## Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcard characters. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
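+A quick sketch of the wildcard behaviour (illustrative, not from the upstream README):
+
+```go
+    c, _ := semver.NewConstraint("1.2.x")
+    fmt.Println(c.Check(semver.MustParse("1.2.9"))) // true: any patch of 1.2
+    fmt.Println(c.Check(semver.MustParse("1.3.0"))) // false: outside the < 1.3.0 bound
+```
+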
+## Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
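+A tilde sketch (illustrative, not from the upstream README):
+
+```go
+    c, _ := semver.NewConstraint("~1.2.3")
+    fmt.Println(c.Check(semver.MustParse("1.2.9"))) // true: patch-level change
+    fmt.Println(c.Check(semver.MustParse("1.3.0"))) // false: minor bump escapes ~1.2.3
+```
+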
+## Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+
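+A caret sketch (illustrative, not from the upstream README):
+
+```go
+    c, _ := semver.NewConstraint("^1.2.3")
+    fmt.Println(c.Check(semver.MustParse("1.9.0"))) // true: same major version
+    fmt.Println(c.Check(semver.MustParse("2.0.0"))) // false: a major bump is breaking
+```
+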
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+ c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+
+ // Validate a version against a constraint.
+ a, msgs := c.Validate(v)
+ // a is false
+ for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+ }
+```
+
+# Fuzzing
+
+ [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing.
+
+1. `go-fuzz-build`
+2. `go-fuzz -workdir=fuzz`
+
+# Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml
new file mode 100644
index 00000000..b2778df1
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/appveyor.yml
@@ -0,0 +1,44 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\semver
+shallow_clone: true
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+install:
+ - go version
+ - go env
+ - go get -u gopkg.in/alecthomas/gometalinter.v1
+ - set PATH=%PATH%;%GOPATH%\bin
+ - gometalinter.v1.exe --install
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - "gometalinter.v1 \
+ --disable-all \
+ --enable deadcode \
+ --severity deadcode:error \
+ --enable gofmt \
+ --enable gosimple \
+ --enable ineffassign \
+ --enable misspell \
+ --enable vet \
+ --tests \
+ --vendor \
+ --deadline 60s \
+ ./... || exit_code=1"
+ - "gometalinter.v1 \
+ --disable-all \
+ --enable golint \
+ --vendor \
+ --deadline 60s \
+ ./... || :"
+ - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go
new file mode 100644
index 00000000..a7823589
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+ return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+ return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go
new file mode 100644
index 00000000..b94b9341
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/constraints.go
@@ -0,0 +1,423 @@
+package semver
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct {
+ constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+ // Rewrite - ranges into a comparison operation.
+ c = rewriteRange(c)
+
+ ors := strings.Split(c, "||")
+ or := make([][]*constraint, len(ors))
+ for k, v := range ors {
+ cs := strings.Split(v, ",")
+ result := make([]*constraint, len(cs))
+ for i, s := range cs {
+ pc, err := parseConstraint(s)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = pc
+ }
+ or[k] = result
+ }
+
+ o := &Constraints{constraints: or}
+ return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ // loop over the ORs and check the inner ANDs
+ for _, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ if !c.check(v) {
+ joy = false
+ break
+ }
+ }
+
+ if joy {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Validate checks if a version satisfies a constraint. If not, a slice of
+// reasons for the failure is returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+ // loop over the ORs and check the inner ANDs
+ var e []error
+
+ // Capture the prerelease message only once. When it happens the first time
+ // this var is marked
+ var prerelesase bool
+ for _, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ // Before running the check handle the case where the version is
+ // a prerelease and the check is not searching for prereleases.
+ if c.con.pre == "" && v.pre != "" {
+ if !prerelesase {
+ em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ e = append(e, em)
+ prerelesase = true
+ }
+ joy = false
+
+ } else {
+
+ if !c.check(v) {
+ em := fmt.Errorf(c.msg, v, c.orig)
+ e = append(e, em)
+ joy = false
+ }
+ }
+ }
+
+ if joy {
+ return true, []error{}
+ }
+ }
+
+ return false, e
+}
+
+var constraintOps map[string]cfunc
+var constraintMsg map[string]string
+var constraintRegex *regexp.Regexp
+
+func init() {
+ constraintOps = map[string]cfunc{
+ "": constraintTildeOrEqual,
+ "=": constraintTildeOrEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "=>": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "=<": constraintLessThanEqual,
+ "~": constraintTilde,
+ "~>": constraintTilde,
+ "^": constraintCaret,
+ }
+
+ constraintMsg = map[string]string{
+ "": "%s is not equal to %s",
+ "=": "%s is not equal to %s",
+ "!=": "%s is equal to %s",
+ ">": "%s is less than or equal to %s",
+ "<": "%s is greater than or equal to %s",
+ ">=": "%s is less than %s",
+ "=>": "%s is less than %s",
+ "<=": "%s is greater than %s",
+ "=<": "%s is greater than %s",
+ "~": "%s does not have same major and minor version as %s",
+ "~>": "%s does not have same major and minor version as %s",
+ "^": "%s does not have same major version as %s",
+ }
+
+ ops := make([]string, 0, len(constraintOps))
+ for k := range constraintOps {
+ ops = append(ops, regexp.QuoteMeta(k))
+ }
+
+ constraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ strings.Join(ops, "|"),
+ cvRegex))
+
+ constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+ `\s*(%s)\s+-\s+(%s)\s*`,
+ cvRegex, cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+ // The callback function for the constraint. It performs the logic for
+ // the constraint.
+ function cfunc
+
+ msg string
+
+ // The version used in the constraint check. For example, if a constraint
+ // is '<= 2.0.0', con holds a Version instance representing 2.0.0.
+ con *Version
+
+ // The original parsed version (e.g., 4.x from != 4.x)
+ orig string
+
+ // When an x is used as part of the version (e.g., 1.x)
+ minorDirty bool
+ dirty bool
+ patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) bool {
+ return c.function(v, c)
+}
+
+type cfunc func(v *Version, c *constraint) bool
+
+func parseConstraint(c string) (*constraint, error) {
+ m := constraintRegex.FindStringSubmatch(c)
+ if m == nil {
+ return nil, fmt.Errorf("improper constraint: %s", c)
+ }
+
+ ver := m[2]
+ orig := ver
+ minorDirty := false
+ patchDirty := false
+ dirty := false
+ if isX(m[3]) {
+ ver = "0.0.0"
+ dirty = true
+ } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+ minorDirty = true
+ dirty = true
+ ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+ } else if isX(strings.TrimPrefix(m[5], ".")) {
+ dirty = true
+ patchDirty = true
+ ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+ }
+
+ con, err := NewVersion(ver)
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint Parser Error")
+ }
+
+ cs := &constraint{
+ function: constraintOps[m[1]],
+ msg: constraintMsg[m[1]],
+ con: con,
+ orig: orig,
+ minorDirty: minorDirty,
+ patchDirty: patchDirty,
+ dirty: dirty,
+ }
+ return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) bool {
+ if c.dirty {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if c.con.Major() != v.Major() {
+ return true
+ }
+ if c.con.Minor() != v.Minor() && !c.minorDirty {
+ return true
+ } else if c.minorDirty {
+ return false
+ }
+
+ return false
+ }
+
+ return !v.Equal(c.con)
+}
+
+func constraintGreaterThan(v *Version, c *constraint) bool {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ return v.Compare(c.con) == 1
+}
+
+func constraintLessThan(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if !c.dirty {
+ return v.Compare(c.con) < 0
+ }
+
+ if v.Major() > c.con.Major() {
+ return false
+ } else if v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false
+ }
+
+ return true
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) bool {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ return v.Compare(c.con) >= 0
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if !c.dirty {
+ return v.Compare(c.con) <= 0
+ }
+
+ if v.Major() > c.con.Major() {
+ return false
+ } else if v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false
+ }
+
+ return true
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if v.LessThan(c.con) {
+ return false
+ }
+
+ // ~0.0.0 is a special case where all constraints are accepted. It's
+ // equivalent to >= 0.0.0.
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+ !c.minorDirty && !c.patchDirty {
+ return true
+ }
+
+ if v.Major() != c.con.Major() {
+ return false
+ }
+
+ if v.Minor() != c.con.Minor() && !c.minorDirty {
+ return false
+ }
+
+ return true
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if c.dirty {
+ c.msg = constraintMsg["~"]
+ return constraintTilde(v, c)
+ }
+
+ return v.Equal(c.con)
+}
+
+// ^* --> (any)
+// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0
+// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0
+// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0
+// ^1.2.3 --> >=1.2.3, <2.0.0
+// ^1.2.0 --> >=1.2.0, <2.0.0
+func constraintCaret(v *Version, c *constraint) bool {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false
+ }
+
+ if v.LessThan(c.con) {
+ return false
+ }
+
+ if v.Major() != c.con.Major() {
+ return false
+ }
+
+ return true
+}
+
+var constraintRangeRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func isX(x string) bool {
+ switch x {
+ case "x", "*", "X":
+ return true
+ default:
+ return false
+ }
+}
+
+func rewriteRange(i string) string {
+ m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+ if m == nil {
+ return i
+ }
+ o := i
+ for _, v := range m {
+ t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+ o = strings.Replace(o, v[0], t, 1)
+ }
+
+ return o
+}
diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go
new file mode 100644
index 00000000..6a6c24c6
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/doc.go
@@ -0,0 +1,115 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+ * Parse semantic versions
+ * Sort semantic versions
+ * Check if a semantic version fits within a set of constraints
+ * Optionally work with a `v` prefix
+
+Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+If there is an error the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the documentation at https://godoc.org/github.com/Masterminds/semver.
+
+Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+
+Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+
+Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then joined by || into OR
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0, or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+ * `=`: equal (aliased to no operator)
+ * `!=`: not equal
+ * `>`: greater than
+ * `<`: less than
+ * `>=`: greater than or equal to
+ * `<=`: less than or equal to
+
+Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+ * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as wildcard characters. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+ * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+ * `>= 1.2.x` is equivalent to `>= 1.2.0`
+ * `<= 2.x` is equivalent to `< 3`
+ * `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+ * `~1` is equivalent to `>= 1, < 2`
+ * `~2.3` is equivalent to `>= 2.3, < 2.4`
+ * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+ * `~1.x` is equivalent to `>= 1, < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+ * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ * `^2.3` is equivalent to `>= 2.3, < 3`
+ * `^2.x` is equivalent to `>= 2.0.0, < 3`
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go
new file mode 100644
index 00000000..400d4f93
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/version.go
@@ -0,0 +1,425 @@
+package semver
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var validPrereleaseRegex *regexp.Regexp
+
+var (
+ // ErrInvalidSemVer is returned when a version is found to be invalid
+ // while being parsed.
+ ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+ // ErrInvalidMetadata is returned when the metadata is an invalid format
+ ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+ // ErrInvalidPrerelease is returned when the pre-release is an invalid format
+ ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// SemVerRegex is the regular expression used to parse a semantic version.
+const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// ValidPrerelease is the regular expression which validates
+// both prerelease and metadata values.
+const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$`
+
+// Version represents a single semantic version.
+type Version struct {
+ major, minor, patch int64
+ pre string
+ metadata string
+ original string
+}
+
+func init() {
+ versionRegex = regexp.MustCompile("^" + SemVerRegex + "$")
+ validPrereleaseRegex = regexp.MustCompile(ValidPrerelease)
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version.
+func NewVersion(v string) (*Version, error) {
+ m := versionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[8],
+ pre: m[5],
+ original: v,
+ }
+
+ var temp int64
+ temp, err := strconv.ParseInt(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ sv.major = temp
+
+ if m[2] != "" {
+ temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ sv.minor = temp
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ sv.patch = temp
+ } else {
+ sv.patch = 0
+ }
+
+ return sv, nil
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+ sv, err := NewVersion(v)
+ if err != nil {
+ panic(err)
+ }
+ return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional on
+// implementation.
+func (v *Version) String() string {
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+ return v.original
+}
+
+// Major returns the major version.
+func (v *Version) Major() int64 {
+ return v.major
+}
+
+// Minor returns the minor version.
+func (v *Version) Minor() int64 {
+ return v.minor
+}
+
+// Patch returns the patch version.
+func (v *Version) Patch() int64 {
+ return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v *Version) Prerelease() string {
+ return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v *Version) Metadata() string {
+ return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v *Version) originalVPrefix() string {
+
+ // Note, only lowercase v is supported as a prefix by the parser.
+ if v.original != "" && v.original[:1] == "v" {
+ return v.original[:1]
+ }
+ return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets metadata and prerelease values and increments the patch number.
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps the current patch value.
+func (v Version) IncPatch() Version {
+ vNext := v
+ // according to http://semver.org/#spec-item-9
+ // Pre-release versions have a lower precedence than the associated normal version.
+ // according to http://semver.org/#spec-item-10
+ // Build metadata SHOULD be ignored when determining version precedence.
+ if v.pre != "" {
+ vNext.metadata = ""
+ vNext.pre = ""
+ } else {
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = v.patch + 1
+ }
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = v.minor + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = 0
+ vNext.major = v.major + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+ vNext := v
+ if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) {
+ return vNext, ErrInvalidPrerelease
+ }
+ vNext.pre = prerelease
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+ vNext := v
+ if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) {
+ return vNext, ErrInvalidMetadata
+ }
+ vNext.metadata = metadata
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+ return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease.
+func (v *Version) Compare(o *Version) int {
+ // Compare the major, minor, and patch version for differences. If a
+ // difference is found return the comparison.
+ if d := compareSegment(v.Major(), o.Major()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+ return d
+ }
+
+ // At this point the major, minor, and patch versions are the same.
+ ps := v.pre
+ po := o.Prerelease()
+
+ if ps == "" && po == "" {
+ return 0
+ }
+ if ps == "" {
+ return 1
+ }
+ if po == "" {
+ return -1
+ }
+
+ return comparePrerelease(ps, po)
+}
+
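+// Illustrative note (not part of the upstream file): given the rules above,
+//
+//	MustParse("1.2.3-beta.1").Compare(MustParse("1.2.3")) // -1: a prerelease sorts below its release
+//	MustParse("1.2.3+build5").Compare(MustParse("1.2.3")) // 0: build metadata is ignored
+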
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ temp = nil
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (v *Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func compareSegment(v, o int64) int {
+ if v < o {
+ return -1
+ }
+ if v > o {
+ return 1
+ }
+
+ return 0
+}
+
+func comparePrerelease(v, o string) int {
+
+ // split the prerelease versions by their parts. The separator, per the spec,
+ // is a .
+ sparts := strings.Split(v, ".")
+ oparts := strings.Split(o, ".")
+
+ // Find the longer length of the parts to know how many loop iterations to
+ // go through.
+ slen := len(sparts)
+ olen := len(oparts)
+
+ l := slen
+ if olen > slen {
+ l = olen
+ }
+
+ // Iterate over each part of the prereleases to compare the differences.
+ for i := 0; i < l; i++ {
+ // Since the length of the parts can be different we need to create
+ // a placeholder. This is to avoid out of bounds issues.
+ stemp := ""
+ if i < slen {
+ stemp = sparts[i]
+ }
+
+ otemp := ""
+ if i < olen {
+ otemp = oparts[i]
+ }
+
+ d := comparePrePart(stemp, otemp)
+ if d != 0 {
+ return d
+ }
+ }
+
+ // Reaching here means two versions are of equal value but have different
+ // metadata (the part following a +). They are not identical in string form
+ // but the version comparison finds them to be equal.
+ return 0
+}
+
+func comparePrePart(s, o string) int {
+ // Fastpath if they are equal
+ if s == o {
+ return 0
+ }
+
+ // When s or o are empty we can use the other in an attempt to determine
+ // the response.
+ if s == "" {
+ if o != "" {
+ return -1
+ }
+ return 1
+ }
+
+ if o == "" {
+ if s != "" {
+ return 1
+ }
+ return -1
+ }
+
+ // When comparing strings "99" is greater than "103". To handle
+ // cases like this we need to detect numbers and compare them. According
+ // to the semver spec, numbers are always positive. If there is a - at the
+ // start like -99 this is to be evaluated as an alphanum. Per the spec,
+ // numeric identifiers always have lower precedence than alphanumeric ones.
+ // Parsing as Uints because negative numbers are ignored.
+
+ oi, n1 := strconv.ParseUint(o, 10, 64)
+ si, n2 := strconv.ParseUint(s, 10, 64)
+
+ // In the case where both are strings, compare the strings
+ if n1 != nil && n2 != nil {
+ if s > o {
+ return 1
+ }
+ return -1
+ } else if n1 != nil {
+ // o is a string and s is a number
+ return -1
+ } else if n2 != nil {
+ // s is a string and o is a number
+ return 1
+ }
+ // Both are numbers
+ if si > oi {
+ return 1
+ }
+ return -1
+
+}
diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go
new file mode 100644
index 00000000..b42bcd62
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/version_fuzz.go
@@ -0,0 +1,10 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+ if _, err := NewVersion(string(data)); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/bombsimon/wsl/v3/wsl.go b/vendor/github.com/bombsimon/wsl/v3/wsl.go
index 3b4a4e9a..31520fb4 100644
--- a/vendor/github.com/bombsimon/wsl/v3/wsl.go
+++ b/vendor/github.com/bombsimon/wsl/v3/wsl.go
@@ -398,6 +398,18 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) {
if !cuddledWithLastStmt {
checkingErr := atLeastOneInListsMatch(rightAndLeftHandSide, p.config.ErrorVariableNames)
if checkingErr {
+ // We only want to enforce cuddling error checks if the
+ // error was assigned on the line above. See
+ // https://github.com/bombsimon/wsl/issues/78.
+ // This is needed since `assignedOnLineAbove` is not
+ // actually just assignments but everything from LHS in the
+ // previous statement. This means that if previous line was
+ // `if err ...`, `err` will now be in the list
+ // `assignedOnLineAbove`.
+ if _, ok := previousStatement.(*ast.AssignStmt); !ok {
+ continue
+ }
+
if checkingErrInitializedInline() {
continue
}
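A hedged sketch (not part of this change) of the two preceding-statement shapes the new guard distinguishes; `doWork` and `setup` are hypothetical helpers:

```go
package main

import "errors"

func doWork() error { return errors.New("boom") }
func setup() error  { return nil }

func example() error {
	// The previous statement assigns the error: the cuddled check below is
	// the case the error-check rule is meant for.
	err := doWork()
	if err != nil {
		return err
	}

	// The previous statement is an if-statement; `err` still shows up in
	// assignedOnLineAbove through its init clause, but it is not an
	// *ast.AssignStmt, so the added guard skips the enforcement here.
	if err := setup(); err != nil {
		return err
	}
	if err := doWork(); err != nil {
		return err
	}
	return nil
}

func main() { _ = example() }
```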
diff --git a/vendor/github.com/daixiang0/gci/LICENSE b/vendor/github.com/daixiang0/gci/LICENSE
new file mode 100644
index 00000000..e1292f73
--- /dev/null
+++ b/vendor/github.com/daixiang0/gci/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2020, Xiang Dai
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/gci.go b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go
new file mode 100644
index 00000000..f91d9b37
--- /dev/null
+++ b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go
@@ -0,0 +1,366 @@
+package gci
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+const (
+ // pkg type: standard, remote, local
+ standard int = iota
+ // 3rd-party packages
+ remote
+ local
+
+ commentFlag = "//"
+)
+
+var (
+ importStartFlag = []byte(`
+import (
+`)
+
+ importEndFlag = []byte(`
+)
+`)
+)
+
+type FlagSet struct {
+ LocalFlag string
+ DoWrite, DoDiff *bool
+}
+
+type pkg struct {
+ list map[int][]string
+ comment map[string]string
+ alias map[string]string
+}
+
+func newPkg(data [][]byte, localFlag string) *pkg {
+ listMap := make(map[int][]string)
+ commentMap := make(map[string]string)
+ aliasMap := make(map[string]string)
+ p := &pkg{
+ list: listMap,
+ comment: commentMap,
+ alias: aliasMap,
+ }
+
+ formatData := make([]string, 0)
+ // remove all empty lines
+ for _, v := range data {
+ if len(v) > 0 {
+ formatData = append(formatData, strings.TrimSpace(string(v)))
+ }
+ }
+
+ n := len(formatData)
+ for i := n - 1; i >= 0; i-- {
+ line := formatData[i]
+
+ // check commentFlag:
+ // 1. one line commentFlag
+ // 2. commentFlag after import path
+ commentIndex := strings.Index(line, commentFlag)
+ if commentIndex == 0 {
+ // comment in the last line is useless, ignore it
+ if i+1 >= n {
+ continue
+ }
+ pkg, _, _ := getPkgInfo(formatData[i+1], strings.Index(formatData[i+1], commentFlag) >= 0)
+ p.comment[pkg] = line
+ continue
+ } else if commentIndex > 0 {
+ pkg, alias, comment := getPkgInfo(line, true)
+ if alias != "" {
+ p.alias[pkg] = alias
+ }
+
+ p.comment[pkg] = comment
+ pkgType := getPkgType(pkg, localFlag)
+ p.list[pkgType] = append(p.list[pkgType], pkg)
+ continue
+ }
+
+ pkg, alias, _ := getPkgInfo(line, false)
+
+ if alias != "" {
+ p.alias[pkg] = alias
+ }
+
+ pkgType := getPkgType(pkg, localFlag)
+ p.list[pkgType] = append(p.list[pkgType], pkg)
+ }
+
+ return p
+}
+
+// fmt formats the import packages as expected
+func (p *pkg) fmt() []byte {
+ ret := make([]string, 0, 100)
+
+ for pkgType := range []int{standard, remote, local} {
+ sort.Strings(p.list[pkgType])
+ for _, s := range p.list[pkgType] {
+ if p.comment[s] != "" {
+ l := fmt.Sprintf("%s%s%s%s", linebreak, indent, p.comment[s], linebreak)
+ ret = append(ret, l)
+ }
+
+ if p.alias[s] != "" {
+ s = fmt.Sprintf("%s%s%s%s%s", indent, p.alias[s], blank, s, linebreak)
+ } else {
+ s = fmt.Sprintf("%s%s%s", indent, s, linebreak)
+ }
+
+ ret = append(ret, s)
+ }
+
+ if len(p.list[pkgType]) > 0 {
+ ret = append(ret, linebreak)
+ }
+ }
+ if ret[len(ret)-1] == linebreak {
+ ret = ret[:len(ret)-1]
+ }
+
+ // remove duplicate empty lines
+ s1 := fmt.Sprintf("%s%s%s%s", linebreak, linebreak, linebreak, indent)
+ s2 := fmt.Sprintf("%s%s%s", linebreak, linebreak, indent)
+ return []byte(strings.ReplaceAll(strings.Join(ret, ""), s1, s2))
+}
+
+// getPkgInfo assumes line is an import path and returns (path, alias, comment)
+func getPkgInfo(line string, comment bool) (string, string, string) {
+ if comment {
+ s := strings.Split(line, commentFlag)
+ pkgArray := strings.Split(s[0], blank)
+ if len(pkgArray) > 1 {
+ return pkgArray[1], pkgArray[0], fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1]))
+ } else {
+ return strings.TrimSpace(pkgArray[0]), "", fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1]))
+ }
+ } else {
+ pkgArray := strings.Split(line, blank)
+ if len(pkgArray) > 1 {
+ return pkgArray[1], pkgArray[0], ""
+ } else {
+ return pkgArray[0], "", ""
+ }
+ }
+}
+
+func getPkgType(line, localFlag string) int {
+ if !strings.Contains(line, dot) {
+ return standard
+ } else if strings.Contains(line, localFlag) {
+ return local
+ } else {
+ return remote
+ }
+}
+
+const (
+ dot = "."
+ blank = " "
+ indent = "\t"
+ linebreak = "\n"
+)
+
+func diff(b1, b2 []byte, filename string) (data []byte, err error) {
+ f1, err := writeTempFile("", "gci", b1)
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1)
+
+ f2, err := writeTempFile("", "gci", b2)
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2)
+
+ cmd := "diff"
+
+ data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ return replaceTempFilename(data, filename)
+ }
+ return
+}
+
+func writeTempFile(dir, prefix string, data []byte) (string, error) {
+ file, err := ioutil.TempFile(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ _, err = file.Write(data)
+ if err1 := file.Close(); err == nil {
+ err = err1
+ }
+ if err != nil {
+ os.Remove(file.Name())
+ return "", err
+ }
+ return file.Name(), nil
+}
+
+// replaceTempFilename replaces temporary filenames in diff with actual one.
+//
+// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500
+// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500
+// ...
+// ->
+// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500
+// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500
+// ...
+func replaceTempFilename(diff []byte, filename string) ([]byte, error) {
+ bs := bytes.SplitN(diff, []byte{'\n'}, 3)
+ if len(bs) < 3 {
+ return nil, fmt.Errorf("got unexpected diff for %s", filename)
+ }
+ // Preserve timestamps.
+ var t0, t1 []byte
+ if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 {
+ t0 = bs[0][i:]
+ }
+ if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 {
+ t1 = bs[1][i:]
+ }
+ // Always print filepath with slash separator.
+ f := filepath.ToSlash(filename)
+ bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0))
+ bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1))
+ return bytes.Join(bs, []byte{'\n'}), nil
+}
+
+func visitFile(set *FlagSet) filepath.WalkFunc {
+ return func(path string, f os.FileInfo, err error) error {
+ if err == nil && isGoFile(f) {
+ err = processFile(path, os.Stdout, set)
+ }
+ return err
+ }
+}
+
+func WalkDir(path string, set *FlagSet) error {
+ return filepath.Walk(path, visitFile(set))
+}
+
+func isGoFile(f os.FileInfo) bool {
+ // ignore non-Go files
+ name := f.Name()
+ return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
+}
+
+func ProcessFile(filename string, out io.Writer, set *FlagSet) error {
+ return processFile(filename, out, set)
+}
+
+func processFile(filename string, out io.Writer, set *FlagSet) error {
+ var err error
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+
+ ori := make([]byte, len(src))
+ copy(ori, src)
+ start := bytes.Index(src, importStartFlag)
+ // in case there is no importStartFlag, or the importStartFlag only exists inside a comment
+ if start < 0 {
+ fmt.Printf("skip file %s since no import\n", filename)
+ return nil
+ }
+ end := bytes.Index(src[start:], importEndFlag) + start
+
+ ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak))
+
+ p := newPkg(ret, set.LocalFlag)
+
+ res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...)
+
+ if !bytes.Equal(ori, res) {
+ if *set.DoWrite {
+ // On Windows, we need to re-set the permissions from the file. See golang/go#38225.
+ var perms os.FileMode
+ if fi, err := os.Stat(filename); err == nil {
+ perms = fi.Mode() & os.ModePerm
+ }
+ err = ioutil.WriteFile(filename, res, perms)
+ if err != nil {
+ return err
+ }
+ }
+ if *set.DoDiff {
+ data, err := diff(ori, res, filename)
+ if err != nil {
+ return fmt.Errorf("failed to diff: %v", err)
+ }
+ fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename))
+ if _, err := out.Write(data); err != nil {
+ return fmt.Errorf("failed to write: %v", err)
+ }
+ }
+ }
+ if !*set.DoWrite && !*set.DoDiff {
+ if _, err = out.Write(res); err != nil {
+ return fmt.Errorf("failed to write: %v", err)
+ }
+ }
+
+ return err
+}
+
+// Run returns the original source and the formatted result as []byte on success
+func Run(filename string, set *FlagSet) ([]byte, []byte, error) {
+ var err error
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer f.Close()
+
+ src, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ori := make([]byte, len(src))
+ copy(ori, src)
+ start := bytes.Index(src, importStartFlag)
+ // in case there is no importStartFlag, or the importStartFlag only exists inside a comment
+ if start < 0 {
+ return nil, nil, nil
+ }
+ end := bytes.Index(src[start:], importEndFlag) + start
+
+ ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak))
+
+ p := newPkg(ret, set.LocalFlag)
+
+ res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...)
+
+ if bytes.Equal(ori, res) {
+ return ori, nil, nil
+ }
+
+ return ori, res, nil
+}
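
As a usage note for the vendored package added above, here is a minimal sketch of driving gci programmatically through its exported API (FlagSet, WalkDir, ProcessFile); the local-import prefix and file name are placeholders, not values from this patch.

```go
package main

import (
	"log"
	"os"

	"github.com/daixiang0/gci/pkg/gci"
)

func main() {
	write, diff := true, false

	set := &gci.FlagSet{
		LocalFlag: "github.com/myorg/myrepo", // hypothetical local-import prefix
		DoWrite:   &write,
		DoDiff:    &diff,
	}

	// Rewrite the import block of every .go file under the current directory.
	if err := gci.WalkDir(".", set); err != nil {
		log.Fatal(err)
	}

	// With both DoWrite and DoDiff false, ProcessFile prints the formatted
	// file to the writer instead of touching it on disk.
	write = false
	if err := gci.ProcessFile("main.go", os.Stdout, set); err != nil {
		log.Fatal(err)
	}
}
```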
diff --git a/vendor/github.com/denis-tingajkin/go-header/.gitignore b/vendor/github.com/denis-tingajkin/go-header/.gitignore
new file mode 100644
index 00000000..62c89355
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/.gitignore
@@ -0,0 +1 @@
+.idea/
\ No newline at end of file
diff --git a/vendor/github.com/denis-tingajkin/go-header/LICENSE b/vendor/github.com/denis-tingajkin/go-header/LICENSE
new file mode 100644
index 00000000..a2c9fda2
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/vendor/github.com/denis-tingajkin/go-header/README.md b/vendor/github.com/denis-tingajkin/go-header/README.md
new file mode 100644
index 00000000..43348b8b
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/README.md
@@ -0,0 +1,69 @@
+# go-header
+[![Actions Status](https://github.com/denis-tingajkin/go-header/workflows/ci/badge.svg)](https://github.com/denis-tingajkin/go-header/actions)
+
+Go source code linter providing checks for license headers.
+
+# Installation
+
+For installation you can simply use `go get`.
+
+```
+go get github.com/denis-tingajkin/go-header/cmd/go-header
+```
+
+# Configuration
+
+To configure the `go-header` linter you simply need to fill in the following structure in YAML format.
+```go
+// Configuration represents go-header linter setup parameters
+type Configuration struct {
+ // Values is a map of values. Two types are supported: `const` and `regexp`. Values can be used recursively.
+ Values map[string]map[string]string `yaml:"values"`
+ // Template is the template used for checking. It uses the values.
+ Template string `yaml:"template"`
+ // TemplatePath is the path to the template file. Useful if the template needs to be loaded from a specific file.
+ TemplatePath string `yaml:"template-path"`
+}
+```
+Two kinds of values are supported: `const` and `regexp`. NOTE: values can be used recursively.
+Values with type `const` are checked for equality.
+Values with type `regexp` are checked for a regular-expression match.
+
+# Execution
+
+The `go-header` linter expects file paths as input. If you want to run `go-header` only on changed files, you can use this command:
+```bash
+go-header $(git diff --name-only)
+```
+
+# Setup example
+
+## Step 1
+Create the configuration file `.go-header.yaml` in the root of the project.
+```yaml
+---
+values:
+ const:
+ MY COMPANY: mycompany.com
+template-path: ./mypath/mytemplate.txt
+```
+## Step 2
+Write the template file. For example, for the config above, `mytemplate.txt` could be:
+```text
+{{ MY COMPANY }}
+SPDX-License-Identifier: Apache-2.0
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at:
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+```
+## Step 3
+You are ready! Execute `go-header {FILES}` from the root of the project.
diff --git a/vendor/github.com/denis-tingajkin/go-header/analyzer.go b/vendor/github.com/denis-tingajkin/go-header/analyzer.go
new file mode 100644
index 00000000..f4efe75a
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/analyzer.go
@@ -0,0 +1,98 @@
+package goheader
+
+import (
+ "fmt"
+ "go/ast"
+ "strings"
+)
+
+type Analyzer interface {
+ Analyze(file *ast.File) Issue
+}
+
+type analyzer struct {
+ values map[string]Value
+ template string
+}
+
+func (a *analyzer) Analyze(file *ast.File) Issue {
+ if a.template == "" {
+ return NewIssue("Missed template for check")
+ }
+ var header string
+ if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package {
+ if strings.HasPrefix(file.Comments[0].List[0].Text, "/*") {
+ header = (&ast.CommentGroup{List: []*ast.Comment{file.Comments[0].List[0]}}).Text()
+ } else {
+ header = file.Comments[0].Text()
+ }
+ }
+ header = strings.TrimSpace(header)
+ if header == "" {
+ return NewIssue("Missed header for check")
+ }
+ s := NewReader(header)
+ t := NewReader(a.template)
+ for !s.Done() && !t.Done() {
+ templateCh := t.Peek()
+ if templateCh == '{' {
+ name := a.readField(t)
+ if a.values[name] == nil {
+ return NewIssue(fmt.Sprintf("Template has unknown value: %v", name))
+ }
+ if i := a.values[name].Read(s); i != nil {
+ return i
+ }
+ continue
+ }
+ sourceCh := s.Peek()
+ if sourceCh != templateCh {
+ l := s.Location()
+ notNextLine := func(r rune) bool {
+ return r != '\n'
+ }
+ actual := s.ReadWhile(notNextLine)
+ expected := t.ReadWhile(notNextLine)
+ return NewIssueWithLocation(fmt.Sprintf("Actual: %v\nExpected:%v", actual, expected), l)
+ }
+ s.Next()
+ t.Next()
+ }
+ if !s.Done() {
+ l := s.Location()
+ return NewIssueWithLocation(fmt.Sprintf("Unexpected string: %v", s.Finish()), l)
+ }
+ if !t.Done() {
+ l := s.Location()
+ return NewIssueWithLocation(fmt.Sprintf("Missed string: %v", t.Finish()), l)
+ }
+ return nil
+}
+
+func (a *analyzer) readField(reader Reader) string {
+ _ = reader.Next()
+ _ = reader.Next()
+
+ r := reader.ReadWhile(func(r rune) bool {
+ return r != '}'
+ })
+
+ _ = reader.Next()
+ _ = reader.Next()
+
+ return strings.ToLower(strings.TrimSpace(r))
+}
+
+func New(options ...AnalyzerOption) Analyzer {
+ a := &analyzer{}
+ for _, o := range options {
+ o.apply(a)
+ }
+ for _, v := range a.values {
+ err := v.Calculate(a.values)
+ if err != nil {
+ panic(err.Error())
+ }
+ }
+ return a
+}
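
A minimal sketch of how the analyzer added above can be driven directly, assuming ConstValue from value.go performs a literal comparison; the header text, template, and company name are made up for illustration.

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	goheader "github.com/denis-tingajkin/go-header"
)

func main() {
	src := `// Copyright mycompany.com
package p
`

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	a := goheader.New(
		goheader.WithTemplate("Copyright {{ MY COMPANY }}"),
		goheader.WithValues(map[string]goheader.Value{
			"my company": &goheader.ConstValue{RawValue: "mycompany.com"},
		}),
	)

	// Analyze returns nil when the file header matches the template.
	if issue := a.Analyze(file); issue != nil {
		fmt.Println(issue.Location(), issue.Message())
	} else {
		fmt.Println("header matches the template")
	}
}
```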
diff --git a/vendor/github.com/denis-tingajkin/go-header/config.go b/vendor/github.com/denis-tingajkin/go-header/config.go
new file mode 100644
index 00000000..67e273b9
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/config.go
@@ -0,0 +1,69 @@
+package goheader
+
+import (
+ "errors"
+ "fmt"
+ "gopkg.in/yaml.v2"
+ "io/ioutil"
+ "strings"
+)
+
+// Configuration represents go-header linter setup parameters
+type Configuration struct {
+ // Values is a map of values. Two types are supported: `const` and `regexp`. Values can be used recursively.
+ Values map[string]map[string]string `yaml:"values"`
+ // Template is the template used for checking. It uses the values.
+ Template string `yaml:"template"`
+ // TemplatePath is the path to the template file. Useful if the template needs to be loaded from a specific file.
+ TemplatePath string `yaml:"template-path"`
+}
+
+func (c *Configuration) GetValues() (map[string]Value, error) {
+ var result = make(map[string]Value)
+ createConst := func(raw string) Value {
+ return &ConstValue{RawValue: raw}
+ }
+ createRegexp := func(raw string) Value {
+ return &RegexpValue{RawValue: raw}
+ }
+ appendValues := func(m map[string]string, create func(string) Value) {
+ for k, v := range m {
+ key := strings.ToLower(k)
+ result[key] = create(v)
+ }
+ }
+ for k, v := range c.Values {
+ switch k {
+ case "const":
+ appendValues(v, createConst)
+ case "regexp":
+ appendValues(v, createRegexp)
+ default:
+ return nil, fmt.Errorf("unknown value type %v", k)
+ }
+ }
+ return result, nil
+}
+
+func (c *Configuration) GetTemplate() (string, error) {
+ if c.Template != "" {
+ return c.Template, nil
+ }
+ if c.TemplatePath == "" {
+ return "", errors.New("template has not passed")
+ }
+ if b, err := ioutil.ReadFile(c.TemplatePath); err != nil {
+ return "", err
+ } else {
+ c.Template = string(b)
+ return c.Template, nil
+ }
+}
+
+func (c *Configuration) Parse(p string) error {
+ b, err := ioutil.ReadFile(p)
+ if err != nil {
+ return err
+ }
+ return yaml.Unmarshal(b, c)
+}
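
And a short sketch of the configuration-driven path, assuming a `.go-header.yaml` like the one in the README exists at the given path; the file name is a placeholder.

```go
package main

import (
	"fmt"

	goheader "github.com/denis-tingajkin/go-header"
)

func main() {
	// Load the YAML configuration and build an analyzer from the parsed
	// values and template.
	cfg := &goheader.Configuration{}
	if err := cfg.Parse(".go-header.yaml"); err != nil {
		panic(err)
	}

	values, err := cfg.GetValues()
	if err != nil {
		panic(err)
	}

	template, err := cfg.GetTemplate()
	if err != nil {
		panic(err)
	}

	analyzer := goheader.New(goheader.WithValues(values), goheader.WithTemplate(template))
	fmt.Printf("analyzer configured: %T\n", analyzer)
}
```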
diff --git a/vendor/github.com/denis-tingajkin/go-header/go.mod b/vendor/github.com/denis-tingajkin/go-header/go.mod
new file mode 100644
index 00000000..c557afeb
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/go.mod
@@ -0,0 +1,10 @@
+module github.com/denis-tingajkin/go-header
+
+go 1.13
+
+require (
+ github.com/fatih/color v1.9.0
+ github.com/sirupsen/logrus v1.6.0
+ github.com/stretchr/testify v1.5.1
+ gopkg.in/yaml.v2 v2.2.2
+)
diff --git a/vendor/github.com/denis-tingajkin/go-header/go.sum b/vendor/github.com/denis-tingajkin/go-header/go.sum
new file mode 100644
index 00000000..4033b08f
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/go.sum
@@ -0,0 +1,31 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/denis-tingajkin/go-header/issue.go b/vendor/github.com/denis-tingajkin/go-header/issue.go
new file mode 100644
index 00000000..d4921966
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/issue.go
@@ -0,0 +1,32 @@
+package goheader
+
+type Issue interface {
+ Location() Location
+ Message() string
+}
+
+type issue struct {
+ msg string
+ location Location
+}
+
+func (i *issue) Location() Location {
+ return i.location
+}
+
+func (i *issue) Message() string {
+ return i.msg
+}
+
+func NewIssueWithLocation(msg string, location Location) Issue {
+ return &issue{
+ msg: msg,
+ location: location,
+ }
+}
+
+func NewIssue(msg string) Issue {
+ return &issue{
+ msg: msg,
+ }
+}
diff --git a/vendor/github.com/denis-tingajkin/go-header/location.go b/vendor/github.com/denis-tingajkin/go-header/location.go
new file mode 100644
index 00000000..fc33e48d
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/location.go
@@ -0,0 +1,12 @@
+package goheader
+
+import "fmt"
+
+type Location struct {
+ Line int
+ Position int
+}
+
+func (l Location) String() string {
+ return fmt.Sprintf("%v:%v", l.Line+1, l.Position)
+}
diff --git a/vendor/github.com/denis-tingajkin/go-header/option.go b/vendor/github.com/denis-tingajkin/go-header/option.go
new file mode 100644
index 00000000..2adaa9a9
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/option.go
@@ -0,0 +1,28 @@
+package goheader
+
+import "strings"
+
+type AnalyzerOption interface {
+ apply(*analyzer)
+}
+
+type applyAnalyzerOptionFunc func(*analyzer)
+
+func (f applyAnalyzerOptionFunc) apply(a *analyzer) {
+ f(a)
+}
+
+func WithValues(values map[string]Value) AnalyzerOption {
+ return applyAnalyzerOptionFunc(func(a *analyzer) {
+ a.values = make(map[string]Value)
+ for k, v := range values {
+ a.values[strings.ToLower(k)] = v
+ }
+ })
+}
+
+func WithTemplate(template string) AnalyzerOption {
+ return applyAnalyzerOptionFunc(func(a *analyzer) {
+ a.template = template
+ })
+}
diff --git a/vendor/github.com/denis-tingajkin/go-header/reader.go b/vendor/github.com/denis-tingajkin/go-header/reader.go
new file mode 100644
index 00000000..4386f30d
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/reader.go
@@ -0,0 +1,105 @@
+package goheader
+
+type Reader interface {
+ Peek() rune
+ Next() rune
+ Done() bool
+ Finish() string
+ Position() int
+ Location() Location
+ SetPosition(int)
+ ReadWhile(func(rune) bool) string
+}
+
+func NewReader(text string) Reader {
+ return &reader{source: text}
+}
+
+type reader struct {
+ source string
+ position int
+ location Location
+}
+
+func (r *reader) Position() int {
+ return r.position
+}
+
+func (r *reader) Location() Location {
+ return r.location
+}
+
+func (r *reader) Peek() rune {
+ if r.Done() {
+ return rune(0)
+ }
+ return rune(r.source[r.position])
+}
+
+func (r *reader) Done() bool {
+ return r.position >= len(r.source)
+}
+
+func (r *reader) Next() rune {
+ if r.Done() {
+ return rune(0)
+ }
+ result := r.Peek()
+ if result == '\n' {
+ r.location.Line++
+ r.location.Position = 0
+ } else {
+ r.location.Position++
+ }
+ r.position++
+ return result
+}
+
+func (r *reader) Finish() string {
+ if r.position >= len(r.source) {
+ return ""
+ }
+ defer r.till()
+ return r.source[r.position:]
+}
+
+func (r *reader) SetPosition(pos int) {
+ if pos < 0 {
+ pos = 0
+ }
+ r.position = pos
+ r.location = r.calculateLocation()
+}
+
+func (r *reader) ReadWhile(match func(rune) bool) string {
+ if match == nil {
+ return ""
+ }
+ start := r.position
+ for !r.Done() && match(r.Peek()) {
+ r.Next()
+ }
+ return r.source[start:r.position]
+}
+
+func (r *reader) till() {
+ r.position = len(r.source)
+ r.location = r.calculateLocation()
+}
+
+func (r *reader) calculateLocation() Location {
+ min := len(r.source)
+ if min > r.position {
+ min = r.position
+ }
+ x, y := 0, 0
+ for i := 0; i < min; i++ {
+ if r.source[i] == '\n' {
+ y++
+ x = 0
+ } else {
+ x++
+ }
+ }
+ return Location{Line: y, Position: x}
+}
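
The reader above is a small cursor over header text: `Peek`/`Next` advance rune by rune while `Location` tracks a line/column pair, and `ReadWhile` consumes a matching run. A minimal sketch of the exported API with a made-up input string (not taken from the package's own tests):

```go
package main

import (
	"fmt"
	"unicode"

	goheader "github.com/denis-tingajkin/go-header"
)

func main() {
	// Hypothetical input; the analyzer normally feeds a file's header comment here.
	r := goheader.NewReader("Copyright 2020\nAcme Inc")

	// ReadWhile consumes a run of matching runes and advances the cursor.
	word := r.ReadWhile(unicode.IsLetter)
	fmt.Println(word) // Copyright

	// Location tracks the cursor; Location.String adds one to the line number.
	fmt.Println(r.Location()) // 1:9

	// Finish returns the remaining text and moves the cursor to the end.
	rest := r.Finish()
	fmt.Printf("%q %v\n", rest, r.Done()) // " 2020\nAcme Inc" true
}
```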
diff --git a/vendor/github.com/denis-tingajkin/go-header/value.go b/vendor/github.com/denis-tingajkin/go-header/value.go
new file mode 100644
index 00000000..bebc03b6
--- /dev/null
+++ b/vendor/github.com/denis-tingajkin/go-header/value.go
@@ -0,0 +1,112 @@
+package goheader
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+type Calculable interface {
+ Calculate(map[string]Value) error
+ Get() string
+}
+
+type Value interface {
+ Calculable
+ Read(Reader) Issue
+}
+
+func calculateValue(calculable Calculable, values map[string]Value) (string, error) {
+ sb := strings.Builder{}
+ r := calculable.Get()
+ var endIndex int
+ var startIndex int
+ for startIndex = strings.Index(r, "{{"); startIndex >= 0; startIndex = strings.Index(r, "{{") {
+ _, _ = sb.WriteString(r[:startIndex])
+ endIndex = strings.Index(r, "}}")
+ if endIndex < 0 {
+ return "", errors.New("missed value ending")
+ }
+ subVal := strings.ToLower(strings.TrimSpace(r[startIndex+2 : endIndex]))
+ if val := values[subVal]; val != nil {
+ if err := val.Calculate(values); err != nil {
+ return "", err
+ }
+ sb.WriteString(val.Get())
+ } else {
+ return "", fmt.Errorf("unknown value name %v", subVal)
+ }
+ endIndex += 2
+ r = r[endIndex:]
+ }
+ _, _ = sb.WriteString(r)
+ return sb.String(), nil
+}
+
+type ConstValue struct {
+ RawValue string
+}
+
+func (c *ConstValue) Calculate(values map[string]Value) error {
+ v, err := calculateValue(c, values)
+ if err != nil {
+ return err
+ }
+ c.RawValue = v
+ return nil
+}
+
+func (c *ConstValue) Get() string {
+ return c.RawValue
+}
+
+func (c *ConstValue) Read(s Reader) Issue {
+ l := s.Location()
+ p := s.Position()
+ for _, ch := range c.Get() {
+ if ch != s.Peek() {
+ s.SetPosition(p)
+ f := s.ReadWhile(func(r rune) bool {
+ return r != '\n'
+ })
+ return NewIssueWithLocation(fmt.Sprintf("Expected:%v, Actual: %v", c.Get(), f), l)
+ }
+ s.Next()
+ }
+ return nil
+}
+
+type RegexpValue struct {
+ RawValue string
+}
+
+func (r *RegexpValue) Calculate(values map[string]Value) error {
+ v, err := calculateValue(r, values)
+ if err != nil {
+ return err
+ }
+ r.RawValue = v
+ return nil
+}
+
+func (r *RegexpValue) Get() string {
+ return r.RawValue
+}
+
+func (r *RegexpValue) Read(s Reader) Issue {
+ l := s.Location()
+ p := regexp.MustCompile(r.Get())
+ pos := s.Position()
+ str := s.Finish()
+ s.SetPosition(pos)
+ indexes := p.FindAllIndex([]byte(str), -1)
+ if len(indexes) == 0 {
+ return NewIssueWithLocation(fmt.Sprintf("Pattern %v doesn't match.", p.String()), l)
+ }
+ s.SetPosition(pos + indexes[0][1])
+ return nil
+}
+
+var _ Value = &ConstValue{}
+var _ Value = &RegexpValue{}
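
`Calculable`/`Value` implement a tiny template step: `Calculate` expands `{{ name }}` placeholders (trimmed and lower-cased) from a values map, and `Read` then matches the expanded text against the header through a `Reader`. A sketch using made-up values; `ConstValue` and `RegexpValue` are the two implementations shown above:

```go
package main

import (
	"fmt"
	"log"

	goheader "github.com/denis-tingajkin/go-header"
)

func main() {
	// Illustrative values map; placeholder names are looked up lower-cased.
	values := map[string]goheader.Value{
		"company": &goheader.ConstValue{RawValue: "Acme Inc"},
	}

	// Calculate replaces {{ COMPANY }} with the expanded value text.
	header := &goheader.ConstValue{RawValue: "Copyright {{ COMPANY }}"}
	if err := header.Calculate(values); err != nil {
		log.Fatal(err)
	}
	fmt.Println(header.Get()) // Copyright Acme Inc

	// Read compares the expanded text against the source header and returns
	// a non-nil Issue (located where matching started) on a mismatch.
	issue := header.Read(goheader.NewReader("Copyright Acme Inc"))
	fmt.Println(issue == nil) // true
}
```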
diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml
deleted file mode 100644
index 95f8a1ff..00000000
--- a/vendor/github.com/fatih/color/.travis.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-language: go
-go:
- - 1.8.x
- - tip
-
diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock
deleted file mode 100644
index 7d879e9c..00000000
--- a/vendor/github.com/fatih/color/Gopkg.lock
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/mattn/go-colorable"
- packages = ["."]
- revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
- version = "v0.0.9"
-
-[[projects]]
- name = "github.com/mattn/go-isatty"
- packages = ["."]
- revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
- version = "v0.0.3"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/sys"
- packages = ["unix"]
- revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml
deleted file mode 100644
index ff1617f7..00000000
--- a/vendor/github.com/fatih/color/Gopkg.toml
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-
-
-[[constraint]]
- name = "github.com/mattn/go-colorable"
- version = "0.0.9"
-
-[[constraint]]
- name = "github.com/mattn/go-isatty"
- version = "0.0.3"
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md
index 3fc95446..42d9abc0 100644
--- a/vendor/github.com/fatih/color/README.md
+++ b/vendor/github.com/fatih/color/README.md
@@ -1,6 +1,12 @@
-# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
+# Archived project. No maintenance.
+
+This project is not maintained anymore and is archived. Feel free to fork and
+make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)
+
+Thanks to everyone for their valuable feedback and contributions.
+# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color)
Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
@@ -17,9 +23,6 @@ suits you.
go get github.com/fatih/color
```
-Note that the `vendor` folder is here for stability. Remove the folder if you
-already have the dependencies in your GOPATH.
-
## Examples
### Standard colors
diff --git a/vendor/github.com/fatih/color/go.mod b/vendor/github.com/fatih/color/go.mod
new file mode 100644
index 00000000..bc0df754
--- /dev/null
+++ b/vendor/github.com/fatih/color/go.mod
@@ -0,0 +1,8 @@
+module github.com/fatih/color
+
+go 1.13
+
+require (
+ github.com/mattn/go-colorable v0.1.4
+ github.com/mattn/go-isatty v0.0.11
+)
diff --git a/vendor/github.com/fatih/color/go.sum b/vendor/github.com/fatih/color/go.sum
new file mode 100644
index 00000000..44328a8d
--- /dev/null
+++ b/vendor/github.com/fatih/color/go.sum
@@ -0,0 +1,8 @@
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
index ba49e3c2..fad89585 100644
--- a/vendor/github.com/fsnotify/fsnotify/.editorconfig
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -1,5 +1,12 @@
root = true
-[*]
+[*.go]
indent_style = tab
indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 00000000..32f1001b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml
index 981d1bb8..a9c30165 100644
--- a/vendor/github.com/fsnotify/fsnotify/.travis.yml
+++ b/vendor/github.com/fsnotify/fsnotify/.travis.yml
@@ -2,29 +2,35 @@ sudo: false
language: go
go:
- - 1.8.x
- - 1.9.x
- - tip
+ - "stable"
+ - "1.11.x"
+ - "1.10.x"
+ - "1.9.x"
matrix:
+ include:
+ - go: "stable"
+ env: GOLINT=true
allow_failures:
- go: tip
fast_finish: true
-before_script:
- - go get -u github.com/golang/lint/golint
+
+before_install:
+ - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi
script:
- - go test -v --race ./...
+ - go test --race ./...
after_script:
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- - test -z "$(golint ./... | tee /dev/stderr)"
+ - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi
- go vet ./...
os:
- linux
- osx
+ - windows
notifications:
email: false
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
index f21e5408..e180c8fb 100644
--- a/vendor/github.com/fsnotify/fsnotify/LICENSE
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -1,5 +1,5 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012 fsnotify Authors. All rights reserved.
+Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
index 39932074..b2629e52 100644
--- a/vendor/github.com/fsnotify/fsnotify/README.md
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -10,16 +10,16 @@ go get -u golang.org/x/sys/...
Cross platform: Windows, Linux, BSD and macOS.
-|Adapter |OS |Status |
-|----------|----------|----------|
-|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
-|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
-|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
-|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
-|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
-|fanotify |Linux 2.6.37+ | |
-|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
-|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+| Adapter | OS | Status |
+| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
+| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
+| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
+| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) |
+| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
+| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) |
+| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) |
+| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
+| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
\* Android and iOS are untested.
@@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func main() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
+```
+
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
@@ -65,6 +112,10 @@ There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+
+fsnotify requires support from the underlying OS to work. The current NFS protocol does not provide network-level support for file notifications.
+
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
index 190bf0de..89cab046 100644
--- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -63,4 +63,6 @@ func (e Event) String() string {
}
// Common errors that can be reported by a watcher
-var ErrEventOverflow = errors.New("fsnotify queue overflow")
+var (
+ ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod
new file mode 100644
index 00000000..ff11e13f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/go.mod
@@ -0,0 +1,5 @@
+module github.com/fsnotify/fsnotify
+
+go 1.13
+
+require golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9
diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum
new file mode 100644
index 00000000..f60af985
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
index cc7db4b2..b33f2b4d 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -40,12 +40,12 @@ func newFdPoller(fd int) (*fdPoller, error) {
poller.fd = fd
// Create epoll fd
- poller.epfd, errno = unix.EpollCreate1(0)
+ poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
if errno != nil {
return nil, errno
}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
index 7d8de145..2306c462 100644
--- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -8,4 +8,4 @@ package fsnotify
import "golang.org/x/sys/unix"
-const openMode = unix.O_NONBLOCK | unix.O_RDONLY
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
index 9139e171..870c4d6d 100644
--- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -9,4 +9,4 @@ package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
-const openMode = unix.O_EVTONLY
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go
index 47d12f01..d2692177 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go
@@ -5,15 +5,15 @@ import (
"go/token"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/astp"
"golang.org/x/tools/go/ast/astutil"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "appendAssign"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious append result assignments"
@@ -24,14 +24,14 @@ p.negatives = append(p.negatives, y)`
p.positives = append(p.positives, x)
p.negatives = append(p.negatives, y)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&appendAssignChecker{ctx: ctx})
})
}
type appendAssignChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *appendAssignChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go
index 63f5d9fe..a761f2a8 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go
@@ -4,14 +4,14 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "appendCombine"
info.Tags = []string{"performance"}
info.Summary = "Detects `append` chains to the same slice that can be done in a single `append` call"
@@ -20,14 +20,14 @@ xs = append(xs, 1)
xs = append(xs, 2)`
info.After = `xs = append(xs, 1, 2)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmtList(&appendCombineChecker{ctx: ctx})
})
}
type appendCombineChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *appendCombineChecker) VisitStmtList(list []ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go
index 85a6f7c6..2eb7cf7d 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/argOrder_checker.go
@@ -4,8 +4,8 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astp"
@@ -13,21 +13,21 @@ import (
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "argOrder"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious arguments order"
info.Before = `strings.HasPrefix("#", userpass)`
info.After = `strings.HasPrefix(userpass, "#")`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&argOrderChecker{ctx: ctx})
})
}
type argOrderChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *argOrderChecker) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go
index eb342866..e3acd09e 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/assignOp_checker.go
@@ -4,29 +4,29 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "assignOp"
info.Tags = []string{"style"}
info.Summary = "Detects assignments that can be simplified by using assignment operators"
info.Before = `x = x * 2`
info.After = `x *= 2`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&assignOpChecker{ctx: ctx})
})
}
type assignOpChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *assignOpChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go
index 150cc690..3e96a39c 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/badCall_checker.go
@@ -3,28 +3,28 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "badCall"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious function calls"
info.Before = `strings.Replace(s, from, to, 0)`
info.After = `strings.Replace(s, from, to, -1)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&badCallChecker{ctx: ctx})
})
}
type badCallChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *badCallChecker) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go
index 466a89cc..6ce81f35 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go
@@ -5,9 +5,9 @@ import (
"go/constant"
"go/token"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astequal"
@@ -16,9 +16,9 @@ import (
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "badCond"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious condition expressions"
info.Before = `
for i := 0; i > n; i++ {
@@ -29,14 +29,14 @@ for i := 0; i < n; i++ {
xs[i] = 0
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForFuncDecl(&badCondChecker{ctx: ctx})
})
}
type badCondChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *badCondChecker) VisitFuncDecl(decl *ast.FuncDecl) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go
new file mode 100644
index 00000000..1025454d
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go
@@ -0,0 +1,445 @@
+package checkers
+
+import (
+ "go/ast"
+ "go/constant"
+ "sort"
+ "strconv"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/quasilyte/regex/syntax"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "badRegexp"
+ info.Tags = []string{"diagnostic", "experimental"}
+ info.Summary = "Detects suspicious regexp patterns"
+ info.Before = "regexp.MustCompile(`(?:^aa|bb|cc)foo[aba]`)"
+ info.After = "regexp.MustCompile(`^(?:aa|bb|cc)foo[ab]`)"
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ opts := &syntax.ParserOptions{}
+ c := &badRegexpChecker{
+ ctx: ctx,
+ parser: syntax.NewParser(opts),
+ }
+ return astwalk.WalkerForExpr(c)
+ })
+}
+
+type badRegexpChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+
+ parser *syntax.Parser
+ cause ast.Expr
+
+ flagStates []regexpFlagState
+ goodAnchors []syntax.Position
+}
+
+type regexpFlagState [utf8.RuneSelf]bool
+
+func (c *badRegexpChecker) VisitExpr(x ast.Expr) {
+ call, ok := x.(*ast.CallExpr)
+ if !ok {
+ return
+ }
+
+ switch qualifiedName(call.Fun) {
+ case "regexp.Compile", "regexp.MustCompile":
+ cv := c.ctx.TypesInfo.Types[call.Args[0]].Value
+ if cv == nil || cv.Kind() != constant.String {
+ return
+ }
+ pat := constant.StringVal(cv)
+ c.cause = call.Args[0]
+ c.checkPattern(pat)
+ }
+}
+
+func (c *badRegexpChecker) checkPattern(pat string) {
+ re, err := c.parser.Parse(pat)
+ if err != nil {
+ return
+ }
+
+ c.flagStates = c.flagStates[:0]
+ c.goodAnchors = c.goodAnchors[:0]
+
+ // In Go all flags (modifiers) are set to false by default,
+ // so we start from the empty flag set.
+ c.flagStates = append(c.flagStates, regexpFlagState{})
+
+ c.markGoodCarets(re.Expr)
+ c.walk(re.Expr)
+}
+
+func (c *badRegexpChecker) markGoodCarets(e syntax.Expr) {
+ canSkip := func(e syntax.Expr) bool {
+ switch e.Op {
+ case syntax.OpFlagOnlyGroup:
+ return true
+ case syntax.OpGroup:
+ x := e.Args[0]
+ return x.Op == syntax.OpConcat && len(x.Args) == 0
+ }
+ return false
+ }
+
+ if e.Op == syntax.OpConcat && len(e.Args) > 1 {
+ i := 0
+ for i < len(e.Args) && canSkip(e.Args[i]) {
+ i++
+ }
+ if i < len(e.Args) {
+ c.markGoodCarets(e.Args[i])
+ }
+ return
+ }
+ if e.Op == syntax.OpCaret {
+ c.addGoodAnchor(e.Pos)
+ }
+ for _, a := range e.Args {
+ c.markGoodCarets(a)
+ }
+}
+
+func (c *badRegexpChecker) walk(e syntax.Expr) {
+ switch e.Op {
+ case syntax.OpAlt:
+ c.checkAltAnchor(e)
+ c.checkAltDups(e)
+ for _, a := range e.Args {
+ c.walk(a)
+ }
+
+ case syntax.OpCharClass, syntax.OpNegCharClass:
+ if c.checkCharClassRanges(e) {
+ c.checkCharClassDups(e)
+ }
+
+ case syntax.OpStar, syntax.OpPlus:
+ c.checkNestedQuantifier(e)
+ c.walk(e.Args[0])
+
+ case syntax.OpFlagOnlyGroup:
+ c.updateFlagState(c.currentFlagState(), e, e.Args[0].Value)
+ case syntax.OpGroupWithFlags:
+ // Creates a new context using the current context copy.
+ // New flags are evaluated inside a new context.
+ // After nested expressions are processed, previous context is restored.
+ nflags := len(c.flagStates)
+ c.flagStates = append(c.flagStates, *c.currentFlagState())
+ c.updateFlagState(c.currentFlagState(), e, e.Args[1].Value)
+ c.walk(e.Args[0])
+ c.flagStates = c.flagStates[:nflags]
+ case syntax.OpGroup, syntax.OpCapture, syntax.OpNamedCapture:
+ // Like with OpGroupWithFlags, but doesn't evaluate any new flags.
+ nflags := len(c.flagStates)
+ c.flagStates = append(c.flagStates, *c.currentFlagState())
+ c.walk(e.Args[0])
+ c.flagStates = c.flagStates[:nflags]
+
+ case syntax.OpCaret:
+ if !c.isGoodAnchor(e) {
+ c.warn("dangling or redundant ^, maybe \\^ is intended?")
+ }
+
+ default:
+ for _, a := range e.Args {
+ c.walk(a)
+ }
+ }
+}
+
+func (c *badRegexpChecker) currentFlagState() *regexpFlagState {
+ return &c.flagStates[len(c.flagStates)-1]
+}
+
+func (c *badRegexpChecker) updateFlagState(state *regexpFlagState, e syntax.Expr, flagString string) {
+ clearing := false
+ for i := 0; i < len(flagString); i++ {
+ ch := flagString[i]
+ if ch == '-' {
+ clearing = true
+ continue
+ }
+ if int(ch) >= len(state) {
+ continue // Should never happen in practice, but we don't want a panic
+ }
+
+ if clearing {
+ if !state[ch] {
+ c.warn("clearing unset flag %c in %s", ch, e.Value)
+ }
+ } else {
+ if state[ch] {
+ c.warn("redundant flag %c in %s", ch, e.Value)
+ }
+ }
+ state[ch] = !clearing
+ }
+}
+
+func (c *badRegexpChecker) checkNestedQuantifier(e syntax.Expr) {
+ x := e.Args[0]
+ switch x.Op {
+ case syntax.OpGroup, syntax.OpCapture, syntax.OpGroupWithFlags:
+ if len(e.Args) == 1 {
+ x = x.Args[0]
+ }
+ }
+
+ switch x.Op {
+ case syntax.OpPlus, syntax.OpStar:
+ c.warn("repeated greedy quantifier in %s", e.Value)
+ }
+}
+
+func (c *badRegexpChecker) checkAltDups(alt syntax.Expr) {
+ // Seek duplicated alternation expressions.
+
+ set := make(map[string]struct{}, len(alt.Args))
+ for _, a := range alt.Args {
+ if _, ok := set[a.Value]; ok {
+ c.warn("`%s` is duplicated in %s", a.Value, alt.Value)
+ }
+ set[a.Value] = struct{}{}
+ }
+}
+
+func (c *badRegexpChecker) isCharOrLit(e syntax.Expr) bool {
+ return e.Op == syntax.OpChar || e.Op == syntax.OpLiteral
+}
+
+func (c *badRegexpChecker) checkAltAnchor(alt syntax.Expr) {
+ // Seek suspicious anchors.
+
+ // Case 1: an alternation of literals where 1st expr begins with ^ anchor.
+ first := alt.Args[0]
+ if first.Op == syntax.OpConcat && len(first.Args) == 2 && first.Args[0].Op == syntax.OpCaret && c.isCharOrLit(first.Args[1]) {
+ matched := true
+ for _, a := range alt.Args[1:] {
+ if !c.isCharOrLit(a) {
+ matched = false
+ break
+ }
+ }
+ if matched {
+ c.warn("^ applied only to `%s` in %s", first.Value[len(`^`):], alt.Value)
+ }
+ }
+
+ // Case 2: an alternation of literals where last expr ends with $ anchor.
+ last := alt.Args[len(alt.Args)-1]
+ if last.Op == syntax.OpConcat && len(last.Args) == 2 && last.Args[1].Op == syntax.OpDollar && c.isCharOrLit(last.Args[0]) {
+ matched := true
+ for _, a := range alt.Args[:len(alt.Args)-1] {
+ if !c.isCharOrLit(a) {
+ matched = false
+ break
+ }
+ }
+ if matched {
+ c.warn("$ applied only to `%s` in %s", last.Value[:len(last.Value)-len(`$`)], alt.Value)
+ }
+ }
+}
+
+func (c *badRegexpChecker) checkCharClassRanges(cc syntax.Expr) bool {
+ // Look for suspicious ranges like `!-_`.
+ //
+ // We permit numerical ranges (0-9, hex and octal literals)
+ // and simple ascii letter ranges.
+
+ for _, e := range cc.Args {
+ if e.Op != syntax.OpCharRange {
+ continue
+ }
+ switch e.Args[0].Op {
+ case syntax.OpEscapeOctal, syntax.OpEscapeHex:
+ continue
+ }
+ ch := c.charClassBoundRune(e.Args[0])
+ if ch == 0 {
+ return false
+ }
+ good := unicode.IsLetter(ch) || (ch >= '0' && ch <= '9')
+ if !good {
+ c.warnSloppyCharRange(e.Value, cc.Value)
+ }
+ }
+
+ return true
+}
+
+func (c *badRegexpChecker) checkCharClassDups(cc syntax.Expr) {
+ // Look for excessive elements inside a character class.
+ // Report them as intersections.
+
+ if len(cc.Args) == 1 {
+ return // Can't have duplicates.
+ }
+
+ type charRange struct {
+ low rune
+ high rune
+ source string
+ }
+ ranges := make([]charRange, 0, 8)
+ addRange := func(source string, low, high rune) {
+ ranges = append(ranges, charRange{source: source, low: low, high: high})
+ }
+ addRange1 := func(source string, ch rune) {
+ addRange(source, ch, ch)
+ }
+
+ // 1. Collect ranges, O(n).
+ for _, e := range cc.Args {
+ switch e.Op {
+ case syntax.OpEscapeOctal:
+ addRange1(e.Value, c.octalToRune(e))
+ case syntax.OpEscapeHex:
+ addRange1(e.Value, c.hexToRune(e))
+ case syntax.OpChar:
+ addRange1(e.Value, c.stringToRune(e.Value))
+ case syntax.OpCharRange:
+ addRange(e.Value, c.charClassBoundRune(e.Args[0]), c.charClassBoundRune(e.Args[1]))
+ case syntax.OpEscapeMeta:
+ addRange1(e.Value, rune(e.Value[1]))
+ case syntax.OpEscapeChar:
+ ch := c.stringToRune(e.Value[len(`\`):])
+ if unicode.IsPunct(ch) {
+ addRange1(e.Value, ch)
+ break
+ }
+ switch e.Value {
+ case `\|`, `\<`, `\>`, `\+`, `\=`: // How to cover all symbols?
+ addRange1(e.Value, c.stringToRune(e.Value[len(`\`):]))
+ case `\t`:
+ addRange1(e.Value, '\t')
+ case `\n`:
+ addRange1(e.Value, '\n')
+ case `\r`:
+ addRange1(e.Value, '\r')
+ case `\v`:
+ addRange1(e.Value, '\v')
+ case `\d`:
+ addRange(e.Value, '0', '9')
+ case `\D`:
+ addRange(e.Value, 0, '0'-1)
+ addRange(e.Value, '9'+1, utf8.MaxRune)
+ case `\s`:
+ addRange(e.Value, '\t', '\n') // 9-10
+ addRange(e.Value, '\f', '\r') // 12-13
+ addRange1(e.Value, ' ') // 32
+ case `\S`:
+ addRange(e.Value, 0, '\t'-1)
+ addRange(e.Value, '\n'+1, '\f'-1)
+ addRange(e.Value, '\r'+1, ' '-1)
+ addRange(e.Value, ' '+1, utf8.MaxRune)
+ case `\w`:
+ addRange(e.Value, '0', '9') // 48-57
+ addRange(e.Value, 'A', 'Z') // 65-90
+ addRange1(e.Value, '_') // 95
+ addRange(e.Value, 'a', 'z') // 97-122
+ case `\W`:
+ addRange(e.Value, 0, '0'-1)
+ addRange(e.Value, '9'+1, 'A'-1)
+ addRange(e.Value, 'Z'+1, '_'-1)
+ addRange(e.Value, '_'+1, 'a'-1)
+ addRange(e.Value, 'z'+1, utf8.MaxRune)
+ default:
+ // Give up: unknown escape sequence.
+ return
+ }
+ default:
+ // Give up: unexpected operation inside char class.
+ return
+ }
+ }
+
+ // 2. Sort ranges, O(nlogn).
+ sort.Slice(ranges, func(i, j int) bool {
+ return ranges[i].low < ranges[j].low
+ })
+
+ // 3. Search for duplicates, O(n).
+ for i := 0; i < len(ranges)-1; i++ {
+ x := ranges[i+0]
+ y := ranges[i+1]
+ if x.high >= y.low {
+ c.warnCharClassDup(x.source, y.source, cc.Value)
+ break
+ }
+ }
+}
+
+func (c *badRegexpChecker) charClassBoundRune(e syntax.Expr) rune {
+ switch e.Op {
+ case syntax.OpChar:
+ return c.stringToRune(e.Value)
+ case syntax.OpEscapeHex:
+ return c.hexToRune(e)
+ case syntax.OpEscapeOctal:
+ return c.octalToRune(e)
+ default:
+ return 0
+ }
+}
+
+func (c *badRegexpChecker) octalToRune(e syntax.Expr) rune {
+ v, _ := strconv.ParseInt(e.Value[len(`\`):], 8, 32)
+ return rune(v)
+}
+
+func (c *badRegexpChecker) hexToRune(e syntax.Expr) rune {
+ var s string
+ switch e.Form {
+ case syntax.FormEscapeHexFull:
+ s = e.Value[len(`\x{`) : len(e.Value)-len(`}`)]
+ default:
+ s = e.Value[len(`\x`):]
+ }
+ v, _ := strconv.ParseInt(s, 16, 32)
+ return rune(v)
+}
+
+func (c *badRegexpChecker) stringToRune(s string) rune {
+ ch, _ := utf8.DecodeRuneInString(s)
+ return ch
+}
+
+func (c *badRegexpChecker) addGoodAnchor(pos syntax.Position) {
+ c.goodAnchors = append(c.goodAnchors, pos)
+}
+
+func (c *badRegexpChecker) isGoodAnchor(e syntax.Expr) bool {
+ for _, pos := range c.goodAnchors {
+ if e.Pos == pos {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *badRegexpChecker) warn(format string, args ...interface{}) {
+ c.ctx.Warn(c.cause, format, args...)
+}
+
+func (c *badRegexpChecker) warnSloppyCharRange(rng, charClass string) {
+ c.ctx.Warn(c.cause, "suspicious char range `%s` in %s", rng, charClass)
+}
+
+func (c *badRegexpChecker) warnCharClassDup(x, y, charClass string) {
+ if x == y {
+ c.ctx.Warn(c.cause, "`%s` is duplicated in %s", x, charClass)
+ } else {
+ c.ctx.Warn(c.cause, "`%s` intersects with `%s` in %s", x, y, charClass)
+ }
+}
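
For reference, a few hypothetical patterns that would trip the branches of `walk` above; the comments paraphrase the warnings rather than quote them:

```go
package example

import "regexp"

// Hypothetical target code for the badRegexp checker; every pattern compiles,
// but each one contains a construct the checker warns about.
var (
	_ = regexp.MustCompile(`(?:foo|bar|foo)`)  // `foo` is duplicated in the alternation
	_ = regexp.MustCompile(`foo[aba]`)         // `a` is duplicated inside the character class
	_ = regexp.MustCompile(`(?:^aa|bb|cc)foo`) // ^ applies only to `aa`, not the whole alternation
)
```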
diff --git a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go
index f4eb9ed7..8c599031 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go
@@ -6,9 +6,9 @@ import (
"go/token"
"strconv"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astequal"
@@ -18,7 +18,7 @@ import (
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "boolExprSimplify"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects bool expressions that can be simplified"
@@ -29,14 +29,14 @@ b := !(x) == !(y)`
a := elapsed < expectElapsedMin
b := (x) == (y)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&boolExprSimplifyChecker{ctx: ctx})
})
}
type boolExprSimplifyChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
hasFloats bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go
index 24d8b7ff..ff5e5174 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go
@@ -3,26 +3,26 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "builtinShadow"
info.Tags = []string{"style", "opinionated"}
info.Summary = "Detects when predeclared identifiers shadowed in assignments"
info.Before = `len := 10`
info.After = `length := 10`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForLocalDef(&builtinShadowChecker{ctx: ctx}, ctx.TypesInfo)
})
}
type builtinShadowChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *builtinShadowChecker) VisitLocalDef(name astwalk.Name, _ ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go
index bc9a2115..76b6fb4f 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go
@@ -3,15 +3,15 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "captLocal"
info.Tags = []string{"style"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"paramsOnly": {
Value: true,
Usage: "whether to restrict checker to params only",
@@ -21,7 +21,7 @@ func init() {
info.Before = `func f(IN int, OUT *int) (ERR error) {}`
info.After = `func f(in int, out *int) (err error) {}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &captLocalChecker{ctx: ctx}
c.paramsOnly = info.Params.Bool("paramsOnly")
return astwalk.WalkerForLocalDef(c, ctx.TypesInfo)
@@ -30,7 +30,7 @@ func init() {
type captLocalChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
paramsOnly bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go
index 1ef4b53b..95036752 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go
@@ -4,12 +4,12 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "caseOrder"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects erroneous case order inside switch statements"
@@ -28,14 +28,14 @@ case ast.Expr:
fmt.Println("expr")
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&caseOrderChecker{ctx: ctx})
})
}
type caseOrderChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *caseOrderChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/checkers.go b/vendor/github.com/go-critic/go-critic/checkers/checkers.go
index 96202221..0c2ebc00 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/checkers.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/checkers.go
@@ -2,9 +2,18 @@
package checkers
import (
- "github.com/go-lintpack/lintpack"
+ "os"
+
+ "github.com/go-critic/go-critic/framework/linter"
)
-var collection = &lintpack.CheckerCollection{
+var collection = &linter.CheckerCollection{
URL: "https://github.com/go-critic/go-critic/checkers",
}
+
+var debug = func() func() bool {
+ v := os.Getenv("DEBUG") != ""
+ return func() bool {
+ return v
+ }
+}()
diff --git a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go
index 14d89da3..ecadba10 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go
@@ -5,19 +5,19 @@ import (
"regexp"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "codegenComment"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects malformed 'code generated' file comments"
info.Before = `// This file was automatically generated by foogen`
info.After = `// Code generated by foogen. DO NOT EDIT.`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
patterns := []string{
"this (?:file|code) (?:was|is) auto(?:matically)? generated",
"this (?:file|code) (?:was|is) generated automatically",
@@ -38,7 +38,7 @@ func init() {
type codegenCommentChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
badCommentRE *regexp.Regexp
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go
index ed75015e..de7bfc19 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go
@@ -7,23 +7,25 @@ import (
"unicode"
"unicode/utf8"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "commentFormatting"
- info.Tags = []string{"style", "experimental"}
+ info.Tags = []string{"style"}
info.Summary = "Detects comments with non-idiomatic formatting"
info.Before = `//This is a comment`
info.After = `// This is a comment`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
parts := []string{
- `^//\w+:.*$`, //key: value
- `^//nolint$`, //nolint
- `^//line /.*:\d+`, //line /path/to/file:123
+ `^//go:generate .*$`, // e.g.: go:generate value
+ `^//\w+:.*$`, // e.g.: key: value
+ `^//nolint$`, // e.g.: nolint
+ `^//line /.*:\d+`, // e.g.: line /path/to/file:123
+ `^//export \w+$`, // e.g.: export Foo
}
pat := "(?m)" + strings.Join(parts, "|")
pragmaRE := regexp.MustCompile(pat)
@@ -36,7 +38,7 @@ func init() {
type commentFormattingChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
pragmaRE *regexp.Regexp
}
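
The extended pragma list above keeps machine-readable comments out of the formatting check; a hypothetical file showing both exempt and reported forms:

```go
package example

//go:generate stringer -type=Color
//nolint
//export Foo

// Foo is fine: a space follows the slashes.
func Foo() {}

//bar would be reported by commentFormatting: no space after the slashes.
func bar() {}
```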
diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go
index 0554e365..8d938704 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go
@@ -7,13 +7,13 @@ import (
"regexp"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/strparse"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "commentedOutCode"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects commented-out code inside function bodies"
@@ -22,7 +22,7 @@ func init() {
foo(1, 2)`
info.After = `foo(1, 2)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForLocalComment(&commentedOutCodeChecker{
ctx: ctx,
notQuiteFuncCall: regexp.MustCompile(`\w+\s+\([^)]*\)\s*$`),
@@ -32,7 +32,7 @@ foo(1, 2)`
type commentedOutCodeChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
fn *ast.FuncDecl
notQuiteFuncCall *regexp.Regexp
diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go
index 5aeb86c0..096a9024 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go
@@ -5,12 +5,12 @@ import (
"go/token"
"regexp"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "commentedOutImport"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects commented-out imports"
@@ -24,7 +24,7 @@ import (
"fmt"
)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
const pattern = `(?m)^(?://|/\*)?\s*"([a-zA-Z0-9_/]+)"\s*(?:\*/)?$`
return &commentedOutImportChecker{
ctx: ctx,
@@ -35,7 +35,7 @@ import (
type commentedOutImportChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
importStringRE *regexp.Regexp
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go
index caa0de65..755449e0 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go
@@ -3,12 +3,12 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "defaultCaseOrder"
info.Tags = []string{"style"}
info.Summary = "Detects when default case in switch isn't on 1st or last position"
@@ -31,14 +31,14 @@ default: // <- last case (could also be the first one)
// ...
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&defaultCaseOrderChecker{ctx: ctx})
})
}
type defaultCaseOrderChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *defaultCaseOrderChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go
new file mode 100644
index 00000000..3cab7827
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/deferUnlambda_checker.go
@@ -0,0 +1,94 @@
+package checkers
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/go-toolsmith/astcast"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "deferUnlambda"
+ info.Tags = []string{"style", "experimental"}
+ info.Summary = "Detects deferred function literals that can be simplified"
+ info.Before = `defer func() { f() }()`
+ info.After = `f()`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return astwalk.WalkerForStmt(&deferUnlambdaChecker{ctx: ctx})
+ })
+}
+
+type deferUnlambdaChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+}
+
+func (c *deferUnlambdaChecker) VisitStmt(x ast.Stmt) {
+ def, ok := x.(*ast.DeferStmt)
+ if !ok {
+ return
+ }
+
+ // We don't analyze deferred function args.
+ // Most deferred calls don't have them, so it's not a big deal to skip them.
+ if len(def.Call.Args) != 0 {
+ return
+ }
+
+ fn, ok := def.Call.Fun.(*ast.FuncLit)
+ if !ok {
+ return
+ }
+
+ if len(fn.Body.List) != 1 {
+ return
+ }
+
+ call, ok := astcast.ToExprStmt(fn.Body.List[0]).X.(*ast.CallExpr)
+ if !ok || !c.isFunctionCall(call) {
+ return
+ }
+
+ // Skip recover() as it can't be moved outside of the lambda.
+ // Skip panic() to avoid affecting the stack trace.
+ switch qualifiedName(call.Fun) {
+ case "recover", "panic":
+ return
+ }
+
+ for _, arg := range call.Args {
+ if !c.isConstExpr(arg) {
+ return
+ }
+ }
+
+ c.warn(def, call)
+}
+
+func (c *deferUnlambdaChecker) isFunctionCall(e *ast.CallExpr) bool {
+ switch fnExpr := e.Fun.(type) {
+ case *ast.Ident:
+ return true
+ case *ast.SelectorExpr:
+ x, ok := fnExpr.X.(*ast.Ident)
+ if !ok {
+ return false
+ }
+ _, ok = c.ctx.TypesInfo.ObjectOf(x).(*types.PkgName)
+ return ok
+ default:
+ return false
+ }
+}
+
+func (c *deferUnlambdaChecker) isConstExpr(e ast.Expr) bool {
+ return c.ctx.TypesInfo.Types[e].Value != nil
+}
+
+func (c *deferUnlambdaChecker) warn(cause, suggestion ast.Node) {
+ c.ctx.Warn(cause, "can rewrite as `defer %s`", suggestion)
+}
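
A few hypothetical deferred calls showing what the checker reports and what it deliberately skips; `work` is an illustrative helper, not part of go-critic:

```go
package example

import "fmt"

func work() error { return nil }

func do() {
	defer func() { fmt.Println("done") }() // reported: can rewrite as `defer fmt.Println("done")`
	defer func() { recover() }()           // skipped: recover() must run inside the deferred literal
	err := work()
	defer func() { fmt.Println(err) }() // skipped: err is not a constant expression
}
```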
diff --git a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go
index 37675735..82f300b3 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go
@@ -5,14 +5,14 @@ import (
"regexp"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "deprecatedComment"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects malformed 'deprecated' doc-comments"
info.Before = `
// deprecated, use FuncNew instead
@@ -21,7 +21,7 @@ func FuncOld() int`
// Deprecated: use FuncNew instead
func FuncOld() int`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &deprecatedCommentChecker{ctx: ctx}
c.commonPatterns = []*regexp.Regexp{
@@ -61,7 +61,7 @@ func FuncOld() int`
type deprecatedCommentChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
commonPatterns []*regexp.Regexp
commonTypos []string
diff --git a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go
index 5c771b31..2a3b393a 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go
@@ -6,12 +6,12 @@ import (
"regexp"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "docStub"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects comments that silence go lint complaints about doc-comment"
@@ -26,7 +26,7 @@ func Foo() {}
// Foo is a demonstration-only function.
func Foo() {}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
re := `(?i)^\.\.\.$|^\.$|^xxx\.?$|^whatever\.?$`
c := &docStubChecker{
ctx: ctx,
@@ -38,7 +38,7 @@ func Foo() {}`
type docStubChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
stubCommentRE *regexp.Regexp
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go
index 81975940..24e921b0 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/dupArg_checker.go
@@ -4,21 +4,21 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "dupArg"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious duplicated arguments"
info.Before = `copy(dst, dst)`
info.After = `copy(dst, src)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &dupArgChecker{ctx: ctx}
// newMatcherFunc returns a function that matches a call if
// args[xIndex] and args[yIndex] are equal.
@@ -101,7 +101,7 @@ func init() {
type dupArgChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
matchers map[string]func(*ast.CallExpr) bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go
index a1388487..3399f053 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go
@@ -3,13 +3,13 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "dupBranchBody"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects duplicated branch bodies inside conditional statements"
@@ -26,14 +26,14 @@ if cond {
println("cond=false")
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&dupBranchBodyChecker{ctx: ctx})
})
}
type dupBranchBodyChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *dupBranchBodyChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go
index 26ef1739..89ec66bb 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go
@@ -3,13 +3,13 @@ package checkers
import (
"go/ast"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "dupCase"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects duplicated case clauses inside switch statements"
@@ -22,14 +22,14 @@ switch x {
case ys[0], ys[1], ys[2], ys[3], ys[4]:
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&dupCaseChecker{ctx: ctx})
})
}
type dupCaseChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
astSet lintutil.AstSet
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go
index d531413a..27b796cd 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go
@@ -4,11 +4,11 @@ import (
"fmt"
"go/ast"
- "github.com/go-lintpack/lintpack"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "dupImport"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects multiple imports of the same package under different aliases"
@@ -22,13 +22,13 @@ import(
"fmt"
)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return &dupImportChecker{ctx: ctx}
})
}
type dupImportChecker struct {
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *dupImportChecker) WalkFile(f *ast.File) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go
index 24bb5243..4966cd2a 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go
@@ -5,14 +5,14 @@ import (
"go/token"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "dupSubExpr"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious duplicated sub-expressions"
@@ -25,7 +25,7 @@ sort.Slice(xs, func(i, j int) bool {
return xs[i].v < xs[j].v
})`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &dupSubExprChecker{ctx: ctx}
ops := []struct {
@@ -65,7 +65,7 @@ sort.Slice(xs, func(i, j int) bool {
type dupSubExprChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
// opSet is a set of binary operations that do not make
// sense with duplicated (same) RHS and LHS.
diff --git a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go
index c3a9546b..9e56d1c4 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go
@@ -3,16 +3,16 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "elseif"
info.Tags = []string{"style"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"skipBalanced": {
Value: true,
Usage: "whether to skip balanced if-else pairs",
@@ -30,7 +30,7 @@ if cond1 {
} else if x := cond2; x {
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &elseifChecker{ctx: ctx}
c.skipBalanced = info.Params.Bool("skipBalanced")
return astwalk.WalkerForStmt(c)
@@ -39,7 +39,7 @@ if cond1 {
type elseifChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
skipBalanced bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go
index 5908dfa3..16bfa7e4 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go
@@ -4,12 +4,12 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "emptyFallthrough"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects fallthrough that can be avoided by using multi case values"
@@ -24,14 +24,14 @@ case reflect.Int, reflect.Int32:
return Int
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&emptyFallthroughChecker{ctx: ctx})
})
}
type emptyFallthroughChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *emptyFallthroughChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go b/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go
index a7be906e..20d647ff 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/emptyStringTest_checker.go
@@ -4,15 +4,15 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "emptyStringTest"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects empty string checks that can be written more idiomatically"
@@ -20,14 +20,14 @@ func init() {
info.After = `s == ""`
info.Note = "See https://dmitri.shuralyov.com/idiomatic-go#empty-string-check."
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&emptyStringTestChecker{ctx: ctx})
})
}
type emptyStringTestChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *emptyStringTestChecker) VisitExpr(e ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go b/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go
index 265b2f79..b8dfdc02 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/equalFold_checker.go
@@ -4,28 +4,28 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "equalFold"
info.Tags = []string{"performance", "experimental"}
info.Summary = "Detects unoptimal strings/bytes case-insensitive comparison"
info.Before = `strings.ToLower(x) == strings.ToLower(y)`
info.After = `strings.EqualFold(x, y)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&equalFoldChecker{ctx: ctx})
})
}
type equalFoldChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *equalFoldChecker) VisitExpr(e ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go
index f76519cd..0bec0e83 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go
@@ -5,16 +5,16 @@ import (
"go/token"
"go/types"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "evalOrder"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects unwanted dependencies on the evaluation order"
@@ -24,14 +24,14 @@ err := f(&x)
return x, err
`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&evalOrderChecker{ctx: ctx})
})
}
type evalOrderChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *evalOrderChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go
index 05ed6ae9..65800fc5 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go
@@ -3,17 +3,17 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astfmt"
"github.com/go-toolsmith/astp"
"golang.org/x/tools/go/ast/astutil"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "exitAfterDefer"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects calls to exit/fatal inside functions that use defer"
info.Before = `
defer os.Remove(filename)
@@ -27,14 +27,14 @@ if bad {
return
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForFuncDecl(&exitAfterDeferChecker{ctx: ctx})
})
}
type exitAfterDeferChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *exitAfterDeferChecker) VisitFuncDecl(fn *ast.FuncDecl) {
@@ -66,13 +66,11 @@ func (c *exitAfterDeferChecker) VisitFuncDecl(fn *ast.FuncDecl) {
}
func (c *exitAfterDeferChecker) warn(cause *ast.CallExpr, deferStmt *ast.DeferStmt) {
- var s string
+ s := astfmt.Sprint(deferStmt)
if fnlit, ok := deferStmt.Call.Fun.(*ast.FuncLit); ok {
// To avoid long and multi-line warning messages,
// collapse the function literals.
s = "defer " + astfmt.Sprint(fnlit.Type) + "{...}(...)"
- } else {
- s = astfmt.Sprint(deferStmt)
}
c.ctx.Warn(cause, "%s clutters `%s`", cause.Fun, s)
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go
new file mode 100644
index 00000000..b11dc247
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go
@@ -0,0 +1,50 @@
+package checkers
+
+import (
+ "go/ast"
+ "strings"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/go-toolsmith/astcast"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "filepathJoin"
+ info.Tags = []string{"diagnostic", "experimental"}
+ info.Summary = "Detects problems in filepath.Join() function calls"
+ info.Before = `filepath.Join("dir/", filename)`
+ info.After = `filepath.Join("dir", filename)`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return astwalk.WalkerForExpr(&filepathJoinChecker{ctx: ctx})
+ })
+}
+
+type filepathJoinChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+}
+
+func (c *filepathJoinChecker) VisitExpr(expr ast.Expr) {
+ call := astcast.ToCallExpr(expr)
+ if qualifiedName(call.Fun) != "filepath.Join" {
+ return
+ }
+
+ for _, arg := range call.Args {
+ arg, ok := arg.(*ast.BasicLit)
+ if ok && c.hasSeparator(arg) {
+ c.warnSeparator(arg)
+ }
+ }
+}
+
+func (c *filepathJoinChecker) hasSeparator(v *ast.BasicLit) bool {
+ return strings.ContainsAny(v.Value, `/\`)
+}
+
+func (c *filepathJoinChecker) warnSeparator(sep ast.Expr) {
+ c.ctx.Warn(sep, "%s contains a path separator", sep)
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go b/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go
index cb9faee7..393274c4 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/flagDeref_checker.go
@@ -3,12 +3,12 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "flagDeref"
info.Tags = []string{"diagnostic"}
info.Summary = "Detects immediate dereferencing of `flag` package pointers"
@@ -21,7 +21,7 @@ flag.BoolVar(&b, "b", false, "b docs")`
Dereferencing returned pointers will lead to hard to find errors
where flag values are not updated after flag.Parse().`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &flagDerefChecker{
ctx: ctx,
flagPtrFuncs: map[string]bool{
@@ -41,7 +41,7 @@ where flag values are not updated after flag.Parse().`
type flagDerefChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
flagPtrFuncs map[string]bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go
index 1d43ba52..36d2e450 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go
@@ -6,27 +6,27 @@ import (
"go/types"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "flagName"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects flag names with whitespace"
info.Before = `b := flag.Bool(" foo ", false, "description")`
info.After = `b := flag.Bool("foo", false, "description")`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&flagNameChecker{ctx: ctx})
})
}
type flagNameChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *flagNameChecker) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go
index a700314c..f3f9c535 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go
@@ -5,13 +5,13 @@ import (
"go/token"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "hexLiteral"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects hex literals that have mixed case letter digits"
@@ -25,14 +25,14 @@ y := 0xff
// (B)
y := 0xFF`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&hexLiteralChecker{ctx: ctx})
})
}
type hexLiteralChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *hexLiteralChecker) warn0X(lit *ast.BasicLit) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go
index 656b4cc2..54be7763 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go
@@ -3,15 +3,15 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "hugeParam"
info.Tags = []string{"performance"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"sizeThreshold": {
Value: 80,
Usage: "size in bytes that makes the warning trigger",
@@ -21,7 +21,7 @@ func init() {
info.Before = `func f(x [1024]int) {}`
info.After = `func f(x *[1024]int) {}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForFuncDecl(&hugeParamChecker{
ctx: ctx,
sizeThreshold: int64(info.Params.Int("sizeThreshold")),
@@ -31,7 +31,7 @@ func init() {
type hugeParamChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
sizeThreshold int64
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go
index c0a456af..91e1cfb3 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go
@@ -3,12 +3,12 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "ifElseChain"
info.Tags = []string{"style"}
info.Summary = "Detects repeated if-else statements and suggests to replace them with switch statement"
@@ -34,14 +34,14 @@ Permits single else or else-if; repeated else-if or else + else-if
will trigger suggestion to use switch statement.
See [EffectiveGo#switch](https://golang.org/doc/effective_go.html#switch).`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&ifElseChainChecker{ctx: ctx})
})
}
type ifElseChainChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
cause *ast.IfStmt
visited map[*ast.IfStmt]bool
diff --git a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go
index 9a2ccc55..60c0ab21 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go
@@ -4,12 +4,12 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "importShadow"
info.Tags = []string{"style", "opinionated"}
info.Summary = "Detects when imported package names shadowed in the assignments"
@@ -19,7 +19,7 @@ filepath := "foo.txt"`
info.After = `
filename := "foo.txt"`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
ctx.Require.PkgObjects = true
return astwalk.WalkerForLocalDef(&importShadowChecker{ctx: ctx}, ctx.TypesInfo)
})
@@ -27,7 +27,7 @@ filename := "foo.txt"`
type importShadowChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *importShadowChecker) VisitLocalDef(def astwalk.Name, _ ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go b/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go
index 8fbe98c9..73399887 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/indexAlloc_checker.go
@@ -3,14 +3,14 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "indexAlloc"
info.Tags = []string{"performance"}
info.Summary = "Detects strings.Index calls that may cause unwanted allocs"
@@ -18,14 +18,14 @@ func init() {
info.After = `bytes.Index(x, []byte(y))`
info.Note = `See Go issue for details: https://github.com/golang/go/issues/25864`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&indexAllocChecker{ctx: ctx})
})
}
type indexAllocChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *indexAllocChecker) VisitExpr(e ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go
index bfbd661b..91e8816d 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go
@@ -3,13 +3,13 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "initClause"
info.Tags = []string{"style", "opinionated", "experimental"}
info.Summary = "Detects non-assignment statements inside if/switch init clause"
@@ -19,14 +19,14 @@ func init() {
if cond {
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&initClauseChecker{ctx: ctx})
})
}
type initClauseChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *initClauseChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/comment_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/comment_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/comment_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/comment_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/doc_comment_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/doc_comment_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/doc_comment_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/doc_comment_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/expr_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/expr_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/expr_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/expr_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/func_decl_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/func_decl_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/func_decl_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/func_decl_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/local_comment_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_comment_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/local_comment_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_comment_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/local_def_visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/local_def_visitor.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/local_def_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/local_def_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/local_expr_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_expr_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/local_expr_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_expr_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/stmt_list_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_list_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/stmt_list_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_list_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/stmt_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/stmt_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/stmt_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/type_expr_walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/type_expr_walker.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/type_expr_walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/type_expr_walker.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/visitor.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/walk_handler.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walk_handler.go
similarity index 100%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/walk_handler.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walk_handler.go
diff --git a/vendor/github.com/go-lintpack/lintpack/astwalk/walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go
similarity index 70%
rename from vendor/github.com/go-lintpack/lintpack/astwalk/walker.go
rename to vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go
index fddae710..cd5e1c97 100644
--- a/vendor/github.com/go-lintpack/lintpack/astwalk/walker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go
@@ -3,55 +3,55 @@ package astwalk
import (
"go/types"
- "github.com/go-lintpack/lintpack"
+ "github.com/go-critic/go-critic/framework/linter"
)
// WalkerForFuncDecl returns file walker implementation for FuncDeclVisitor.
-func WalkerForFuncDecl(v FuncDeclVisitor) lintpack.FileWalker {
+func WalkerForFuncDecl(v FuncDeclVisitor) linter.FileWalker {
return &funcDeclWalker{visitor: v}
}
// WalkerForExpr returns file walker implementation for ExprVisitor.
-func WalkerForExpr(v ExprVisitor) lintpack.FileWalker {
+func WalkerForExpr(v ExprVisitor) linter.FileWalker {
return &exprWalker{visitor: v}
}
// WalkerForLocalExpr returns file walker implementation for LocalExprVisitor.
-func WalkerForLocalExpr(v LocalExprVisitor) lintpack.FileWalker {
+func WalkerForLocalExpr(v LocalExprVisitor) linter.FileWalker {
return &localExprWalker{visitor: v}
}
// WalkerForStmtList returns file walker implementation for StmtListVisitor.
-func WalkerForStmtList(v StmtListVisitor) lintpack.FileWalker {
+func WalkerForStmtList(v StmtListVisitor) linter.FileWalker {
return &stmtListWalker{visitor: v}
}
// WalkerForStmt returns file walker implementation for StmtVisitor.
-func WalkerForStmt(v StmtVisitor) lintpack.FileWalker {
+func WalkerForStmt(v StmtVisitor) linter.FileWalker {
return &stmtWalker{visitor: v}
}
// WalkerForTypeExpr returns file walker implementation for TypeExprVisitor.
-func WalkerForTypeExpr(v TypeExprVisitor, info *types.Info) lintpack.FileWalker {
+func WalkerForTypeExpr(v TypeExprVisitor, info *types.Info) linter.FileWalker {
return &typeExprWalker{visitor: v, info: info}
}
// WalkerForLocalComment returns file walker implementation for LocalCommentVisitor.
-func WalkerForLocalComment(v LocalCommentVisitor) lintpack.FileWalker {
+func WalkerForLocalComment(v LocalCommentVisitor) linter.FileWalker {
return &localCommentWalker{visitor: v}
}
// WalkerForComment returns file walker implementation for CommentVisitor.
-func WalkerForComment(v CommentVisitor) lintpack.FileWalker {
+func WalkerForComment(v CommentVisitor) linter.FileWalker {
return &commentWalker{visitor: v}
}
// WalkerForDocComment returns file walker implementation for DocCommentVisitor.
-func WalkerForDocComment(v DocCommentVisitor) lintpack.FileWalker {
+func WalkerForDocComment(v DocCommentVisitor) linter.FileWalker {
return &docCommentWalker{visitor: v}
}
// WalkerForLocalDef returns file walker implementation for LocalDefVisitor.
-func WalkerForLocalDef(v LocalDefVisitor, info *types.Info) lintpack.FileWalker {
+func WalkerForLocalDef(v LocalDefVisitor, info *types.Info) linter.FileWalker {
return &localDefWalker{visitor: v, info: info}
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go
index de3e781e..fdbbb45a 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go
@@ -5,18 +5,18 @@ import (
"go/types"
"strings"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astp"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "mapKey"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects suspicious map literal keys"
info.Before = `
_ = map[string]int{
@@ -29,14 +29,14 @@ _ = map[string]int{
"bar": 2,
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&mapKeyChecker{ctx: ctx})
})
}
type mapKeyChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
astSet lintutil.AstSet
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go
index 60da1165..efd63114 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go
@@ -4,15 +4,15 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "methodExprCall"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects method expression call that can be replaced with a method call"
@@ -21,14 +21,14 @@ foo.bar(f)`
info.After = `f := foo{}
f.bar()`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&methodExprCallChecker{ctx: ctx})
})
}
type methodExprCallChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *methodExprCallChecker) VisitExpr(x ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go
index 4a0331d5..de02c7ee 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go
@@ -3,15 +3,15 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "nestingReduce"
info.Tags = []string{"style", "opinionated", "experimental"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"bodyWidth": {
Value: 5,
Usage: "min number of statements inside a branch to trigger a warning",
@@ -32,7 +32,7 @@ for _, v := range a {
body()
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &nestingReduceChecker{ctx: ctx}
c.bodyWidth = info.Params.Int("bodyWidth")
return astwalk.WalkerForStmt(c)
@@ -41,7 +41,7 @@ for _, v := range a {
type nestingReduceChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
bodyWidth int
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go
index 75e7f642..7b9f02bd 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go
@@ -3,29 +3,29 @@ package checkers
import (
"go/ast"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"golang.org/x/tools/go/ast/astutil"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "newDeref"
- info.Tags = []string{"style", "experimental"}
+ info.Tags = []string{"style"}
info.Summary = "Detects immediate dereferencing of `new` expressions"
info.Before = `x := *new(bool)`
info.After = `x := false`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&newDerefChecker{ctx: ctx})
})
}
type newDerefChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *newDerefChecker) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go
index 231e2580..37f964f7 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go
@@ -4,14 +4,14 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "nilValReturn"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects return statements those results evaluate to nil"
@@ -29,14 +29,14 @@ if err != nil {
return err
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&nilValReturnChecker{ctx: ctx})
})
}
type nilValReturnChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *nilValReturnChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go
index e40ec6db..a1d96804 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go
@@ -5,20 +5,20 @@ import (
"go/token"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "octalLiteral"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects octal literals passed to functions"
info.Before = `foo(02)`
info.After = `foo(2)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &octalLiteralChecker{
ctx: ctx,
octFriendlyPkg: map[string]bool{
@@ -32,7 +32,7 @@ func init() {
type octalLiteralChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
octFriendlyPkg map[string]bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go b/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go
index d5c8de0b..df20b429 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/offBy1_checker.go
@@ -4,8 +4,8 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astequal"
@@ -13,21 +13,21 @@ import (
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "offBy1"
- info.Tags = []string{"diagnostic", "experimental"}
+ info.Tags = []string{"diagnostic"}
info.Summary = "Detects various off-by-one kind of errors"
info.Before = `xs[len(xs)]`
info.After = `xs[len(xs)-1]`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&offBy1Checker{ctx: ctx})
})
}
type offBy1Checker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *offBy1Checker) VisitExpr(e ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go
index ffa74061..f9f9d6c5 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go
@@ -3,27 +3,27 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "paramTypeCombine"
info.Tags = []string{"style", "opinionated"}
info.Summary = "Detects if function parameters could be combined by type and suggest the way to do it"
info.Before = `func foo(a, b int, c, d int, e, f int, g int) {}`
info.After = `func foo(a, b, c, d, e, f, g int) {}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForFuncDecl(&paramTypeCombineChecker{ctx: ctx})
})
}
type paramTypeCombineChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *paramTypeCombineChecker) EnterFunc(*ast.FuncDecl) bool {
@@ -51,7 +51,8 @@ func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.Fie
// ast.Field have empty name list.
skip := params == nil ||
len(params.List) < 2 ||
- len(params.List[0].Names) == 0
+ len(params.List[0].Names) == 0 ||
+ c.paramsAreMultiLine(params)
if skip {
return params
}
@@ -84,3 +85,9 @@ func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.Fie
func (c *paramTypeCombineChecker) warn(f1, f2 *ast.FuncType) {
c.ctx.Warn(f1, "%s could be replaced with %s", f1, f2)
}
+
+func (c *paramTypeCombineChecker) paramsAreMultiLine(params *ast.FieldList) bool {
+ startPos := c.ctx.FileSet.Position(params.Opening)
+ endPos := c.ctx.FileSet.Position(params.Closing)
+ return startPos.Line != endPos.Line
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go
index dacffc85..2716fe04 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go
@@ -4,26 +4,26 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "ptrToRefParam"
info.Tags = []string{"style", "opinionated", "experimental"}
info.Summary = "Detects input and output parameters that have a type of pointer to referential type"
info.Before = `func f(m *map[string]int) (*chan *int)`
info.After = `func f(m map[string]int) (chan *int)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForFuncDecl(&ptrToRefParamChecker{ctx: ctx})
})
}
type ptrToRefParamChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *ptrToRefParamChecker) VisitFuncDecl(fn *ast.FuncDecl) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go
index 387d1bbb..90b5987a 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go
@@ -4,15 +4,15 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "rangeExprCopy"
info.Tags = []string{"performance"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"sizeThreshold": {
Value: 512,
Usage: "size in bytes that makes the warning trigger",
@@ -36,7 +36,7 @@ for _, x := range &xs { // No copy
}`
info.Note = "See Go issue for details: https://github.com/golang/go/issues/15812."
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &rangeExprCopyChecker{ctx: ctx}
c.sizeThreshold = int64(info.Params.Int("sizeThreshold"))
c.skipTestFuncs = info.Params.Bool("skipTestFuncs")
@@ -46,7 +46,7 @@ for _, x := range &xs { // No copy
type rangeExprCopyChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
sizeThreshold int64
skipTestFuncs bool
diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go
index 182538a9..57dcc315 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go
@@ -3,15 +3,15 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "rangeValCopy"
info.Tags = []string{"performance"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"sizeThreshold": {
Value: 128,
Usage: "size in bytes that makes the warning trigger",
@@ -35,7 +35,7 @@ for i := range xs {
// Loop body.
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &rangeValCopyChecker{ctx: ctx}
c.sizeThreshold = int64(info.Params.Int("sizeThreshold"))
c.skipTestFuncs = info.Params.Bool("skipTestFuncs")
@@ -45,7 +45,7 @@ for i := range xs {
type rangeValCopyChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
sizeThreshold int64
skipTestFuncs bool
diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go
index ef7a3978..411932ee 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/regexpMust_checker.go
@@ -4,28 +4,28 @@ import (
"go/ast"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astp"
"golang.org/x/tools/go/ast/astutil"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "regexpMust"
info.Tags = []string{"style"}
info.Summary = "Detects `regexp.Compile*` that can be replaced with `regexp.MustCompile*`"
info.Before = `re, _ := regexp.Compile("const pattern")`
info.After = `re := regexp.MustCompile("const pattern")`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&regexpMustChecker{ctx: ctx})
})
}
type regexpMustChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *regexpMustChecker) VisitExpr(x ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go
index 383deb5d..018ab42d 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go
@@ -6,19 +6,19 @@ import (
"regexp"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "regexpPattern"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects suspicious regexp patterns"
info.Before = "regexp.MustCompile(`google.com|yandex.ru`)"
info.After = "regexp.MustCompile(`google\\.com|yandex\\.ru`)"
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
domains := []string{
"com",
"org",
@@ -39,7 +39,7 @@ func init() {
type regexpPatternChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
domainRE *regexp.Regexp
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go
new file mode 100644
index 00000000..10dcb327
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go
@@ -0,0 +1,511 @@
+package checkers
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "log"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/quasilyte/regex/syntax"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "regexpSimplify"
+ info.Tags = []string{"style", "experimental", "opinionated"}
+ info.Summary = "Detects regexp patterns that can be simplified"
+ info.Before = "regexp.MustCompile(`(?:a|b|c)   [a-z][a-z]*`)"
+ info.After = "regexp.MustCompile(`[abc] {3}[a-z]+`)"
+
+ // TODO(quasilyte): add params to control most opinionated replacements
+ // like `[0-9] -> \d`
+ // `[[:digit:]] -> \d`
+ // `[A-Za-z0-9_]` -> `\w`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ opts := &syntax.ParserOptions{
+ NoLiterals: true,
+ }
+ c := ®expSimplifyChecker{
+ ctx: ctx,
+ parser: syntax.NewParser(opts),
+ out: &strings.Builder{},
+ }
+ return astwalk.WalkerForExpr(c)
+ })
+}
+
+type regexpSimplifyChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+ parser *syntax.Parser
+
+ // out is a tmp buffer where we build a simplified regexp pattern.
+ out *strings.Builder
+ // score is the number of applied simplifications.
+ score int
+}
+
+func (c *regexpSimplifyChecker) VisitExpr(x ast.Expr) {
+ call, ok := x.(*ast.CallExpr)
+ if !ok {
+ return
+ }
+
+ switch qualifiedName(call.Fun) {
+ case "regexp.Compile", "regexp.MustCompile":
+ cv := c.ctx.TypesInfo.Types[call.Args[0]].Value
+ if cv == nil || cv.Kind() != constant.String {
+ return
+ }
+ pat := constant.StringVal(cv)
+ if len(pat) > 60 {
+ // Skip scary regexp patterns for now.
+ break
+ }
+
+ // Only do 2 passes.
+ simplified := pat
+ for pass := 0; pass < 2; pass++ {
+ candidate := c.simplify(pass, simplified)
+ if candidate == "" {
+ break
+ }
+ simplified = candidate
+ }
+ if simplified != "" && simplified != pat {
+ c.warn(call.Args[0], pat, simplified)
+ }
+ }
+}
+
+func (c *regexpSimplifyChecker) simplify(pass int, pat string) string {
+ re, err := c.parser.Parse(pat)
+ if err != nil {
+ return ""
+ }
+
+ c.score = 0
+ c.out.Reset()
+
+ // TODO(quasilyte): suggest char ranges for things like [012345689]?
+ // TODO(quasilyte): evaluate char range to suggest better replacements.
+ // TODO(quasilyte): (?:ab|ac) -> a[bc]
+ // TODO(quasilyte): suggest "s" and "." flag if things like [\w\W] are used.
+ // TODO(quasilyte): x{n}x? -> x{n,n+1}
+
+ c.walk(re.Expr)
+
+ if debug() {
+ // This happens only in one of two cases:
+ // 1. Parser has a bug and we got invalid AST for the given pattern.
+ // 2. Simplifier incorrectly built a replacement string from the AST.
+ if c.score == 0 && c.out.String() != pat {
+ log.Printf("pass %d: unexpected pattern diff:\n\thave: %q\n\twant: %q",
+ pass, c.out.String(), pat)
+ }
+ }
+
+ if c.score > 0 {
+ return c.out.String()
+ }
+ return ""
+}
+
+func (c *regexpSimplifyChecker) walk(e syntax.Expr) {
+ out := c.out
+
+ switch e.Op {
+ case syntax.OpConcat:
+ c.walkConcat(e)
+
+ case syntax.OpAlt:
+ c.walkAlt(e)
+
+ case syntax.OpCharRange:
+ s := c.simplifyCharRange(e)
+ if s != "" {
+ out.WriteString(s)
+ c.score++
+ } else {
+ out.WriteString(e.Value)
+ }
+
+ case syntax.OpGroupWithFlags:
+ out.WriteString("(")
+ out.WriteString(e.Args[1].Value)
+ out.WriteString(":")
+ c.walk(e.Args[0])
+ out.WriteString(")")
+ case syntax.OpGroup:
+ c.walkGroup(e)
+ case syntax.OpCapture:
+ out.WriteString("(")
+ c.walk(e.Args[0])
+ out.WriteString(")")
+ case syntax.OpNamedCapture:
+ out.WriteString("(?P<")
+ out.WriteString(e.Args[1].Value)
+ out.WriteString(">")
+ c.walk(e.Args[0])
+ out.WriteString(")")
+
+ case syntax.OpRepeat:
+ // TODO(quasilyte): is it worth it to analyze repeat argument
+ // more closely and handle `{n,n} -> {n}` cases?
+ rep := e.Args[1].Value
+ switch rep {
+ case "{0,1}":
+ c.walk(e.Args[0])
+ out.WriteString("?")
+ c.score++
+ case "{1,}":
+ c.walk(e.Args[0])
+ out.WriteString("+")
+ c.score++
+ case "{0,}":
+ c.walk(e.Args[0])
+ out.WriteString("*")
+ c.score++
+ case "{0}":
+ // Maybe {0} should be reported by another check, regexpLint?
+ c.score++
+ case "{1}":
+ c.walk(e.Args[0])
+ c.score++
+ default:
+ c.walk(e.Args[0])
+ out.WriteString(rep)
+ }
+
+ case syntax.OpPosixClass:
+ out.WriteString(e.Value)
+
+ case syntax.OpNegCharClass:
+ s := c.simplifyNegCharClass(e)
+ if s != "" {
+ c.out.WriteString(s)
+ c.score++
+ } else {
+ out.WriteString("[^")
+ for _, e := range e.Args {
+ c.walk(e)
+ }
+ out.WriteString("]")
+ }
+
+ case syntax.OpCharClass:
+ s := c.simplifyCharClass(e)
+ if s != "" {
+ c.out.WriteString(s)
+ c.score++
+ } else {
+ out.WriteString("[")
+ for _, e := range e.Args {
+ c.walk(e)
+ }
+ out.WriteString("]")
+ }
+
+ case syntax.OpEscapeChar:
+ switch e.Value {
+ case `\&`, `\#`, `\!`, `\@`, `\%`, `\<`, `\>`, `\:`, `\;`, `\/`, `\,`, `\=`, `\.`:
+ c.score++
+ out.WriteString(e.Value[len(`\`):])
+ default:
+ out.WriteString(e.Value)
+ }
+
+ case syntax.OpQuestion, syntax.OpNonGreedy:
+ c.walk(e.Args[0])
+ out.WriteString("?")
+ case syntax.OpStar:
+ c.walk(e.Args[0])
+ out.WriteString("*")
+ case syntax.OpPlus:
+ c.walk(e.Args[0])
+ out.WriteString("+")
+
+ default:
+ out.WriteString(e.Value)
+ }
+}
+
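+// walkGroup drops a redundant non-capturing group around a single
+// char, escape or char class, emitting just the inner expression;
+// any other group is re-emitted as `(?:...)` unchanged.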
+func (c *regexpSimplifyChecker) walkGroup(g syntax.Expr) {
+ switch g.Args[0].Op {
+ case syntax.OpChar, syntax.OpEscapeChar, syntax.OpEscapeMeta, syntax.OpCharClass:
+ c.walk(g.Args[0])
+ c.score++
+ return
+ }
+
+ c.out.WriteString("(?:")
+ c.walk(g.Args[0])
+ c.out.WriteString(")")
+}
+
+func (c *regexpSimplifyChecker) simplifyNegCharClass(e syntax.Expr) string {
+ switch e.Value {
+ case `[^0-9]`:
+ return `\D`
+ case `[^\s]`:
+ return `\S`
+ case `[^\S]`:
+ return `\s`
+ case `[^\w]`:
+ return `\W`
+ case `[^\W]`:
+ return `\w`
+ case `[^\d]`:
+ return `\D`
+ case `[^\D]`:
+ return `\d`
+ case `[^[:^space:]]`:
+ return `\s`
+ case `[^[:space:]]`:
+ return `\S`
+ case `[^[:^word:]]`:
+ return `\w`
+ case `[^[:word:]]`:
+ return `\W`
+ case `[^[:^digit:]]`:
+ return `\d`
+ case `[^[:digit:]]`:
+ return `\D`
+ }
+
+ return ""
+}
+
+func (c *regexpSimplifyChecker) simplifyCharClass(e syntax.Expr) string {
+ switch e.Value {
+ case `[0-9]`:
+ return `\d`
+ case `[[:word:]]`:
+ return `\w`
+ case `[[:^word:]]`:
+ return `\W`
+ case `[[:digit:]]`:
+ return `\d`
+ case `[[:^digit:]]`:
+ return `\D`
+ case `[[:space:]]`:
+ return `\s`
+ case `[[:^space:]]`:
+ return `\S`
+ case `[][]`:
+ return `\]\[`
+ case `[]]`:
+ return `\]`
+ }
+
+ if len(e.Args) == 1 {
+ switch e.Args[0].Op {
+ case syntax.OpChar:
+ switch v := e.Args[0].Value; v {
+ case "|", "*", "+", "?", ".", "[", "^", "$", "(", ")":
+ // Can't be taken outside of the char group without escaping.
+ default:
+ return v
+ }
+ case syntax.OpEscapeChar:
+ return e.Args[0].Value
+ }
+ }
+
+ return ""
+}
+
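+// canMerge reports whether x and y denote the same regexp atom, so that
+// a trailing `y*` can be folded into the preceding `x` to form `x+`.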
+func (c *regexpSimplifyChecker) canMerge(x, y syntax.Expr) bool {
+ if x.Op != y.Op {
+ return false
+ }
+ switch x.Op {
+ case syntax.OpChar, syntax.OpCharClass, syntax.OpEscapeMeta, syntax.OpEscapeChar, syntax.OpNegCharClass, syntax.OpGroup:
+ return x.Value == y.Value
+ default:
+ return false
+ }
+}
+
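+// canCombine reports whether adjacent x and y may be rewritten using a
+// `{N}` repetition. The threshold is the minimum number of extra copies of
+// x that must follow before `{N}` is suggested: short atoms like a plain
+// char need more repetitions to be worth it, while spaces, char classes
+// and groups are combined sooner.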
+func (c *regexpSimplifyChecker) canCombine(x, y syntax.Expr) (threshold int, ok bool) {
+ if x.Op != y.Op {
+ return 0, false
+ }
+
+ switch x.Op {
+ case syntax.OpDot:
+ return 3, true
+
+ case syntax.OpChar:
+ if x.Value != y.Value {
+ return 0, false
+ }
+ if x.Value == " " {
+ return 1, true
+ }
+ return 4, true
+
+ case syntax.OpEscapeMeta, syntax.OpEscapeChar:
+ if x.Value == y.Value {
+ return 2, true
+ }
+
+ case syntax.OpCharClass, syntax.OpNegCharClass, syntax.OpGroup:
+ if x.Value == y.Value {
+ return 1, true
+ }
+ }
+
+ return 0, false
+}
+
+func (c *regexpSimplifyChecker) concatLiteral(e syntax.Expr) string {
+ if e.Op == syntax.OpConcat && c.allChars(e) {
+ return e.Value
+ }
+ return ""
+}
+
+func (c *regexpSimplifyChecker) allChars(e syntax.Expr) bool {
+ for _, a := range e.Args {
+ if a.Op != syntax.OpChar {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *regexpSimplifyChecker) factorPrefixSuffix(alt syntax.Expr) bool {
+ // TODO: more forms of prefixes/suffixes?
+ //
+ // A more generalized algorithm could handle `fo|fo1|fo2` -> `fo[12]?`,
+ // but it's an open question whether the latter form is universally better.
+ //
+ // Right now it handles only the simplest cases:
+ // `http|https` -> `https?`
+ // `xfoo|foo` -> `x?foo`
+ if len(alt.Args) != 2 {
+ return false
+ }
+ x := c.concatLiteral(alt.Args[0])
+ y := c.concatLiteral(alt.Args[1])
+ if x == y {
+ return false // Reject non-literals and identical strings early
+ }
+
+ // Let x be a shorter string.
+ if len(x) > len(y) {
+ x, y = y, x
+ }
+ // Do we have a common prefix?
+ tail := strings.TrimPrefix(y, x)
+ if len(tail) <= utf8.UTFMax && utf8.RuneCountInString(tail) == 1 {
+ c.out.WriteString(x + tail + "?")
+ c.score++
+ return true
+ }
+ // Do we have a common suffix?
+ head := strings.TrimSuffix(y, x)
+ if len(head) <= utf8.UTFMax && utf8.RuneCountInString(head) == 1 {
+ c.out.WriteString(head + "?" + x)
+ c.score++
+ return true
+ }
+ return false
+}
+
+func (c *regexpSimplifyChecker) walkAlt(alt syntax.Expr) {
+ // `x|y|z` -> `[xyz]`.
+ if c.allChars(alt) {
+ c.score++
+ c.out.WriteString("[")
+ for _, e := range alt.Args {
+ c.out.WriteString(e.Value)
+ }
+ c.out.WriteString("]")
+ return
+ }
+
+ if c.factorPrefixSuffix(alt) {
+ return
+ }
+
+ for i, e := range alt.Args {
+ c.walk(e)
+ if i != len(alt.Args)-1 {
+ c.out.WriteString("|")
+ }
+ }
+}
+
+func (c *regexpSimplifyChecker) walkConcat(concat syntax.Expr) {
+ i := 0
+ for i < len(concat.Args) {
+ x := concat.Args[i]
+ c.walk(x)
+ i++
+
+ if i >= len(concat.Args) {
+ break
+ }
+
+ // Try merging `xy*` into `x+` where x=y.
+ if concat.Args[i].Op == syntax.OpStar {
+ if c.canMerge(x, concat.Args[i].Args[0]) {
+ c.out.WriteString("+")
+ c.score++
+ i++
+ continue
+ }
+ }
+
+ // Try combining `xy` into `x{2}` where x=y.
+ threshold, ok := c.canCombine(x, concat.Args[i])
+ if !ok {
+ continue
+ }
+ n := 1 // Can combine at least 1 pair.
+ for j := i + 1; j < len(concat.Args); j++ {
+ _, ok := c.canCombine(x, concat.Args[j])
+ if !ok {
+ break
+ }
+ n++
+ }
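+ // The already-emitted x plus the n combined copies give n+1 repetitions.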
+ if n >= threshold {
+ fmt.Fprintf(c.out, "{%d}", n+1)
+ c.score++
+ i += n
+ }
+ }
+}
+
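+// simplifyCharRange expands trivial ranges inside a char class:
+// `a-a` becomes `a`, while 2- and 3-element ranges like `a-b` and `a-c`
+// are rewritten as an explicit char list.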
+func (c *regexpSimplifyChecker) simplifyCharRange(rng syntax.Expr) string {
+ if rng.Args[0].Op != syntax.OpChar || rng.Args[1].Op != syntax.OpChar {
+ return ""
+ }
+
+ lo := rng.Args[0].Value
+ hi := rng.Args[1].Value
+ if len(lo) == 1 && len(hi) == 1 {
+ switch hi[0] - lo[0] {
+ case 0:
+ return lo
+ case 1:
+ return fmt.Sprintf("%s%s", lo, hi)
+ case 2:
+ return fmt.Sprintf("%s%s%s", lo, string(lo[0]+1), hi)
+ }
+ }
+
+ return ""
+}
+
+func (c *regexpSimplifyChecker) warn(cause ast.Expr, orig, suggest string) {
+ c.ctx.Warn(cause, "can re-write `%s` as `%s`", orig, suggest)
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go
new file mode 100644
index 00000000..d9799102
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go
@@ -0,0 +1,95 @@
+package checkers
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+ "io/ioutil"
+ "log"
+
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/quasilyte/go-ruleguard/ruleguard"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "ruleguard"
+ info.Tags = []string{"style", "experimental"}
+ info.Params = linter.CheckerParams{
+ "rules": {
+ Value: "",
+ Usage: "path to a gorules file",
+ },
+ }
+ info.Summary = "Runs user-defined rules using ruleguard linter"
+ info.Details = "Reads a rules file and turns its rules into go-critic checkers."
+ info.Before = `N/A`
+ info.After = `N/A`
+ info.Note = "See https://github.com/quasilyte/go-ruleguard."
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return newRuleguardChecker(&info, ctx)
+ })
+}
+
+func newRuleguardChecker(info *linter.CheckerInfo, ctx *linter.CheckerContext) *ruleguardChecker {
+ c := &ruleguardChecker{ctx: ctx}
+ rulesFilename := info.Params.String("rules")
+ if rulesFilename == "" {
+ return c
+ }
+
+ // TODO(quasilyte): handle initialization errors better when we make
+ // a transition to the go/analysis framework.
+ //
+ // For now, we log error messages and return a ruleguard checker
+ // with an empty rules set.
+
+ data, err := ioutil.ReadFile(rulesFilename)
+ if err != nil {
+ log.Printf("ruleguard init error: %+v", err)
+ return c
+ }
+
+ fset := token.NewFileSet()
+ rset, err := ruleguard.ParseRules(rulesFilename, fset, bytes.NewReader(data))
+ if err != nil {
+ log.Printf("ruleguard init error: %+v", err)
+ return c
+ }
+
+ c.rset = rset
+ return c
+}
+
+type ruleguardChecker struct {
+ ctx *linter.CheckerContext
+
+ rset *ruleguard.GoRuleSet
+}
+
+func (c *ruleguardChecker) WalkFile(f *ast.File) {
+ if c.rset == nil {
+ return
+ }
+
+ ctx := &ruleguard.Context{
+ Pkg: c.ctx.Pkg,
+ Types: c.ctx.TypesInfo,
+ Sizes: c.ctx.SizesInfo,
+ Fset: c.ctx.FileSet,
+ Report: func(_ ruleguard.GoRuleInfo, n ast.Node, msg string, _ *ruleguard.Suggestion) {
+ // TODO(quasilyte): investigate whether we should add a rule name as
+ // a message prefix here.
+ c.ctx.Warn(n, msg)
+ },
+ }
+
+ err := ruleguard.RunRules(ctx, f, c.rset)
+ if err != nil {
+ // Normally this should never happen, but since
+ // we don't have a better mechanism to report errors,
+ // emit a warning.
+ c.ctx.Warn(f, "execution error: %v", err)
+ }
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go
index 6cdb06ae..abead3fa 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go
@@ -2,13 +2,15 @@ package checkers
import (
"go/ast"
+ "go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "golang.org/x/tools/go/ast/astutil"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "singleCaseSwitch"
info.Tags = []string{"style"}
info.Summary = "Detects switch statements that could be better written as an if statement"
@@ -22,14 +24,14 @@ if x, ok := x.(int); ok {
body()
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&singleCaseSwitchChecker{ctx: ctx})
})
}
type singleCaseSwitchChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *singleCaseSwitchChecker) VisitStmt(stmt ast.Stmt) {
@@ -42,14 +44,35 @@ func (c *singleCaseSwitchChecker) VisitStmt(stmt ast.Stmt) {
}
func (c *singleCaseSwitchChecker) checkSwitchStmt(stmt ast.Stmt, body *ast.BlockStmt) {
- if len(body.List) == 1 {
- if body.List[0].(*ast.CaseClause).List == nil {
- // default case.
- c.warnDefault(stmt)
- } else if len(body.List[0].(*ast.CaseClause).List) == 1 {
- c.warn(stmt)
- }
+ if len(body.List) != 1 {
+ return
}
+ cc := body.List[0].(*ast.CaseClause)
+ if c.hasBreak(cc) {
+ return
+ }
+ switch {
+ case cc.List == nil:
+ c.warnDefault(stmt)
+ case len(cc.List) == 1:
+ c.warn(stmt)
+ }
+}
+
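+// hasBreak reports whether the case clause contains a break statement;
+// such a switch cannot be rewritten as a plain if. Breaks inside nested
+// loops, selects and switches are ignored, since they bind to the inner
+// statement.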
+func (c *singleCaseSwitchChecker) hasBreak(stmt ast.Stmt) bool {
+ found := false
+ astutil.Apply(stmt, func(cur *astutil.Cursor) bool {
+ switch n := cur.Node().(type) {
+ case *ast.BranchStmt:
+ if n.Tok == token.BREAK {
+ found = true
+ }
+ case *ast.ForStmt, *ast.RangeStmt, *ast.SelectStmt, *ast.SwitchStmt:
+ return false
+ }
+ return true
+ }, nil)
+ return found
}
func (c *singleCaseSwitchChecker) warn(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go
index 45123ec6..e12545ff 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyLen_checker.go
@@ -4,14 +4,14 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astfmt"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "sloppyLen"
info.Tags = []string{"style"}
info.Summary = "Detects usage of `len` when result is obvious or doesn't make sense"
@@ -23,14 +23,14 @@ len(arr) < 0 // Doesn't make sense at all`
len(arr) > 0
len(arr) == 0`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&sloppyLenChecker{ctx: ctx})
})
}
type sloppyLenChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *sloppyLenChecker) VisitExpr(x ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go
index 1a7c1987..d099450d 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go
@@ -4,29 +4,29 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "sloppyReassign"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects suspicious/confusing re-assignments"
info.Before = `if err = f(); err != nil { return err }`
info.After = `if err := f(); err != nil { return err }`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&sloppyReassignChecker{ctx: ctx})
})
}
type sloppyReassignChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *sloppyReassignChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go
new file mode 100644
index 00000000..4abfcbab
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go
@@ -0,0 +1,75 @@
+package checkers
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/go-toolsmith/astcast"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "sloppyTypeAssert"
+ info.Tags = []string{"diagnostic", "experimental"}
+ info.Summary = "Detects redundant type assertions"
+ info.Before = `
+func f(r io.Reader) interface{} {
+ return r.(interface{})
+}
+`
+ info.After = `
+func f(r io.Reader) interface{} {
+ return r
+}
+`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return astwalk.WalkerForExpr(&sloppyTypeAssertChecker{ctx: ctx})
+ })
+}
+
+type sloppyTypeAssertChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+}
+
+func (c *sloppyTypeAssertChecker) VisitExpr(expr ast.Expr) {
+ assert := astcast.ToTypeAssertExpr(expr)
+ if assert.Type == nil {
+ return
+ }
+
+ toType := c.ctx.TypesInfo.TypeOf(expr)
+ fromType := c.ctx.TypesInfo.TypeOf(assert.X)
+
+ if types.Identical(toType, fromType) {
+ c.warnIdentical(expr)
+ return
+ }
+
+ toIface, ok := toType.Underlying().(*types.Interface)
+ if !ok {
+ return
+ }
+
+ switch {
+ case toIface.Empty():
+ c.warnEmpty(expr)
+ case types.Implements(fromType, toIface):
+ c.warnImplements(expr, assert.X)
+ }
+}
+
+func (c *sloppyTypeAssertChecker) warnIdentical(cause ast.Expr) {
+ c.ctx.Warn(cause, "type assertion from/to types are identical")
+}
+
+func (c *sloppyTypeAssertChecker) warnEmpty(cause ast.Expr) {
+ c.ctx.Warn(cause, "type assertion to interface{} may be redundant")
+}
+
+func (c *sloppyTypeAssertChecker) warnImplements(cause, val ast.Expr) {
+ c.ctx.Warn(cause, "type assertion may be redundant as %s always implements selected interface", val)
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go
new file mode 100644
index 00000000..b80c1787
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go
@@ -0,0 +1,135 @@
+package checkers
+
+import (
+ "go/ast"
+ "go/token"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/lintutil"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/go-toolsmith/astcast"
+ "github.com/go-toolsmith/astequal"
+ "github.com/go-toolsmith/typep"
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "sortSlice"
+ info.Tags = []string{"diagnostic", "experimental"}
+ info.Summary = "Detects suspicious sort.Slice calls"
+ info.Before = `sort.Slice(xs, func(i, j int) bool { return keys[i] < keys[j] })`
+ info.After = `sort.Slice(kv, func(i, j int) bool { return kv[i].key < kv[j].key })`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return astwalk.WalkerForExpr(&sortSliceChecker{ctx: ctx})
+ })
+}
+
+type sortSliceChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+}
+
+func (c *sortSliceChecker) VisitExpr(expr ast.Expr) {
+ call := astcast.ToCallExpr(expr)
+ if len(call.Args) != 2 {
+ return
+ }
+ switch qualifiedName(call.Fun) {
+ case "sort.Slice", "sort.SliceStable":
+ // OK.
+ default:
+ return
+ }
+
+ slice := c.unwrapSlice(call.Args[0])
+ lessFunc, ok := call.Args[1].(*ast.FuncLit)
+ if !ok {
+ return
+ }
+ if !typep.SideEffectFree(c.ctx.TypesInfo, slice) {
+ return // Don't check unpredictable slice values
+ }
+
+ ivar, jvar := c.paramIdents(lessFunc.Type)
+ if ivar == nil || jvar == nil {
+ return
+ }
+
+ if len(lessFunc.Body.List) != 1 {
+ return
+ }
+ ret, ok := lessFunc.Body.List[0].(*ast.ReturnStmt)
+ if !ok {
+ return
+ }
+ cmp := astcast.ToBinaryExpr(astutil.Unparen(ret.Results[0]))
+ if !typep.SideEffectFree(c.ctx.TypesInfo, cmp) {
+ return
+ }
+ switch cmp.Op {
+ case token.LSS, token.LEQ, token.GTR, token.GEQ:
+ // Both cmp.X and cmp.Y are expected to be some expressions
+ // over the `slice` expression. In the simplest case,
+ // it's a `slice[i] <op> slice[j]` comparison.
+ if !c.containsSlice(cmp.X, slice) && !c.containsSlice(cmp.Y, slice) {
+ c.warnSlice(cmp, slice)
+ }
+
+ // This one is more about style, but it can reveal a potential issue
+ // or misprint in the sorting condition.
+ // We warn if X contains indexing with the `j` index and Y
+ // contains indexing with `i`, i.e. the params are used in reversed order.
+ if c.containsIndex(cmp.X, jvar) && c.containsIndex(cmp.Y, ivar) {
+ c.warnIndex(cmp, ivar, jvar)
+ }
+ }
+}
+
+func (c *sortSliceChecker) paramIdents(e *ast.FuncType) (*ast.Ident, *ast.Ident) {
+ // Covers both `i, j int` and `i int, j int`.
+ idents := make([]*ast.Ident, 0, 2)
+ for _, field := range e.Params.List {
+ idents = append(idents, field.Names...)
+ }
+ if len(idents) == 2 {
+ return idents[0], idents[1]
+ }
+ return nil, nil
+}
+
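+// unwrapSlice strips parentheses and, for slicing expressions like xs[:],
+// returns the sliced operand itself.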
+func (c *sortSliceChecker) unwrapSlice(e ast.Expr) ast.Expr {
+ switch e := e.(type) {
+ case *ast.ParenExpr:
+ return c.unwrapSlice(e.X)
+ case *ast.SliceExpr:
+ return e.X
+ default:
+ return e
+ }
+}
+
+func (c *sortSliceChecker) containsIndex(e, index ast.Expr) bool {
+ return lintutil.ContainsNode(e, func(n ast.Node) bool {
+ indexing, ok := n.(*ast.IndexExpr)
+ if !ok {
+ return false
+ }
+ return astequal.Expr(indexing.Index, index)
+ })
+}
+
+func (c *sortSliceChecker) containsSlice(e, slice ast.Expr) bool {
+ return lintutil.ContainsNode(e, func(n ast.Node) bool {
+ return astequal.Node(n, slice)
+ })
+}
+
+func (c *sortSliceChecker) warnSlice(cause ast.Node, slice ast.Expr) {
+ c.ctx.Warn(cause, "cmp func must use %s slice in comparison", slice)
+}
+
+func (c *sortSliceChecker) warnIndex(cause ast.Node, ivar, jvar *ast.Ident) {
+ c.ctx.Warn(cause, "unusual order of {%s,%s} params in comparison", ivar, jvar)
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go
new file mode 100644
index 00000000..697a82cc
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go
@@ -0,0 +1,167 @@
+package checkers
+
+import (
+ "go/ast"
+ "go/types"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/go-toolsmith/astcast"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "sqlQuery"
+ info.Tags = []string{"diagnostic", "experimental"}
+ info.Summary = "Detects issues in Query() and Exec() calls"
+ info.Before = `_, err := db.Query("UPDATE ...")`
+ info.After = `_, err := db.Exec("UPDATE ...")`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return astwalk.WalkerForStmt(&sqlQueryChecker{ctx: ctx})
+ })
+}
+
+type sqlQueryChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+}
+
+func (c *sqlQueryChecker) VisitStmt(stmt ast.Stmt) {
+ assign := astcast.ToAssignStmt(stmt)
+ if len(assign.Lhs) != 2 { // Query() has 2 return values.
+ return
+ }
+ if len(assign.Rhs) != 1 {
+ return
+ }
+
+ // If Query() is called but the first return value is ignored,
+ // there is no way to close/read the returned rows.
+ // This can cause a connection leak.
+ if id, ok := assign.Lhs[0].(*ast.Ident); ok && id.Name != "_" {
+ return
+ }
+
+ call := astcast.ToCallExpr(assign.Rhs[0])
+ funcExpr := astcast.ToSelectorExpr(call.Fun)
+ if !c.funcIsQuery(funcExpr) {
+ return
+ }
+
+ if c.typeHasExecMethod(c.ctx.TypesInfo.TypeOf(funcExpr.X)) {
+ c.warnAndSuggestExec(funcExpr)
+ } else {
+ c.warnRowsIgnored(funcExpr)
+ }
+}
+
+func (c *sqlQueryChecker) funcIsQuery(funcExpr *ast.SelectorExpr) bool {
+ if funcExpr.Sel == nil {
+ return false
+ }
+ switch funcExpr.Sel.Name {
+ case "Query", "QueryContext":
+ // Stdlib and friends.
+ case "Queryx", "QueryxContext":
+ // sqlx.
+ default:
+ return false
+ }
+
+ // To avoid false positives (unrelated types can have a Query method),
+ // check that the first returned type has a Rows-like name.
+ typ, ok := c.ctx.TypesInfo.TypeOf(funcExpr).Underlying().(*types.Signature)
+ if !ok || typ.Results() == nil || typ.Results().Len() != 2 {
+ return false
+ }
+ if !c.typeIsRowsLike(typ.Results().At(0).Type()) {
+ return false
+ }
+
+ return true
+}
+
+func (c *sqlQueryChecker) typeIsRowsLike(typ types.Type) bool {
+ switch typ := typ.(type) {
+ case *types.Pointer:
+ return c.typeIsRowsLike(typ.Elem())
+ case *types.Named:
+ return typ.Obj().Name() == "Rows"
+ default:
+ return false
+ }
+}
+
+func (c *sqlQueryChecker) funcIsExec(fn *types.Func) bool {
+ if fn.Name() != "Exec" {
+ return false
+ }
+
+ // Expect exactly 2 results.
+ sig := fn.Type().(*types.Signature)
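+// simplify returns a simplified version of pat, or an empty string if the
+// pattern could not be parsed or no simplification was applied.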
+ if sig.Results() == nil || sig.Results().Len() != 2 {
+ return false
+ }
+
+ // Expect at least 1 param and it should be a string (query).
+ params := sig.Params()
+ if params == nil || params.Len() == 0 {
+ return false
+ }
+ if typ, ok := params.At(0).Type().(*types.Basic); !ok || typ.Kind() != types.String {
+ return false
+ }
+
+ return true
+}
+
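+// typeHasExecMethod reports whether typ provides an Exec method (see
+// funcIsExec), looking through pointers, interfaces, named types and
+// embedded struct fields.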
+func (c *sqlQueryChecker) typeHasExecMethod(typ types.Type) bool {
+ switch typ := typ.(type) {
+ case *types.Struct:
+ for i := 0; i < typ.NumFields(); i++ {
+ if c.typeHasExecMethod(typ.Field(i).Type()) {
+ return true
+ }
+ }
+ case *types.Interface:
+ for i := 0; i < typ.NumMethods(); i++ {
+ if c.funcIsExec(typ.Method(i)) {
+ return true
+ }
+ }
+ case *types.Pointer:
+ return c.typeHasExecMethod(typ.Elem())
+ case *types.Named:
+ for i := 0; i < typ.NumMethods(); i++ {
+ if c.funcIsExec(typ.Method(i)) {
+ return true
+ }
+ }
+ switch ut := typ.Underlying().(type) {
+ case *types.Interface:
+ return c.typeHasExecMethod(ut)
+ case *types.Struct:
+ // Check embedded types.
+ for i := 0; i < ut.NumFields(); i++ {
+ field := ut.Field(i)
+ if !field.Embedded() {
+ continue
+ }
+ if c.typeHasExecMethod(field.Type()) {
+ return true
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+func (c *sqlQueryChecker) warnAndSuggestExec(funcExpr *ast.SelectorExpr) {
+ c.ctx.Warn(funcExpr, "use %s.Exec() if returned result is not needed", funcExpr.X)
+}
+
+func (c *sqlQueryChecker) warnRowsIgnored(funcExpr *ast.SelectorExpr) {
+ c.ctx.Warn(funcExpr, "ignoring Query() rows result may lead to a connection leak")
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go b/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go
index 74570108..57e4084f 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/stringXbytes_checker.go
@@ -3,27 +3,27 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/typep"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "stringXbytes"
- info.Tags = []string{"style", "experimental"}
+ info.Tags = []string{"style"}
info.Summary = "Detects redundant conversions between string and []byte"
info.Before = `copy(b, []byte(s))`
info.After = `copy(b, s)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&stringXbytes{ctx: ctx})
})
}
type stringXbytes struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *stringXbytes) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go b/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go
index 3b276627..5390360c 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/switchTrue_checker.go
@@ -3,12 +3,12 @@ package checkers
import (
"go/ast"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "switchTrue"
info.Tags = []string{"style"}
info.Summary = "Detects switch-over-bool statements that use explicit `true` tag value"
@@ -21,14 +21,14 @@ switch {
case x > y:
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&switchTrueChecker{ctx: ctx})
})
}
type switchTrueChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *switchTrueChecker) VisitStmt(stmt ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go
index f4cb9e86..a5b7bdd3 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go
@@ -5,17 +5,17 @@ import (
"go/token"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "truncateCmp"
info.Tags = []string{"diagnostic", "experimental"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"skipArchDependent": {
Value: true,
Usage: "whether to skip int/uint/uintptr types",
@@ -31,7 +31,7 @@ func f(x int32, int16) bool {
return x < int32(y)
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &truncateCmpChecker{ctx: ctx}
c.skipArchDependent = info.Params.Bool("skipArchDependent")
return astwalk.WalkerForExpr(c)
@@ -40,7 +40,7 @@ func f(x int32, int16) bool {
type truncateCmpChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
skipArchDependent bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go
index c0c42e35..2940e57f 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go
@@ -4,16 +4,16 @@ import (
"go/ast"
"go/token"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "typeAssertChain"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects repeated type assertions and suggests to replace them with type switch statement"
@@ -35,14 +35,14 @@ default:
// Code C, uses x.
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&typeAssertChainChecker{ctx: ctx})
})
}
type typeAssertChainChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
cause *ast.IfStmt
visited map[*ast.IfStmt]bool
diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go
index a113597b..2ade4a95 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go
@@ -3,15 +3,15 @@ package checkers
import (
"go/ast"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "typeSwitchVar"
info.Tags = []string{"style"}
info.Summary = "Detects type switches that can benefit from type guard clause with variable"
@@ -34,18 +34,20 @@ default:
return 0
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&typeSwitchVarChecker{ctx: ctx})
})
}
type typeSwitchVarChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
+ count int
}
func (c *typeSwitchVarChecker) VisitStmt(stmt ast.Stmt) {
if stmt, ok := stmt.(*ast.TypeSwitchStmt); ok {
+ c.count = 0
c.checkTypeSwitch(stmt)
}
}
@@ -61,7 +63,7 @@ func (c *typeSwitchVarChecker) checkTypeSwitch(root *ast.TypeSwitchStmt) {
return // Give up: can't handle shadowing without object
}
- for i, clause := range root.Body.List {
+ for _, clause := range root.Body.List {
clause := clause.(*ast.CaseClause)
// Multiple types in a list mean that assert.X will have
// a type of interface{} inside clause body.
@@ -76,13 +78,20 @@ func (c *typeSwitchVarChecker) checkTypeSwitch(root *ast.TypeSwitchStmt) {
return astequal.Node(&assert1, x)
})
if object == c.ctx.TypesInfo.ObjectOf(identOf(assert2)) {
- c.warn(root, i)
+ c.count++
break
}
}
}
+ if c.count > 0 {
+ c.warn(root)
+ }
}
-func (c *typeSwitchVarChecker) warn(n ast.Node, caseIndex int) {
- c.ctx.Warn(n, "case %d can benefit from type switch with assignment", caseIndex)
+func (c *typeSwitchVarChecker) warn(n ast.Node) {
+ msg := "case"
+ if c.count > 1 {
+ msg = "cases"
+ }
+ c.ctx.Warn(n, "%d "+msg+" can benefit from type switch with assignment", c.count)
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go
index a17c77b4..620d2d79 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go
@@ -3,30 +3,30 @@ package checkers
import (
"go/ast"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astp"
"golang.org/x/tools/go/ast/astutil"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "typeUnparen"
info.Tags = []string{"style", "opinionated"}
info.Summary = "Detects unneeded parentheses inside type expressions and suggests removing them"
info.Before = `type foo [](func([](func())))`
info.After = `type foo []func([]func())`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForTypeExpr(&typeUnparenChecker{ctx: ctx}, ctx.TypesInfo)
})
}
type typeUnparenChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *typeUnparenChecker) VisitTypeExpr(x ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go
index dfc6077b..561270c7 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go
@@ -4,17 +4,17 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "underef"
info.Tags = []string{"style"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"skipRecvDeref": {
Value: true,
Usage: "whether to skip (*x).method() calls where x is a pointer receiver",
@@ -28,7 +28,7 @@ v := (*a)[5] // only if a is array`
k.field = 5
v := a[5]`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &underefChecker{ctx: ctx}
c.skipRecvDeref = info.Params.Bool("skipRecvDeref")
return astwalk.WalkerForExpr(c)
@@ -37,7 +37,7 @@ v := a[5]`
type underefChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
skipRecvDeref bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go
index d90c65c2..83c5b148 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go
@@ -4,13 +4,13 @@ import (
"go/ast"
"go/token"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "unlabelStmt"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects redundant statement labels"
@@ -28,14 +28,14 @@ for x := range xs {
}
}`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmt(&unlabelStmtChecker{ctx: ctx})
})
}
type unlabelStmtChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *unlabelStmtChecker) EnterFunc(fn *ast.FuncDecl) bool {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go
index 9e01299b..946227c2 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go
@@ -4,28 +4,28 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "unlambda"
info.Tags = []string{"style"}
info.Summary = "Detects function literals that can be simplified"
info.Before = `func(x int) int { return fn(x) }`
info.After = `fn`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&unlambdaChecker{ctx: ctx})
})
}
type unlambdaChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *unlambdaChecker) VisitExpr(x ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go
index 09423250..f053842e 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go
@@ -4,15 +4,15 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "unnamedResult"
info.Tags = []string{"style", "opinionated", "experimental"}
- info.Params = lintpack.CheckerParams{
+ info.Params = linter.CheckerParams{
"checkExported": {
Value: false,
Usage: "whether to check exported functions",
@@ -22,7 +22,7 @@ func init() {
info.Before = `func f() (float64, float64)`
info.After = `func f() (x, y float64)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
c := &unnamedResultChecker{ctx: ctx}
c.checkExported = info.Params.Bool("checkExported")
return astwalk.WalkerForFuncDecl(c)
@@ -31,7 +31,7 @@ func init() {
type unnamedResultChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
checkExported bool
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go
index e5dc45f7..acc9fadb 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go
@@ -4,12 +4,12 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "unnecessaryBlock"
info.Tags = []string{"style", "opinionated", "experimental"}
info.Summary = "Detects unnecessary braced statement blocks"
@@ -22,14 +22,14 @@ x := 1
x := 1
print(x)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmtList(&unnecessaryBlockChecker{ctx: ctx})
})
}
type unnecessaryBlockChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *unnecessaryBlockChecker) VisitStmtList(statements []ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go
new file mode 100644
index 00000000..ee706e09
--- /dev/null
+++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go
@@ -0,0 +1,111 @@
+package checkers
+
+import (
+ "go/ast"
+
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
+ "github.com/go-toolsmith/astfmt"
+)
+
+func init() {
+ var info linter.CheckerInfo
+ info.Name = "unnecessaryDefer"
+ info.Tags = []string{"diagnostic", "experimental"}
+ info.Summary = "Detects redundantly deferred calls"
+ info.Before = `
+func() {
+ defer os.Remove(filename)
+}`
+ info.After = `
+func() {
+ os.Remove(filename)
+}`
+
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
+ return astwalk.WalkerForFuncDecl(&unnecessaryDeferChecker{ctx: ctx})
+ })
+}
+
+type unnecessaryDeferChecker struct {
+ astwalk.WalkHandler
+ ctx *linter.CheckerContext
+ isFunc bool
+}
+
+// Visit implements ast.Visitor. This visitor keeps track of whether the block
+// statement belongs to a function or to some other block. If the block is not a
+// function body and ends with a defer statement, that is fine, since the defer
+// applies to the outer function.
+func (c *unnecessaryDeferChecker) Visit(node ast.Node) ast.Visitor {
+ switch n := node.(type) {
+ case *ast.FuncDecl, *ast.FuncLit:
+ c.isFunc = true
+ case *ast.BlockStmt:
+ c.checkDeferBeforeReturn(n)
+ default:
+ c.isFunc = false
+ }
+
+ return c
+}
+
+func (c *unnecessaryDeferChecker) VisitFuncDecl(funcDecl *ast.FuncDecl) {
+ // We always start as a function (*ast.FuncDecl.Body passed)
+ c.isFunc = true
+
+ ast.Walk(c, funcDecl.Body)
+}
+
+func (c *unnecessaryDeferChecker) checkDeferBeforeReturn(funcDecl *ast.BlockStmt) {
+ // Check if we have an explicit return or if it's just the end of the scope.
+ explicitReturn := false
+ retIndex := len(funcDecl.List)
+ for i, stmt := range funcDecl.List {
+ retStmt, ok := stmt.(*ast.ReturnStmt)
+ if !ok {
+ continue
+ }
+ explicitReturn = true
+ if !c.isTrivialReturn(retStmt) {
+ continue
+ }
+ retIndex = i
+ break
+ }
+ if retIndex == 0 {
+ return
+ }
+
+ if deferStmt, ok := funcDecl.List[retIndex-1].(*ast.DeferStmt); ok {
+ // If the block is a function body ending with a return, or if we have an
+ // explicit return in any other block, we should warn about the
+ // unnecessary defer.
+ if c.isFunc || explicitReturn {
+ c.warn(deferStmt)
+ }
+ }
+}
+
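+// isTrivialReturn reports whether every return value is a compile-time
+// constant; only then is the defer right before such a return considered
+// redundant.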
+func (c *unnecessaryDeferChecker) isTrivialReturn(ret *ast.ReturnStmt) bool {
+ for _, e := range ret.Results {
+ if !c.isConstExpr(e) {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *unnecessaryDeferChecker) isConstExpr(e ast.Expr) bool {
+ return c.ctx.TypesInfo.Types[e].Value != nil
+}
+
+func (c *unnecessaryDeferChecker) warn(deferStmt *ast.DeferStmt) {
+ s := astfmt.Sprint(deferStmt)
+ if fnlit, ok := deferStmt.Call.Fun.(*ast.FuncLit); ok {
+ // To avoid long and multi-line warning messages,
+ // collapse the function literals.
+ s = "defer " + astfmt.Sprint(fnlit.Type) + "{...}(...)"
+ }
+ c.ctx.Warn(deferStmt, "%s is placed just before return", s)
+}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go
index 06d90819..73f67bc8 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/unslice_checker.go
@@ -4,13 +4,13 @@ import (
"go/ast"
"go/types"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "unslice"
info.Tags = []string{"style"}
info.Summary = "Detects slice expressions that can be simplified to sliced expression itself"
@@ -21,14 +21,14 @@ copy(b[:], values...) // b is []byte`
f(s)
copy(b, values...)`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&unsliceChecker{ctx: ctx})
})
}
type unsliceChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *unsliceChecker) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/utils.go b/vendor/github.com/go-critic/go-critic/checkers/utils.go
index ba4777db..b71f24d7 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/utils.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/utils.go
@@ -5,7 +5,7 @@ import (
"go/types"
"strings"
- "github.com/go-lintpack/lintpack"
+ "github.com/go-critic/go-critic/framework/linter"
)
// goStdlib contains `go list std` command output list.
@@ -247,7 +247,7 @@ func isExampleTestFunc(fn *ast.FuncDecl) bool {
}
// isUnitTestFunc reports whether FuncDecl declares testing function.
-func isUnitTestFunc(ctx *lintpack.CheckerContext, fn *ast.FuncDecl) bool {
+func isUnitTestFunc(ctx *linter.CheckerContext, fn *ast.FuncDecl) bool {
if !strings.HasPrefix(fn.Name.Name, "Test") {
return false
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go b/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go
index ab27f920..4dd494ec 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/valSwap_checker.go
@@ -4,16 +4,16 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "valSwap"
- info.Tags = []string{"style", "experimental"}
+ info.Tags = []string{"style"}
info.Summary = "Detects value swapping code that is not using parallel assignment"
info.Before = `
tmp := *x
@@ -21,14 +21,14 @@ tmp := *x
*y = tmp`
info.After = `*x, *y = *y, *x`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForStmtList(&valSwapChecker{ctx: ctx})
})
}
type valSwapChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *valSwapChecker) VisitStmtList(list []ast.Stmt) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go
index fcd9aee5..1d6ee58e 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go
@@ -4,9 +4,9 @@ import (
"go/ast"
"go/token"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
"github.com/go-critic/go-critic/checkers/internal/lintutil"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
"github.com/go-toolsmith/astequal"
"github.com/go-toolsmith/typep"
@@ -14,21 +14,21 @@ import (
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "weakCond"
info.Tags = []string{"diagnostic", "experimental"}
info.Summary = "Detects conditions that are unsafe due to not being exhaustive"
info.Before = `xs != nil && xs[0] != nil`
info.After = `len(xs) != 0 && xs[0] != nil`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForExpr(&weakCondChecker{ctx: ctx})
})
}
type weakCondChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *weakCondChecker) VisitExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go
index 52fefb82..cc5c5172 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go
@@ -5,12 +5,12 @@ import (
"regexp"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
)
func init() {
- info := lintpack.CheckerInfo{
+ info := linter.CheckerInfo{
Name: "whyNoLint",
Tags: []string{"style", "experimental"},
Summary: "Ensures that `//nolint` comments include an explanation",
@@ -19,7 +19,7 @@ func init() {
}
re := regexp.MustCompile(`^// *nolint(?::[^ ]+)? *(.*)$`)
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForComment(&whyNoLintChecker{
ctx: ctx,
re: re,
@@ -30,7 +30,7 @@ func init() {
type whyNoLintChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
re *regexp.Regexp
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go b/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go
index bba82e5e..bc543e64 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/wrapperFunc_checker.go
@@ -6,20 +6,20 @@ import (
"go/types"
"strings"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcast"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "wrapperFunc"
- info.Tags = []string{"style", "experimental"}
+ info.Tags = []string{"style"}
info.Summary = "Detects function calls that can be replaced with convenience wrappers"
info.Before = `wg.Add(-1)`
info.After = `wg.Done()`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
type arg struct {
index int
value string
@@ -81,6 +81,11 @@ func init() {
"bytes.Map => bytes.ToTitle": {
{0, "unicode.ToTitle"},
},
+
+ "draw.DrawMask => draw.Draw": {
+ {4, "nil"},
+ {5, "image.Point{}"},
+ },
}
matchers := make(map[string]*matcher)
@@ -203,7 +208,7 @@ func init() {
type wrapperFuncChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
findSuggestion func(*ast.CallExpr) string
}
diff --git a/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go b/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go
index ddd3099f..b4672fcc 100644
--- a/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go
+++ b/vendor/github.com/go-critic/go-critic/checkers/yodaStyleExpr_checker.go
@@ -4,28 +4,28 @@ import (
"go/ast"
"go/token"
- "github.com/go-lintpack/lintpack"
- "github.com/go-lintpack/lintpack/astwalk"
+ "github.com/go-critic/go-critic/checkers/internal/astwalk"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/go-toolsmith/astcopy"
"github.com/go-toolsmith/astp"
)
func init() {
- var info lintpack.CheckerInfo
+ var info linter.CheckerInfo
info.Name = "yodaStyleExpr"
info.Tags = []string{"style", "experimental"}
info.Summary = "Detects Yoda style expressions and suggests to replace them"
info.Before = `return nil != ptr`
info.After = `return ptr != nil`
- collection.AddChecker(&info, func(ctx *lintpack.CheckerContext) lintpack.FileWalker {
+ collection.AddChecker(&info, func(ctx *linter.CheckerContext) linter.FileWalker {
return astwalk.WalkerForLocalExpr(&yodaStyleExprChecker{ctx: ctx})
})
}
type yodaStyleExprChecker struct {
astwalk.WalkHandler
- ctx *lintpack.CheckerContext
+ ctx *linter.CheckerContext
}
func (c *yodaStyleExprChecker) VisitLocalExpr(expr ast.Expr) {
diff --git a/vendor/github.com/go-lintpack/lintpack/checkers_db.go b/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go
similarity index 99%
rename from vendor/github.com/go-lintpack/lintpack/checkers_db.go
rename to vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go
index 83d41b4e..b4bebe44 100644
--- a/vendor/github.com/go-lintpack/lintpack/checkers_db.go
+++ b/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go
@@ -1,4 +1,4 @@
-package lintpack
+package linter
import (
"fmt"
diff --git a/vendor/github.com/go-lintpack/lintpack/context.go b/vendor/github.com/go-critic/go-critic/framework/linter/context.go
similarity index 97%
rename from vendor/github.com/go-lintpack/lintpack/context.go
rename to vendor/github.com/go-critic/go-critic/framework/linter/context.go
index 8671e175..6e108ab6 100644
--- a/vendor/github.com/go-lintpack/lintpack/context.go
+++ b/vendor/github.com/go-critic/go-critic/framework/linter/context.go
@@ -1,4 +1,4 @@
-package lintpack
+package linter
import (
"go/ast"
diff --git a/vendor/github.com/go-lintpack/lintpack/lintpack.go b/vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go
similarity index 98%
rename from vendor/github.com/go-lintpack/lintpack/lintpack.go
rename to vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go
index 28c3a635..1f984d14 100644
--- a/vendor/github.com/go-lintpack/lintpack/lintpack.go
+++ b/vendor/github.com/go-critic/go-critic/framework/linter/lintpack.go
@@ -1,7 +1,6 @@
-package lintpack
+package linter
import (
- "fmt"
"go/ast"
"go/token"
"go/types"
@@ -25,7 +24,7 @@ type CheckerCollection struct {
// constructor will not be called.
func (coll *CheckerCollection) AddChecker(info *CheckerInfo, constructor func(*CheckerContext) FileWalker) {
if coll == nil {
- panic(fmt.Sprintf("adding checker to a nil collection"))
+ panic("adding checker to a nil collection")
}
info.Collection = coll
addChecker(info, constructor)
diff --git a/vendor/github.com/go-lintpack/lintpack/.travis.yml b/vendor/github.com/go-lintpack/lintpack/.travis.yml
deleted file mode 100644
index 41a0cbac..00000000
--- a/vendor/github.com/go-lintpack/lintpack/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-go:
- - 1.x
-install:
- - # Prevent default install action "go get -t -v ./...".
-script:
- - make ci
diff --git a/vendor/github.com/go-lintpack/lintpack/Makefile b/vendor/github.com/go-lintpack/lintpack/Makefile
deleted file mode 100644
index 63f21d2f..00000000
--- a/vendor/github.com/go-lintpack/lintpack/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-.PHONY: test ci
-
-%: # stubs to get makefile param for `test-checker` command
- @: # see: https://stackoverflow.com/a/6273809/433041
-
-build:
- go build cmd/lintpack/build.go cmd/lintpack/main.go
-
-test:
- go test -v -count=1 ./...
-
-ci:
- go get -t -v ./...
- go test -v -count=1 ./...
diff --git a/vendor/github.com/go-lintpack/lintpack/README.md b/vendor/github.com/go-lintpack/lintpack/README.md
deleted file mode 100644
index 5702228e..00000000
--- a/vendor/github.com/go-lintpack/lintpack/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-[![Build Status][travis-image]][travis-url]
-[![Go Report Card][go-report-image]][go-report-url]
-
-[travis-image]: https://travis-ci.org/go-critic/go-critic.svg?branch=master
-[travis-url]: https://travis-ci.org/go-critic/go-critic
-[go-report-image]: https://goreportcard.com/badge/github.com/go-critic/go-critic
-[go-report-url]: https://goreportcard.com/report/github.com/go-critic/go-critic
-
-## Quick start / Installation / Usage
-
-Install `lintpack`:
-
-```bash
-go get -v -u github.com/go-lintpack/lintpack/...
-```
-
-Install checkers from [go-critic/checkers](https://github.com/go-critic/checkers):
-
-```bash
-# You'll need to have sources under your Go workspace first:
-go get -v -u github.com/go-critic/checkers
-# Now build a linter that includes all checks from that package:
-lintpack build -o gocritic github.com/go-critic/checkers
-# Executable gocritic is created and can be used as a standalone linter.
-```
-
-Produced binary includes basic help as well as supported checks documentation.
-
-So, the process is simple:
-
-* Get the `lintpack` linter builder
-* Build linter from checks implemented in different repos, by various vendors
diff --git a/vendor/github.com/go-lintpack/lintpack/doc.go b/vendor/github.com/go-lintpack/lintpack/doc.go
deleted file mode 100644
index 4aba342f..00000000
--- a/vendor/github.com/go-lintpack/lintpack/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Package lintpack provides shared API between the linter and its checkers.
-//
-// Linter is usually implemented by creating instances of registered checkers.
-// Checkers are registered by the AddChecker call.
-package lintpack
diff --git a/vendor/github.com/go-lintpack/lintpack/go.mod b/vendor/github.com/go-lintpack/lintpack/go.mod
deleted file mode 100644
index b2e4cd98..00000000
--- a/vendor/github.com/go-lintpack/lintpack/go.mod
+++ /dev/null
@@ -1,11 +0,0 @@
-module github.com/go-lintpack/lintpack
-
-require (
- github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6
- github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086
- github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30
- github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8
- github.com/google/go-cmp v0.2.0
- github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e
- golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09
-)
diff --git a/vendor/github.com/go-lintpack/lintpack/go.sum b/vendor/github.com/go-lintpack/lintpack/go.sum
deleted file mode 100644
index bd9f5dcb..00000000
--- a/vendor/github.com/go-lintpack/lintpack/go.sum
+++ /dev/null
@@ -1,14 +0,0 @@
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6 h1:aTBUNRTatDDU24gbOEKEoLiDwxtc98ga6K/iMTm6fvs=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086 h1:EIMuvbE9fbtQtimdLe5yeXjuC5CeKbQt8zH6GwtIrhM=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30 h1:zRJPftZJNLPDiOtvYbFRwjSbaJAcVOf80TeEmWGe2kQ=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8 h1:vVouagbdmqTVlCIAxpyYsNNTbkKZ3V66VpKOLU/s6W4=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e h1:9MlwzLdW7QSDrhDjFlsEYmxpFyIoXmYRon3dt0io31k=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09 h1:QJFxMApN9XdBRwtqXfOidB2azUCA4ziuiMTrQ1uBGxw=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/go-toolsmith/typep/.travis.yml b/vendor/github.com/go-toolsmith/typep/.travis.yml
index c32ac006..d3ff3cca 100644
--- a/vendor/github.com/go-toolsmith/typep/.travis.yml
+++ b/vendor/github.com/go-toolsmith/typep/.travis.yml
@@ -5,5 +5,5 @@ install:
- # Prevent default install action "go get -t -v ./...".
script:
- go get -t -v ./...
- - go tool vet .
+ - go vet ./...
- go test -v -race ./...
diff --git a/vendor/github.com/go-toolsmith/typep/README.md b/vendor/github.com/go-toolsmith/typep/README.md
index 73665801..f7979148 100644
--- a/vendor/github.com/go-toolsmith/typep/README.md
+++ b/vendor/github.com/go-toolsmith/typep/README.md
@@ -1,6 +1,6 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/typep)](https://goreportcard.com/report/github.com/go-toolsmith/typep)
[![GoDoc](https://godoc.org/github.com/go-toolsmith/typep?status.svg)](https://godoc.org/github.com/go-toolsmith/typep)
-
+[![Build Status](https://travis-ci.org/go-toolsmith/typep.svg?branch=master)](https://travis-ci.org/go-toolsmith/typep)
# typep
diff --git a/vendor/github.com/go-toolsmith/typep/predicates.go b/vendor/github.com/go-toolsmith/typep/predicates.go
index 70e5b55c..b07325a7 100644
--- a/vendor/github.com/go-toolsmith/typep/predicates.go
+++ b/vendor/github.com/go-toolsmith/typep/predicates.go
@@ -27,7 +27,7 @@ func IsTypeExpr(info *types.Info, x ast.Expr) bool {
_, ok := info.ObjectOf(x).(*types.TypeName)
return ok
- case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType:
+ case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType, *ast.ChanType:
return true
default:
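
The only change here adds `*ast.ChanType` to the kinds of expressions recognized as type expressions. A small sketch of the effect, assuming the package is imported under its upstream path `github.com/go-toolsmith/typep`:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/types"

	"github.com/go-toolsmith/typep"
)

func main() {
	// An AST node for a channel type such as `chan int`.
	ch := &ast.ChanType{Dir: ast.SEND | ast.RECV, Value: ast.NewIdent("int")}

	// The *ast.ChanType case returns true without consulting the type
	// information, so an empty types.Info is enough for this sketch.
	fmt.Println(typep.IsTypeExpr(&types.Info{}, ch)) // true (previously false)
}
```
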
diff --git a/vendor/github.com/go-toolsmith/typep/safeExpr.go b/vendor/github.com/go-toolsmith/typep/safeExpr.go
index 7236e6e0..d5835d97 100644
--- a/vendor/github.com/go-toolsmith/typep/safeExpr.go
+++ b/vendor/github.com/go-toolsmith/typep/safeExpr.go
@@ -20,6 +20,10 @@ func SideEffectFree(info *types.Info, expr ast.Expr) bool {
// whitelist to be on the conservative side.
// Can be extended as needed.
+ if expr == nil {
+ return true
+ }
+
switch expr := expr.(type) {
case *ast.StarExpr:
return SideEffectFree(info, expr.X)
@@ -31,6 +35,11 @@ func SideEffectFree(info *types.Info, expr ast.Expr) bool {
SideEffectFree(info, expr.X)
case *ast.BasicLit, *ast.Ident:
return true
+ case *ast.SliceExpr:
+ return SideEffectFree(info, expr.X) &&
+ SideEffectFree(info, expr.Low) &&
+ SideEffectFree(info, expr.High) &&
+ SideEffectFree(info, expr.Max)
case *ast.IndexExpr:
return SideEffectFree(info, expr.X) &&
SideEffectFree(info, expr.Index)
@@ -38,6 +47,8 @@ func SideEffectFree(info *types.Info, expr ast.Expr) bool {
return SideEffectFree(info, expr.X)
case *ast.ParenExpr:
return SideEffectFree(info, expr.X)
+ case *ast.TypeAssertExpr:
+ return SideEffectFree(info, expr.X)
case *ast.CompositeLit:
return SideEffectFreeList(info, expr.Elts)
case *ast.CallExpr:
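
Taken together, these hunks make a nil sub-expression, slice expressions, and type assertions count as side-effect free whenever their operands are. A rough sketch of the new behaviour, again assuming the upstream import path:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/types"

	"github.com/go-toolsmith/typep"
)

func main() {
	// xs[1:3] parses to an *ast.SliceExpr whose Max index is nil; with the
	// new SliceExpr case plus the nil guard it is reported as side-effect
	// free. No type information is needed on this path.
	expr, err := parser.ParseExpr("xs[1:3]")
	if err != nil {
		panic(err)
	}
	fmt.Println(typep.SideEffectFree(&types.Info{}, expr)) // true
}
```
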
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE
new file mode 100644
index 00000000..890776ab
--- /dev/null
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 go-xmlfmt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/README.md b/vendor/github.com/go-xmlfmt/xmlfmt/README.md
new file mode 100644
index 00000000..4eb6d69a
--- /dev/null
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/README.md
@@ -0,0 +1,178 @@
+# Go XML Formatter
+
+[![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
+[![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt)
+[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297)
+
+## Synopsis
+
+The Go XML Formatter, xmlfmt, will format the XML string in a readable way.
+
+```go
+package main
+
+import "github.com/go-xmlfmt/xmlfmt"
+
+func main() {
+ xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia`
+ x := xmlfmt.FormatXML(xml1, "\t", " ")
+ print(x)
+}
+
+```
+
+Output:
+
+```xml
+
+
+ a
+
+
+
+
+
+ Some org-or-other
+
+ Wouldnt you like to know
+
+
+
+ Pat
+
+ Califia
+
+
+
+
+
+```
+
+There is no XML decoding or encoding involved, only pure regular-expression matching and replacing, so it is much faster than going through decoding and encoding procedures. Moreover, the exact XML source string is preserved instead of being changed by an encoder, which is why this package exists in the first place.
+
+## Command
+
+To use it on the command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt):
+
+
+```
+$ xmlfmt
+XML Formatter
+built on 2019-12-08
+
+The xmlfmt will format the XML string without rewriting the document
+
+Options:
+
+ -h, --help display help information
+ -f, --file *The xml file to read from (or stdin)
+ -p, --prefix each element begins on a new line and this prefix
+ -i, --indent[= ] indent string for nested elements
+```
+
+
+## Justification
+
+### The format
+
+The Go XML Formatter is not called an XML Beautifier because the result is not *exactly* what people would expect -- some, but not all, closing tags stay on the same line, just as shown above. Having looked at the result and thought it over, I now believe this is actually a better way to present it, as those closing tags are better left on the same line in my opinion. I.e.,
+
+When it comes to very big XML strings, which is what I deal with every day, saving space by not letting those closing tags take extra lines is a plus rather than a negative to me.
+
+### The alternative
+
+Formatting it “properly”, i.e., the way people would normally expect, is very hard to do with pure regular expressions. In fact, according to Sam Whited from the go-nuts mailing list,
+
+> Regular expression is, well, regular. This means that they can parse regular grammars, but can't parse context free grammars (like XML). It is actually impossible to use a regex to do this task; it will always be fragile, unfortunately.
+
+So if the output format is that important to you, then unfortunately you have to go through the decoding and encoding procedures. But besides being slow, that approach has drawbacks of its own, as James McGill put it in http://stackoverflow.com/questions/21117161:
+
+> I like this solution, but am still in search of a Golang XML formatter/prettyprinter that doesn't rewrite the document (other than formatting whitespace). Marshalling or using the Encoder will change namespace declarations.
+>
+> For example an element like "< ns1:Element />" will be translated to something like '< Element xmlns="http://bla...bla/ns1" >< /Element >' which seems harmless enough except when the intent is to not alter the xml other than formatting. -- James McGill Nov 12 '15
+
+Using Sam's code as an example,
+
+https://play.golang.org/p/JUqQY3WpW5
+
+The above code formats the following XML
+
+```xml
+
+
+
+
+
+ 123
+ John Brown
+
+
+
+
+```
+
+into this:
+
+```xml
+
+
+
+
+
+ 123
+ John Brown
+
+
+
+
+```
+
+I know they are syntactically the same; however, the problem is that they *look* totally different.
+
+That's why there is this package, an XML Beautifier that doesn't rewrite the document.
+
+## Credit
+
+The credit goes to **diotalevi** from his post at http://www.perlmonks.org/?node_id=261292.
+
+However, it does not work for all cases. For example,
+
+```sh
+$ echo '123John Brown' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^/>]+)(/?)>\s*(?=(?))?)($indent+=$3?0:$1?-1:1;"<$1$2$3>".($1&&($4 eq"")?"\n".(" "x$indent):$4?"\n".(" "x$indent):""))ge'
+```
+```xml
+
+123
+John Brown
+
+
+
+
+```
+
+I simplified the algorithm, and now it should work for all cases:
+
+```sh
+echo '123John Brown' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge'
+```
+```xml
+
+
+
+
+
+
+ 123
+
+ John Brown
+
+
+
+
+```
+
+This package is a direct translation of the above Perl code into Go,
+further enhanced by @ruandao.
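
For contrast with the regex-only approach this README defends, here is a minimal sketch of the decode/re-encode alternative discussed in its Justification section, built solely on the standard library `encoding/xml`. As the README points out, this route can rewrite namespace declarations and self-closing tags, which is exactly what xmlfmt avoids:

```go
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// indentXML re-emits the XML token stream with indentation. Unlike xmlfmt,
// it round-trips through encoding/xml, so the document may be rewritten.
func indentXML(src string) (string, error) {
	dec := xml.NewDecoder(strings.NewReader(src))
	var buf bytes.Buffer
	enc := xml.NewEncoder(&buf)
	enc.Indent("", "  ")
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", err
		}
		if err := enc.EncodeToken(tok); err != nil {
			return "", err
		}
	}
	if err := enc.Flush(); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	out, err := indentXML(`<root><name><first>Pat</first></name></root>`)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```
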
diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
new file mode 100644
index 00000000..b744f5b3
--- /dev/null
+++ b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go
@@ -0,0 +1,56 @@
+////////////////////////////////////////////////////////////////////////////
+// Program: xmlfmt.go
+// Purpose: Go XML Beautify from XML string using pure string manipulation
+// Authors: Antonio Sun (c) 2016-2019, All rights reserved
+////////////////////////////////////////////////////////////////////////////
+
+package xmlfmt
+
+import (
+ "regexp"
+ "strings"
+)
+
+var (
+ reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`)
+ // NL is the newline string used in XML output, defined for DOS convenience.
+ NL = "\r\n"
+)
+
+// FormatXML will (purely) reformat the XML string in a readable way, without rewriting or otherwise altering its structure.
+func FormatXML(xmls, prefix, indent string) string {
+ src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><")
+
+ rf := replaceTag(prefix, indent)
+ return (prefix + reg.ReplaceAllStringFunc(src, rf))
+}
+
+// replaceTag returns a closure function to do 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' as in Perl
+// and deal with comments as well
+func replaceTag(prefix, indent string) func(string) string {
+ indentLevel := 0
+ return func(m string) string {
+ // head elem
+ if strings.HasPrefix(m, "") {
+ return NL + prefix + strings.Repeat(indent, indentLevel) + m
+ }
+ // comment elem
+ if strings.HasPrefix(m, " 0 {
@@ -122,6 +123,7 @@ func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
if err := f.setFh(); err != nil {
return false, err
}
+ defer f.ensureFhState()
}
_, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
deleted file mode 100644
index 3d97fc7a..00000000
--- a/vendor/github.com/gogo/protobuf/AUTHORS
+++ /dev/null
@@ -1,15 +0,0 @@
-# This is the official list of GoGo authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS file, which
-# lists people. For example, employees are listed in CONTRIBUTORS,
-# but not in AUTHORS, because the employer holds the copyright.
-
-# Names should be added to this file as one of
-# Organization's name
-# Individual's name
-# Individual's name
-
-# Please keep the list sorted.
-
-Sendgrid, Inc
-Vastech SA (PTY) LTD
-Walter Schulze
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1b4f6c20..00000000
--- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,23 +0,0 @@
-Anton Povarov
-Brian Goff
-Clayton Coleman
-Denis Smirnov
-DongYun Kang
-Dwayne Schultz
-Georg Apitz
-Gustav Paul
-Johan Brandhorst
-John Shahid
-John Tuley
-Laurent
-Patrick Lee
-Peter Edge
-Roger Johansson
-Sam Nguyen
-Sergio Arbeo
-Stephen J Day
-Tamir Duberstein
-Todd Eisenberger
-Tormod Erevik Lea
-Vyacheslav Kim
-Walter Schulze
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
deleted file mode 100644
index 00d65f32..00000000
--- a/vendor/github.com/gogo/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C test_proto
- make -C proto3_proto
- make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
deleted file mode 100644
index a26b046d..00000000
--- a/vendor/github.com/gogo/protobuf/proto/clone.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "fmt"
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
- in := reflect.ValueOf(src)
- if in.IsNil() {
- return src
- }
- out := reflect.New(in.Type().Elem())
- dst := out.Interface().(Message)
- Merge(dst, src)
- return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
- // Merge merges src into this message.
- // Required and optional fields that are set in src will be set to that value in dst.
- // Elements of repeated fields will be appended.
- //
- // Merge may panic if called with a different argument type than the receiver.
- Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
- XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- if m, ok := dst.(Merger); ok {
- m.Merge(src)
- return
- }
-
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
- }
- if in.IsNil() {
- return // Merge from nil src is a noop
- }
- if m, ok := dst.(generatedMerger); ok {
- m.XXX_Merge(src)
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
- emOut := out.Addr().Interface().(extensionsBytes)
- bIn := emIn.GetExtensions()
- bOut := emOut.GetExtensions()
- *bOut = append(*bOut, *bIn...)
- } else if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
deleted file mode 100644
index 24552483..00000000
--- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import "reflect"
-
-type custom interface {
- Marshal() ([]byte, error)
- Unmarshal(data []byte) error
- Size() int
-}
-
-var customType = reflect.TypeOf((*custom)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
deleted file mode 100644
index 63b0f08b..00000000
--- a/vendor/github.com/gogo/protobuf/proto/decode.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- i := p.index
- buf := p.buf
-
- if i >= len(buf) {
- return 0, io.ErrUnexpectedEOF
- } else if buf[i] < 0x80 {
- p.index++
- return uint64(buf[i]), nil
- } else if len(buf)-i < 10 {
- return p.decodeVarintSlow()
- }
-
- var b uint64
- // we already checked the first byte
- x = uint64(buf[i]) - 0x80
- i++
-
- b = uint64(buf[i])
- i++
- x += b << 7
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 7
-
- b = uint64(buf[i])
- i++
- x += b << 14
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 14
-
- b = uint64(buf[i])
- i++
- x += b << 21
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 21
-
- b = uint64(buf[i])
- i++
- x += b << 28
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 28
-
- b = uint64(buf[i])
- i++
- x += b << 35
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 35
-
- b = uint64(buf[i])
- i++
- x += b << 42
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 42
-
- b = uint64(buf[i])
- i++
- x += b << 49
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 49
-
- b = uint64(buf[i])
- i++
- x += b << 56
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 56
-
- b = uint64(buf[i])
- i++
- x += b << 63
- if b&0x80 == 0 {
- goto done
- }
-
- return 0, errOverflow
-
-done:
- p.index = i
- return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
- XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
- b := p.buf[p.index:]
- x, y := findEndGroup(b)
- if x < 0 {
- return io.ErrUnexpectedEOF
- }
- err := Unmarshal(b[:x], pb)
- p.index += y
- return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(newUnmarshaler); ok {
- err := u.XXX_Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- // Slow workaround for messages that aren't Unmarshalers.
- // This includes some hand-coded .pb.go files and
- // bootstrap protos.
- // TODO: fix all of those and then add Unmarshal to
- // the Message interface. Then:
- // The cast above and code below can be deleted.
- // The old unmarshaler can be deleted.
- // Clients can call Unmarshal directly (can already do that, actually).
- var info InternalMessageInfo
- err := info.Unmarshal(pb, p.buf[p.index:])
- p.index = len(p.buf)
- return err
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
deleted file mode 100644
index 35b882c0..00000000
--- a/vendor/github.com/gogo/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import "errors"
-
-// Deprecated: do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go
deleted file mode 100644
index fe1bd7d9..00000000
--- a/vendor/github.com/gogo/protobuf/proto/discard.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-type generatedDiscarder interface {
- XXX_DiscardUnknown()
-}
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
-func DiscardUnknown(m Message) {
- if m, ok := m.(generatedDiscarder); ok {
- m.XXX_DiscardUnknown()
- return
- }
- // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
- // but the master branch has no implementation for InternalMessageInfo,
- // so it would be more work to replicate that approach.
- discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
- di := atomicLoadDiscardInfo(&a.discard)
- if di == nil {
- di = getDiscardInfo(reflect.TypeOf(m).Elem())
- atomicStoreDiscardInfo(&a.discard, di)
- }
- di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []discardFieldInfo
- unrecognized field
-}
-
-type discardFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
- discard func(src pointer)
-}
-
-var (
- discardInfoMap = map[reflect.Type]*discardInfo{}
- discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
- discardInfoLock.Lock()
- defer discardInfoLock.Unlock()
- di := discardInfoMap[t]
- if di == nil {
- di = &discardInfo{typ: t}
- discardInfoMap[t] = di
- }
- return di
-}
-
-func (di *discardInfo) discard(src pointer) {
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&di.initialized) == 0 {
- di.computeDiscardInfo()
- }
-
- for _, fi := range di.fields {
- sfp := src.offset(fi.field)
- fi.discard(sfp)
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
- // Ignore lock since DiscardUnknown is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- DiscardUnknown(m)
- }
- }
- }
-
- if di.unrecognized.IsValid() {
- *src.offset(di.unrecognized).toBytes() = nil
- }
-}
-
-func (di *discardInfo) computeDiscardInfo() {
- di.lock.Lock()
- defer di.lock.Unlock()
- if di.initialized != 0 {
- return
- }
- t := di.typ
- n := t.NumField()
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- dfi := discardFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
- case isSlice: // E.g., []*pb.T
- discardInfo := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sps := src.getPointerSlice()
- for _, sp := range sps {
- if !sp.isNil() {
- discardInfo.discard(sp)
- }
- }
- }
- default: // E.g., *pb.T
- discardInfo := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- discardInfo.discard(sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
- default: // E.g., map[K]V
- if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
- dfi.discard = func(src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- DiscardUnknown(val.Interface().(Message))
- }
- }
- } else {
- dfi.discard = func(pointer) {} // Noop
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
- default: // E.g., interface{}
- // TODO: Make this faster?
- dfi.discard = func(src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- DiscardUnknown(sv.Interface().(Message))
- }
- }
- }
- }
- default:
- continue
- }
- di.fields = append(di.fields, dfi)
- }
-
- di.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- di.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
- v := reflect.ValueOf(m)
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return
- }
- t := v.Type()
-
- for i := 0; i < v.NumField(); i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- vf := v.Field(i)
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
- case isSlice: // E.g., []*pb.T
- for j := 0; j < vf.Len(); j++ {
- discardLegacy(vf.Index(j).Interface().(Message))
- }
- default: // E.g., *pb.T
- discardLegacy(vf.Interface().(Message))
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
- default: // E.g., map[K]V
- tv := vf.Type().Elem()
- if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
- for _, key := range vf.MapKeys() {
- val := vf.MapIndex(key)
- discardLegacy(val.Interface().(Message))
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
- default: // E.g., test_proto.isCommunique_Union interface
- if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
- vf = vf.Elem() // E.g., *test_proto.Communique_Msg
- if !vf.IsNil() {
- vf = vf.Elem() // E.g., test_proto.Communique_Msg
- vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
- if vf.Kind() == reflect.Ptr {
- discardLegacy(vf.Interface().(Message))
- }
- }
- }
- }
- }
- }
-
- if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
- if vf.Type() != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- vf.Set(reflect.ValueOf([]byte(nil)))
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(m); err == nil {
- // Ignore lock since discardLegacy is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- discardLegacy(m)
- }
- }
- }
-}
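
The hunk above removes the reflection-based DiscardUnknown/discardLegacy helpers, whose final step nils out the XXX_unrecognized byte slice on each message it visits. A minimal, hedged sketch of just that last step, using a hypothetical fakeMsg struct in place of a generated message type:

```go
// Minimal, self-contained sketch of the reflection step at the end of the
// deleted discardLegacy: locate an XXX_unrecognized []byte field and nil it.
// fakeMsg is a hypothetical stand-in for a generated message type.
package main

import (
	"fmt"
	"reflect"
)

type fakeMsg struct {
	Name             *string
	XXX_unrecognized []byte
}

func clearUnrecognized(m interface{}) {
	v := reflect.ValueOf(m)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return
	}
	v = v.Elem()
	if f := v.FieldByName("XXX_unrecognized"); f.IsValid() && f.Type() == reflect.TypeOf([]byte(nil)) {
		f.Set(reflect.ValueOf([]byte(nil))) // drop any retained unknown wire data
	}
}

func main() {
	m := &fakeMsg{XXX_unrecognized: []byte{0x08, 0x01}} // pretend leftover unknown field
	clearUnrecognized(m)
	fmt.Println(m.XXX_unrecognized == nil) // true
}
```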
diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go
deleted file mode 100644
index 93464c91..00000000
--- a/vendor/github.com/gogo/protobuf/proto/duration.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
-import (
- "errors"
- "fmt"
- "time"
-)
-
-const (
- // Range of a Duration in seconds, as specified in
- // google/protobuf/duration.proto. This is about 10,000 years in seconds.
- maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
- minSeconds = -maxSeconds
-)
-
-// validateDuration determines whether the Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid Duration
-// may still be too large to fit into a time.Duration (the range of Duration
-// is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *duration) error {
- if d == nil {
- return errors.New("duration: nil Duration")
- }
- if d.Seconds < minSeconds || d.Seconds > maxSeconds {
- return fmt.Errorf("duration: %#v: seconds out of range", d)
- }
- if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
- return fmt.Errorf("duration: %#v: nanos out of range", d)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
- return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
- }
- return nil
-}
-
-// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
-// returns an error if the Duration is invalid or is too large to be
-// represented in a time.Duration.
-func durationFromProto(p *duration) (time.Duration, error) {
- if err := validateDuration(p); err != nil {
- return 0, err
- }
- d := time.Duration(p.Seconds) * time.Second
- if int64(d/time.Second) != p.Seconds {
- return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
- }
- if p.Nanos != 0 {
- d += time.Duration(p.Nanos)
- if (d < 0) != (p.Nanos < 0) {
- return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
- }
- }
- return d, nil
-}
-
-// DurationProto converts a time.Duration to a Duration.
-func durationProto(d time.Duration) *duration {
- nanos := d.Nanoseconds()
- secs := nanos / 1e9
- nanos -= secs * 1e9
- return &duration{
- Seconds: secs,
- Nanos: int32(nanos),
- }
-}
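
The deleted duration.go converts google.protobuf.Duration (seconds plus nanos) into a time.Duration, rejecting values that overflow or flip sign along the way. A standalone sketch of the same conversion, with pbDuration as a local stand-in for the generated Duration type:

```go
// Standalone sketch of the seconds/nanos -> time.Duration conversion the
// deleted duration.go performs, including its overflow checks. pbDuration is
// a local stand-in for google.protobuf.Duration.
package main

import (
	"fmt"
	"time"
)

type pbDuration struct {
	Seconds int64
	Nanos   int32
}

func toTimeDuration(p pbDuration) (time.Duration, error) {
	d := time.Duration(p.Seconds) * time.Second
	if int64(d/time.Second) != p.Seconds {
		return 0, fmt.Errorf("duration %+v overflows time.Duration", p)
	}
	if p.Nanos != 0 {
		d += time.Duration(p.Nanos)
		if (d < 0) != (p.Nanos < 0) {
			return 0, fmt.Errorf("duration %+v overflows time.Duration", p)
		}
	}
	return d, nil
}

func main() {
	d, err := toTimeDuration(pbDuration{Seconds: 3, Nanos: 500000000})
	fmt.Println(d, err) // 3.5s <nil>
}
```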
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
deleted file mode 100644
index e748e173..00000000
--- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
- "time"
-)
-
-var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
-
-type duration struct {
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *duration) Reset() { *m = duration{} }
-func (*duration) ProtoMessage() {}
-func (*duration) String() string { return "duration" }
-
-func init() {
- RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
deleted file mode 100644
index 3abfed2c..00000000
--- a/vendor/github.com/gogo/protobuf/proto/encode.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "errors"
- "reflect"
-)
-
-var (
- // errRepeatedHasNil is the error returned if Marshal is called with
- // a struct with a repeated field containing a nil element.
- errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
- // errOneofHasNil is the error returned if Marshal is called with
- // a struct with a oneof field containing a nil element.
- errOneofHasNil = errors.New("proto: oneof field has nil value")
-
- // ErrNil is the error returned if Marshal is called with nil.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // ErrTooLarge is the error returned if Marshal is called with a
- // message that encodes to >2GB.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- switch {
- case x < 1<<7:
- return 1
- case x < 1<<14:
- return 2
- case x < 1<<21:
- return 3
- case x < 1<<28:
- return 4
- case x < 1<<35:
- return 5
- case x < 1<<42:
- return 6
- case x < 1<<49:
- return 7
- case x < 1<<56:
- return 8
- case x < 1<<63:
- return 9
- }
- return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- siz := Size(pb)
- p.EncodeVarint(uint64(siz))
- return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
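
The deleted encode.go holds the base wire encoders: base-128 varints and the zigzag mapping used by sint32/sint64. A self-contained, illustrative sketch of both, mirroring EncodeVarint and EncodeZigzag64 above; the decoder and the unzigzag step in main are additions here so the round trip can be shown:

```go
// Self-contained sketch of the varint and zigzag encodings implemented by the
// deleted encode.go (EncodeVarint / EncodeZigzag64). Illustrative only.
package main

import "fmt"

// encodeVarint appends x in base-128 varint form: 7 bits per byte,
// high bit set on every byte but the last.
func encodeVarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

// decodeVarint reads one varint and returns the value and bytes consumed.
func decodeVarint(buf []byte) (uint64, int) {
	var x uint64
	for i, b := range buf {
		x |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input
}

// zigzag64 maps signed values to unsigned ones so small negatives stay short:
// 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
func zigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

func main() {
	enc := encodeVarint(nil, zigzag64(-2))
	fmt.Printf("% x\n", enc) // 03
	v, _ := decodeVarint(enc)
	fmt.Println(int64(v>>1) ^ -int64(v&1)) // -2
}
```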
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
deleted file mode 100644
index 0f5fb173..00000000
--- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-func NewRequiredNotSetError(field string) *RequiredNotSetError {
- return &RequiredNotSetError{field}
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
deleted file mode 100644
index d4db5a1c..00000000
--- a/vendor/github.com/gogo/protobuf/proto/equal.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
- "bytes"
- "log"
- "reflect"
- "strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
- - Two messages are equal iff they are the same type,
- corresponding fields are equal, unknown field sets
- are equal, and extensions sets are equal.
- - Two set scalar fields are equal iff their values are equal.
- If the fields are of a floating-point type, remember that
- NaN != x for all x, including NaN. If the message is defined
- in a proto3 .proto file, fields are not "set"; specifically,
- zero length proto3 "bytes" fields are equal (nil == {}).
- - Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal. Note a "bytes" field,
- although represented by []byte, is not a repeated field and the
- rule for the scalar fields described above applies.
- - Two unset fields are equal.
- - Two unknown field sets are equal if their current
- encoded state is equal.
- - Two extension sets are equal iff they have corresponding
- elements that are pairwise equal.
- - Two map fields are equal iff their lengths are the same,
- and they contain the same set of elements. Zero-length map
- fields are equal.
- - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
- if a == nil || b == nil {
- return a == b
- }
- v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
- if v1.Type() != v2.Type() {
- return false
- }
- if v1.Kind() == reflect.Ptr {
- if v1.IsNil() {
- return v2.IsNil()
- }
- if v2.IsNil() {
- return false
- }
- v1, v2 = v1.Elem(), v2.Elem()
- }
- if v1.Kind() != reflect.Struct {
- return false
- }
- return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
- sprop := GetProperties(v1.Type())
- for i := 0; i < v1.NumField(); i++ {
- f := v1.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- f1, f2 := v1.Field(i), v2.Field(i)
- if f.Type.Kind() == reflect.Ptr {
- if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
- // both unset
- continue
- } else if n1 != n2 {
- // set/unset mismatch
- return false
- }
- f1, f2 = f1.Elem(), f2.Elem()
- }
- if !equalAny(f1, f2, sprop.Prop[i]) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_InternalExtensions")
- if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_extensions")
- if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
- return false
- }
- }
-
- uf := v1.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return true
- }
-
- u1 := uf.Bytes()
- u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
- if v1.Type() == protoMessageType {
- m1, _ := v1.Interface().(Message)
- m2, _ := v2.Interface().(Message)
- return Equal(m1, m2)
- }
- switch v1.Kind() {
- case reflect.Bool:
- return v1.Bool() == v2.Bool()
- case reflect.Float32, reflect.Float64:
- return v1.Float() == v2.Float()
- case reflect.Int32, reflect.Int64:
- return v1.Int() == v2.Int()
- case reflect.Interface:
- // Probably a oneof field; compare the inner values.
- n1, n2 := v1.IsNil(), v2.IsNil()
- if n1 || n2 {
- return n1 == n2
- }
- e1, e2 := v1.Elem(), v2.Elem()
- if e1.Type() != e2.Type() {
- return false
- }
- return equalAny(e1, e2, nil)
- case reflect.Map:
- if v1.Len() != v2.Len() {
- return false
- }
- for _, key := range v1.MapKeys() {
- val2 := v2.MapIndex(key)
- if !val2.IsValid() {
- // This key was not found in the second map.
- return false
- }
- if !equalAny(v1.MapIndex(key), val2, nil) {
- return false
- }
- }
- return true
- case reflect.Ptr:
- // Maps may have nil values in them, so check for nil.
- if v1.IsNil() && v2.IsNil() {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return equalAny(v1.Elem(), v2.Elem(), prop)
- case reflect.Slice:
- if v1.Type().Elem().Kind() == reflect.Uint8 {
- // short circuit: []byte
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value.
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
- }
-
- if v1.Len() != v2.Len() {
- return false
- }
- for i := 0; i < v1.Len(); i++ {
- if !equalAny(v1.Index(i), v2.Index(i), prop) {
- return false
- }
- }
- return true
- case reflect.String:
- return v1.Interface().(string) == v2.Interface().(string)
- case reflect.Struct:
- return equalStruct(v1, v2)
- case reflect.Uint32, reflect.Uint64:
- return v1.Uint() == v2.Uint()
- }
-
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to compare %v", v1)
- return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
- em1, _ := x1.extensionsRead()
- em2, _ := x2.extensionsRead()
- return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
- if len(em1) != len(em2) {
- return false
- }
-
- for extNum, e1 := range em1 {
- e2, ok := em2[extNum]
- if !ok {
- return false
- }
-
- m1, m2 := e1.value, e2.value
-
- if m1 == nil && m2 == nil {
- // Both have only encoded form.
- if bytes.Equal(e1.enc, e2.enc) {
- continue
- }
- // The bytes are different, but the extensions might still be
- // equal. We need to decode them to compare.
- }
-
- if m1 != nil && m2 != nil {
- // Both are unencoded.
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- continue
- }
-
- // At least one is encoded. To do a semantically correct comparison
- // we need to unmarshal them first.
- var desc *ExtensionDesc
- if m := extensionMaps[base]; m != nil {
- desc = m[extNum]
- }
- if desc == nil {
- // If both have only encoded form and the bytes are the same,
- // it is handled above. We get here when the bytes are different.
- // We don't know how to decode it, so just compare them as byte
- // slices.
- log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- return false
- }
- var err error
- if m1 == nil {
- m1, err = decodeExtension(e1.enc, desc)
- }
- if m2 == nil && err == nil {
- m2, err = decodeExtension(e2.enc, desc)
- }
- if err != nil {
- // The encoded form is invalid.
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
- return false
- }
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- }
-
- return true
-}
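
One subtlety documented in the deleted Equal is the []byte branch of equalAny: under proto3, a nil bytes field and a zero-length one compare equal, while otherwise nil versus non-nil is a set/unset mismatch. A small sketch of just that rule (bytesFieldEqual is an illustrative name, not part of the package):

```go
// Sketch of the []byte comparison rule from the deleted equalAny: in proto3,
// nil and zero-length bytes fields are treated as equal; otherwise nil vs.
// non-nil is a mismatch. Illustrative only.
package main

import (
	"bytes"
	"fmt"
)

func bytesFieldEqual(a, b []byte, proto3 bool) bool {
	if proto3 && len(a) == 0 && len(b) == 0 {
		return true // proto3: nil and empty are the same "unset" value
	}
	if (a == nil) != (b == nil) {
		return false // proto2: set vs. unset mismatch
	}
	return bytes.Equal(a, b)
}

func main() {
	fmt.Println(bytesFieldEqual(nil, []byte{}, true))  // true
	fmt.Println(bytesFieldEqual(nil, []byte{}, false)) // false
}
```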
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
deleted file mode 100644
index 686bd2a0..00000000
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,604 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
-import (
- "errors"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
- Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- extensionsWrite() map[int32]Extension
- extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- ExtensionMap() map[int32]Extension
-}
-
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
- extendableProtoV1
-}
-
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
- return e.ExtensionMap()
-}
-
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- return e.ExtensionMap(), notLocker{}
-}
-
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
-
-func (n notLocker) Lock() {}
-func (n notLocker) Unlock() {}
-
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
- switch p := p.(type) {
- case extendableProto:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return p, nil
- case extendableProtoV1:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return extensionAdapter{p}, nil
- case extensionsBytes:
- return slowExtensionAdapter{p}, nil
- }
- // Don't allocate a specific error containing %T:
- // this is the hot path for Clone and MarshalText.
- return nil, errNotExtendable
-}
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-func isNilPtr(x interface{}) bool {
- v := reflect.ValueOf(x)
- return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
- // The struct must be indirect so that if a user inadvertently copies a
- // generated message and its embedded XXX_InternalExtensions, they
- // avoid the mayhem of a copied mutex.
- //
- // The mutex serializes all logically read-only operations to p.extensionMap.
- // It is up to the client to ensure that write operations to p.extensionMap are
- // mutually exclusive with other accesses.
- p *struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }
-}
-
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
- if e.p == nil {
- e.p = new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- })
- e.p.extensionMap = make(map[int32]Extension)
- }
- return e.p.extensionMap
-}
-
-// extensionsRead returns the extensions map for read-only use. It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
- if e.p == nil {
- return nil, nil
- }
- return e.p.extensionMap, &e.p.mu
-}
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
- ExtendedType Message // nil pointer to the type that is being extended
- ExtensionType interface{} // nil pointer to the extension type
- Field int32 // field number
- Name string // fully-qualified name of extension, for text formatting
- Tag string // protobuf tag style
- Filename string // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
- t := reflect.TypeOf(ed.ExtensionType)
- return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
- // When an extension is stored in a message using SetExtension
- // only desc and value are set. When the message is marshaled
- // enc will be set to the encoded form of the message.
- //
- // When a message is unmarshaled and contains extensions, each
- // extension will have only enc set. When such an extension is
- // accessed using GetExtension (or GetExtensions) desc and value
- // will be set.
- desc *ExtensionDesc
- value interface{}
- enc []byte
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
- if ebase, ok := base.(extensionsBytes); ok {
- clearExtension(base, id)
- ext := ebase.GetExtensions()
- *ext = append(*ext, b...)
- return
- }
- epb, err := extendable(base)
- if err != nil {
- return
- }
- extmap := epb.extensionsWrite()
- extmap[id] = Extension{enc: b}
-}
-
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
- for _, er := range pb.ExtensionRangeArray() {
- if er.Start <= field && field <= er.End {
- return true
- }
- }
- return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
- var pbi interface{} = pb
- // Check the extended type.
- if ea, ok := pbi.(extensionAdapter); ok {
- pbi = ea.extendableProtoV1
- }
- if ea, ok := pbi.(slowExtensionAdapter); ok {
- pbi = ea.extensionsBytes
- }
- if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
- }
- // Check the range.
- if !isExtensionField(pb, extension.Field) {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
- base reflect.Type
- field int32
-}
-
-var extProp = struct {
- sync.RWMutex
- m map[extPropKey]*Properties
-}{
- m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
- key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
- extProp.RLock()
- if prop, ok := extProp.m[key]; ok {
- extProp.RUnlock()
- return prop
- }
- extProp.RUnlock()
-
- extProp.Lock()
- defer extProp.Unlock()
- // Check again.
- if prop, ok := extProp.m[key]; ok {
- return prop
- }
-
- prop := new(Properties)
- prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
- extProp.m[key] = prop
- return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
- if epb, doki := pb.(extensionsBytes); doki {
- ext := epb.GetExtensions()
- buf := *ext
- o := 0
- for o < len(buf) {
- tag, n := DecodeVarint(buf[o:])
- fieldNum := int32(tag >> 3)
- if int32(fieldNum) == extension.Field {
- return true
- }
- wireType := int(tag & 0x7)
- o += n
- l, err := size(buf[o:], wireType)
- if err != nil {
- return false
- }
- o += l
- }
- return false
- }
- // TODO: Check types, field numbers, etc.?
- epb, err := extendable(pb)
- if err != nil {
- return false
- }
- extmap, mu := epb.extensionsRead()
- if extmap == nil {
- return false
- }
- mu.Lock()
- _, ok := extmap[extension.Field]
- mu.Unlock()
- return ok
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
- clearExtension(pb, extension.Field)
-}
-
-func clearExtension(pb Message, fieldNum int32) {
- if epb, ok := pb.(extensionsBytes); ok {
- offset := 0
- for offset != -1 {
- offset = deleteExtension(epb, fieldNum, offset)
- }
- return
- }
- epb, err := extendable(pb)
- if err != nil {
- return
- }
- // TODO: Check types, field numbers, etc.?
- extmap := epb.extensionsWrite()
- delete(extmap, fieldNum)
-}
-
-// GetExtension retrieves a proto2 extended field from pb.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- if epb, doki := pb.(extensionsBytes); doki {
- ext := epb.GetExtensions()
- return decodeExtensionFromBytes(extension, *ext)
- }
-
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
-
- if extension.ExtendedType != nil {
- // can only check type if this is a complete descriptor
- if cerr := checkExtensionTypes(epb, extension); cerr != nil {
- return nil, cerr
- }
- }
-
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return defaultExtensionValue(extension)
- }
- mu.Lock()
- defer mu.Unlock()
- e, ok := emap[extension.Field]
- if !ok {
- // defaultExtensionValue returns the default value or
- // ErrMissingExtension if there is no default.
- return defaultExtensionValue(extension)
- }
-
- if e.value != nil {
- // Already decoded. Check the descriptor, though.
- if e.desc != extension {
- // This shouldn't happen. If it does, it means that
- // GetExtension was called twice with two different
- // descriptors with the same field number.
- return nil, errors.New("proto: descriptor conflict")
- }
- return e.value, nil
- }
-
- if extension.ExtensionType == nil {
- // incomplete descriptor
- return e.enc, nil
- }
-
- v, err := decodeExtension(e.enc, extension)
- if err != nil {
- return nil, err
- }
-
- // Remember the decoded version and drop the encoded version.
- // That way it is safe to mutate what we return.
- e.value = v
- e.desc = extension
- e.enc = nil
- emap[extension.Field] = e
- return e.value, nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
- if extension.ExtensionType == nil {
- // incomplete descriptor, so no default
- return nil, ErrMissingExtension
- }
-
- t := reflect.TypeOf(extension.ExtensionType)
- props := extensionProperties(extension)
-
- sf, _, err := fieldDefault(t, props)
- if err != nil {
- return nil, err
- }
-
- if sf == nil || sf.value == nil {
- // There is no default value.
- return nil, ErrMissingExtension
- }
-
- if t.Kind() != reflect.Ptr {
- // We do not need to return a Ptr, we can directly return sf.value.
- return sf.value, nil
- }
-
- // We need to return an interface{} that is a pointer to sf.value.
- value := reflect.New(t).Elem()
- value.Set(reflect.New(value.Type().Elem()))
- if sf.kind == reflect.Int32 {
- // We may have an int32 or an enum, but the underlying data is int32.
- // Since we can't set an int32 into a non int32 reflect.value directly
- // set it as a int32.
- value.Elem().SetInt(int64(sf.value.(int32)))
- } else {
- value.Elem().Set(reflect.ValueOf(sf.value))
- }
- return value.Interface(), nil
-}
-
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- t := reflect.TypeOf(extension.ExtensionType)
- unmarshal := typeUnmarshaler(t, extension.Tag)
-
- // t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate space to store the pointer/slice.
- value := reflect.New(t).Elem()
-
- var err error
- for {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- wire := int(x) & 7
-
- b, err = unmarshal(b, valToPointer(value.Addr()), wire)
- if err != nil {
- return nil, err
- }
-
- if len(b) == 0 {
- break
- }
- }
- return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
- if err != nil {
- return
- }
- }
- return
-}
-
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
- registeredExtensions := RegisteredExtensions(pb)
-
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return nil, nil
- }
- mu.Lock()
- defer mu.Unlock()
- extensions := make([]*ExtensionDesc, 0, len(emap))
- for extid, e := range emap {
- desc := e.desc
- if desc == nil {
- desc = registeredExtensions[extid]
- if desc == nil {
- desc = &ExtensionDesc{Field: extid}
- }
- }
-
- extensions = append(extensions, desc)
- }
- return extensions, nil
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- if epb, ok := pb.(extensionsBytes); ok {
- newb, err := encodeExtension(extension, value)
- if err != nil {
- return err
- }
- bb := epb.GetExtensions()
- *bb = append(*bb, newb...)
- return nil
- }
- epb, err := extendable(pb)
- if err != nil {
- return err
- }
- if err := checkExtensionTypes(epb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
- }
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
- return nil
-}
-
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
- if epb, doki := pb.(extensionsBytes); doki {
- ext := epb.GetExtensions()
- *ext = []byte{}
- return
- }
- epb, err := extendable(pb)
- if err != nil {
- return
- }
- m := epb.extensionsWrite()
- for k := range m {
- delete(m, k)
- }
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
- }
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
- }
- m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
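
Beyond the extension getters and setters, the deleted extensionProperties memoizes per-extension Properties with a read-lock-first, check-twice cache. A generic sketch of that caching pattern with illustrative names; it is not the package's API, only the same locking discipline:

```go
// Sketch of the double-checked, RWMutex-guarded cache pattern used by the
// deleted extensionProperties: try under a read lock first, then take the
// write lock and re-check before computing. Names here are illustrative.
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	sync.RWMutex
	m map[string]int
}

func (c *cache) get(key string, compute func() int) int {
	c.RLock()
	if v, ok := c.m[key]; ok {
		c.RUnlock()
		return v
	}
	c.RUnlock()

	c.Lock()
	defer c.Unlock()
	// Re-check: another goroutine may have filled the entry while we
	// were waiting for the write lock.
	if v, ok := c.m[key]; ok {
		return v
	}
	v := compute()
	c.m[key] = v
	return v
}

func main() {
	c := &cache{m: make(map[string]int)}
	fmt.Println(c.get("field-100", func() int { return 42 })) // computed once
	fmt.Println(c.get("field-100", func() int { return 0 }))  // served from cache
}
```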
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
deleted file mode 100644
index 53ebd8cc..00000000
--- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strings"
- "sync"
-)
-
-type extensionsBytes interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- GetExtensions() *[]byte
-}
-
-type slowExtensionAdapter struct {
- extensionsBytes
-}
-
-func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
- panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
-}
-
-func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- b := s.GetExtensions()
- m, err := BytesToExtensionsMap(*b)
- if err != nil {
- panic(err)
- }
- return m, notLocker{}
-}
-
-func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
- if reflect.ValueOf(pb).IsNil() {
- return ifnotset
- }
- value, err := GetExtension(pb, extension)
- if err != nil {
- return ifnotset
- }
- if value == nil {
- return ifnotset
- }
- if value.(*bool) == nil {
- return ifnotset
- }
- return *(value.(*bool))
-}
-
-func (this *Extension) Equal(that *Extension) bool {
- if err := this.Encode(); err != nil {
- return false
- }
- if err := that.Encode(); err != nil {
- return false
- }
- return bytes.Equal(this.enc, that.enc)
-}
-
-func (this *Extension) Compare(that *Extension) int {
- if err := this.Encode(); err != nil {
- return 1
- }
- if err := that.Encode(); err != nil {
- return -1
- }
- return bytes.Compare(this.enc, that.enc)
-}
-
-func SizeOfInternalExtension(m extendableProto) (n int) {
- info := getMarshalInfo(reflect.TypeOf(m))
- return info.sizeV1Extensions(m.extensionsWrite())
-}
-
-type sortableMapElem struct {
- field int32
- ext Extension
-}
-
-func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
- s := make(sortableExtensions, 0, len(m))
- for k, v := range m {
- s = append(s, &sortableMapElem{field: k, ext: v})
- }
- return s
-}
-
-type sortableExtensions []*sortableMapElem
-
-func (this sortableExtensions) Len() int { return len(this) }
-
-func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
-
-func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
-
-func (this sortableExtensions) String() string {
- sort.Sort(this)
- ss := make([]string, len(this))
- for i := range this {
- ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
- }
- return "map[" + strings.Join(ss, ",") + "]"
-}
-
-func StringFromInternalExtension(m extendableProto) string {
- return StringFromExtensionsMap(m.extensionsWrite())
-}
-
-func StringFromExtensionsMap(m map[int32]Extension) string {
- return newSortableExtensionsFromMap(m).String()
-}
-
-func StringFromExtensionsBytes(ext []byte) string {
- m, err := BytesToExtensionsMap(ext)
- if err != nil {
- panic(err)
- }
- return StringFromExtensionsMap(m)
-}
-
-func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
- return EncodeExtensionMap(m.extensionsWrite(), data)
-}
-
-func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
- o := 0
- for _, e := range m {
- if err := e.Encode(); err != nil {
- return 0, err
- }
- n := copy(data[o:], e.enc)
- if n != len(e.enc) {
- return 0, io.ErrShortBuffer
- }
- o += n
- }
- return o, nil
-}
-
-func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
- e := m[id]
- if err := e.Encode(); err != nil {
- return nil, err
- }
- return e.enc, nil
-}
-
-func size(buf []byte, wire int) (int, error) {
- switch wire {
- case WireVarint:
- _, n := DecodeVarint(buf)
- return n, nil
- case WireFixed64:
- return 8, nil
- case WireBytes:
- v, n := DecodeVarint(buf)
- return int(v) + n, nil
- case WireFixed32:
- return 4, nil
- case WireStartGroup:
- offset := 0
- for {
- u, n := DecodeVarint(buf[offset:])
- fwire := int(u & 0x7)
- offset += n
- if fwire == WireEndGroup {
- return offset, nil
- }
- s, err := size(buf[offset:], wire)
- if err != nil {
- return 0, err
- }
- offset += s
- }
- }
- return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
-}
-
-func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
- m := make(map[int32]Extension)
- i := 0
- for i < len(buf) {
- tag, n := DecodeVarint(buf[i:])
- if n <= 0 {
- return nil, fmt.Errorf("unable to decode varint")
- }
- fieldNum := int32(tag >> 3)
- wireType := int(tag & 0x7)
- l, err := size(buf[i+n:], wireType)
- if err != nil {
- return nil, err
- }
- end := i + int(l) + n
- m[int32(fieldNum)] = Extension{enc: buf[i:end]}
- i = end
- }
- return m, nil
-}
-
-func NewExtension(e []byte) Extension {
- ee := Extension{enc: make([]byte, len(e))}
- copy(ee.enc, e)
- return ee
-}
-
-func AppendExtension(e Message, tag int32, buf []byte) {
- if ee, eok := e.(extensionsBytes); eok {
- ext := ee.GetExtensions()
- *ext = append(*ext, buf...)
- return
- }
- if ee, eok := e.(extendableProto); eok {
- m := ee.extensionsWrite()
- ext := m[int32(tag)] // may be missing
- ext.enc = append(ext.enc, buf...)
- m[int32(tag)] = ext
- }
-}
-
-func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
- u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
- ei := u.getExtElemInfo(extension)
- v := value
- p := toAddrPointer(&v, ei.isptr)
- siz := ei.sizer(p, SizeVarint(ei.wiretag))
- buf := make([]byte, 0, siz)
- return ei.marshaler(buf, p, ei.wiretag, false)
-}
-
-func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
- o := 0
- for o < len(buf) {
- tag, n := DecodeVarint((buf)[o:])
- fieldNum := int32(tag >> 3)
- wireType := int(tag & 0x7)
- if o+n > len(buf) {
- return nil, fmt.Errorf("unable to decode extension")
- }
- l, err := size((buf)[o+n:], wireType)
- if err != nil {
- return nil, err
- }
- if int32(fieldNum) == extension.Field {
- if o+n+l > len(buf) {
- return nil, fmt.Errorf("unable to decode extension")
- }
- v, err := decodeExtension((buf)[o:o+n+l], extension)
- if err != nil {
- return nil, err
- }
- return v, nil
- }
- o += n + l
- }
- return defaultExtensionValue(extension)
-}
-
-func (this *Extension) Encode() error {
- if this.enc == nil {
- var err error
- this.enc, err = encodeExtension(this.desc, this.value)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (this Extension) GoString() string {
- if err := this.Encode(); err != nil {
- return fmt.Sprintf("error encoding extension: %v", err)
- }
- return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
-}
-
-func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
- typ := reflect.TypeOf(pb).Elem()
- ext, ok := extensionMaps[typ]
- if !ok {
- return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
- }
- desc, ok := ext[fieldNum]
- if !ok {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return SetExtension(pb, desc, value)
-}
-
-func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
- typ := reflect.TypeOf(pb).Elem()
- ext, ok := extensionMaps[typ]
- if !ok {
- return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
- }
- desc, ok := ext[fieldNum]
- if !ok {
- return nil, fmt.Errorf("unregistered field number %d", fieldNum)
- }
- return GetExtension(pb, desc)
-}
-
-func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
- x := &XXX_InternalExtensions{
- p: new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }),
- }
- x.p.extensionMap = m
- return *x
-}
-
-func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
- pb := extendable.(extendableProto)
- return pb.extensionsWrite()
-}
-
-func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
- ext := pb.GetExtensions()
- for offset < len(*ext) {
- tag, n1 := DecodeVarint((*ext)[offset:])
- fieldNum := int32(tag >> 3)
- wireType := int(tag & 0x7)
- n2, err := size((*ext)[offset+n1:], wireType)
- if err != nil {
- panic(err)
- }
- newOffset := offset + n1 + n2
- if fieldNum == theFieldNum {
- *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
- return offset
- }
- offset = newOffset
- }
- return -1
-}
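
BytesToExtensionsMap and size in the deleted file walk raw extension bytes by decoding each tag varint, splitting it into a field number and wire type, and skipping the payload accordingly. A standalone sketch of that scan (groups are left out for brevity; fieldOffsets is an illustrative helper, not part of the package):

```go
// Standalone sketch of the wire scanning done by the deleted
// BytesToExtensionsMap/size: decode each tag varint, split it into field
// number and wire type, and skip the payload. Groups are omitted.
package main

import "fmt"

const (
	wireVarint  = 0
	wireFixed64 = 1
	wireBytes   = 2
	wireFixed32 = 5
)

func decodeVarint(buf []byte) (uint64, int) {
	var x uint64
	for i, b := range buf {
		x |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// fieldOffsets maps field number -> start offset of each top-level field.
func fieldOffsets(buf []byte) (map[int32]int, error) {
	out := make(map[int32]int)
	for i := 0; i < len(buf); {
		tag, n := decodeVarint(buf[i:])
		if n == 0 {
			return nil, fmt.Errorf("truncated tag at offset %d", i)
		}
		field, wire := int32(tag>>3), int(tag&0x7)
		out[field] = i
		i += n
		switch wire {
		case wireVarint:
			_, m := decodeVarint(buf[i:])
			i += m
		case wireFixed64:
			i += 8
		case wireFixed32:
			i += 4
		case wireBytes:
			l, m := decodeVarint(buf[i:])
			i += m + int(l)
		default:
			return nil, fmt.Errorf("unsupported wire type %d", wire)
		}
	}
	return out, nil
}

func main() {
	// field 1: varint 150; field 2: length-delimited "hi"
	raw := []byte{0x08, 0x96, 0x01, 0x12, 0x02, 'h', 'i'}
	fmt.Println(fieldOffsets(raw)) // map[1:0 2:3] <nil>
}
```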
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
deleted file mode 100644
index d17f8020..00000000
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ /dev/null
@@ -1,967 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and an Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/gogo/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/gogo/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
- if e.field == "" {
- return fmt.Sprintf("proto: required field not set")
- }
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
- if e.field == "" {
- return "proto: invalid UTF-8 detected"
- }
- return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
- return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or an InvalidUTF8 error.
-func isNonFatal(err error) bool {
- if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
- return true
- }
- if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
- return true
- }
- return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether err was nil or non-fatal.
-// It returns false for any fatal error.
-func (nf *nonFatal) Merge(err error) (ok bool) {
- if err == nil {
- return true // not an error
- }
- if !isNonFatal(err) {
- return false // fatal error
- }
- if nf.E == nil {
- nf.E = err // store first instance of non-fatal error
- }
- return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexicographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
- p.deterministic = deterministic
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- sindex := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = sindex
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or T or []*T or []T
- switch f.Kind() {
- case reflect.Struct:
- setDefaults(f, recur, zeros)
-
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.Kind() == reflect.Ptr && e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to a slice of the fields,
- // with its scalar fields set to their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field cannot have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Struct:
- nestedMessage = true // non-nullable
-
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr, reflect.Struct:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{vs: vs}
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- case reflect.Bool:
- s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
- case reflect.String:
- s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
- default:
- panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that the generated code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion2 = true
-
-// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that the generated code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion1 = true
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
- marshal *marshalInfo
- unmarshal *unmarshalInfo
- merge *mergeInfo
- discard *discardInfo
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
deleted file mode 100644
index b3aa3919..00000000
--- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "encoding/json"
- "strconv"
-)
-
-type Sizer interface {
- Size() int
-}
-
-type ProtoSizer interface {
- ProtoSize() int
-}
-
-func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
- s, ok := m[value]
- if !ok {
- s = strconv.Itoa(int(value))
- }
- return json.Marshal(s)
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
deleted file mode 100644
index f48a7567..00000000
--- a/vendor/github.com/gogo/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "errors"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func unmarshalMessageSet(buf []byte, exts interface{}) error {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m = exts.extensionsWrite()
- case map[int32]Extension:
- m = exts
- default:
- return errors.New("proto: not an extension map")
- }
-
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index b6cad908..00000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
- "sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
- v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
- v := reflect.ValueOf(*i)
- u := reflect.New(v.Type())
- u.Elem().Set(v)
- return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
- return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
- n, m := s.Len(), s.Cap()
- if n < m {
- s.SetLen(n + 1)
- } else {
- s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
- }
- return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
- return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
- return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return p.v.Interface().(**int32)
-}
- func (p pointer) toInt32Slice() *[]int32 {
- return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().(*int32)
- }
- // an enum
- return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
- // Allocate value in a *int32. Possibly convert that to a *enum.
- // Then assign it to a **int32 or **enum.
- // Note: we can convert *int32 to *enum, but we can't convert
- // **int32 to **enum!
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().([]int32)
- }
- // an enum
- // Allocate a []int32, then assign []enum's values into it.
- // Note: we can't convert []enum to []int32.
- slice := p.v.Elem()
- s := make([]int32, slice.Len())
- for i := 0; i < slice.Len(); i++ {
- s[i] = int32(slice.Index(i).Int())
- }
- return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- p.v.Elem().Set(reflect.ValueOf(v))
- return
- }
- // an enum
- // Allocate a []enum, then assign []int32's values into it.
- // Note: we can't convert []enum to []int32.
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
- for i, x := range v {
- slice.Index(i).SetInt(int64(x))
- }
- p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
- grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
- return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
- return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
- return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
- return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
- return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
- return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
- return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
- return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
- return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
- return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
- return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
- p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
- grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
- if v == nil {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
- return
- }
- s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
- for _, p := range v {
- s = reflect.Append(s, p.v)
- }
- p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- if p.v.Elem().IsNil() {
- return pointer{v: p.v.Elem()}
- }
- return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- // TODO: check that p.v.Type().Elem() == t?
- return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-
-var atomicLock sync.Mutex
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
deleted file mode 100644
index 7ffd3c29..00000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
-)
-
-// TODO: untested, so probably incorrect.
-
-func (p pointer) getRef() pointer {
- return pointer{v: p.v.Addr()}
-}
-
-func (p pointer) appendRef(v pointer, typ reflect.Type) {
- slice := p.getSlice(typ)
- elem := v.asPointerTo(typ).Elem()
- newSlice := reflect.Append(slice, elem)
- slice.Set(newSlice)
-}
-
-func (p pointer) getSlice(typ reflect.Type) reflect.Value {
- sliceTyp := reflect.SliceOf(typ)
- slice := p.asPointerTo(sliceTyp)
- slice = slice.Elem()
- return slice
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index d55a335d..00000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "sync/atomic"
- "unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
- p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- // Super-tricky - read pointer out of data word of interface value.
- // Saves ~25ns over the equivalent:
- // return valToPointer(reflect.ValueOf(*i))
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
- // Super-tricky - read or get the address of data word of interface value.
- if isptr {
- // The interface is of pointer type, thus it is a direct interface.
- // The data word is the pointer data itself. We take its address.
- return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
- }
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- // For safety, we should panic if !f.IsValid, however calling panic causes
- // this to no longer be inlineable, which is a serious performance cost.
- /*
- if !f.IsValid() {
- panic("invalid field")
- }
- */
- return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
- return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
- return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
- return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return (**int32)(p.p)
- }
- func (p pointer) toInt32Slice() *[]int32 {
- return (*[]int32)(p.p)
- }
-*/
-func (p pointer) getInt32Ptr() *int32 {
- return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
- *(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
- return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
- *(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
- s := (*[]int32)(p.p)
- *s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
- return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
- return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
- return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
- return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
- return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
- return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
- return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
- return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
- return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
- return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We load it as []pointer.
- return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We store it as []pointer.
- *(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
- return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
- *(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
- s := (*[]unsafe.Pointer)(p.p)
- *s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- // Super-tricky - read pointer out of data word of interface value.
- return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
deleted file mode 100644
index aca8eed0..00000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "unsafe"
-)
-
-func (p pointer) getRef() pointer {
- return pointer{p: (unsafe.Pointer)(&p.p)}
-}
-
-func (p pointer) appendRef(v pointer, typ reflect.Type) {
- slice := p.getSlice(typ)
- elem := v.asPointerTo(typ).Elem()
- newSlice := reflect.Append(slice, elem)
- slice.Set(newSlice)
-}
-
-func (p pointer) getSlice(typ reflect.Type) reflect.Value {
- sliceTyp := reflect.SliceOf(typ)
- slice := p.asPointerTo(sliceTyp)
- slice = slice.Elem()
- return slice
-}
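A minimal sketch of the reflection pattern behind getSlice and appendRef in the file above (illustrative only, not part of the removed code): reflect.NewAt views raw memory as a typed slice, and reflect.Append grows it in place.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	xs := []int32{1, 2}
	p := unsafe.Pointer(&xs) // raw pointer to a []int32 header, like pointer.p

	// getSlice equivalent: view p as *[]int32 and dereference it.
	sliceTyp := reflect.SliceOf(reflect.TypeOf(int32(0)))
	slice := reflect.NewAt(sliceTyp, p).Elem()

	// appendRef equivalent: append one element through reflection.
	slice.Set(reflect.Append(slice, reflect.ValueOf(int32(3))))

	fmt.Println(xs) // [1 2 3]: the original slice header was updated in place
}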
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
deleted file mode 100644
index c9e5fa02..00000000
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ /dev/null
@@ -1,599 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "fmt"
- "log"
- "os"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
-)
-
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
-)
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
-type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
- OneofTypes map[string]*OneofProperties
-}
-
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
-type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
- WireType int
- Tag int
- Required bool
- Optional bool
- Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
- CustomType string
- CastType string
- StdTime bool
- StdDuration bool
- WktPointer bool
-
- stype reflect.Type // set for struct types only
- ctype reflect.Type // set for custom types only
- sprop *StructProperties // set for struct types only
-
- mtype reflect.Type // set for map types only
- MapKeyProp *Properties // set for map types only
- MapValProp *Properties // set for map types only
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += ","
- s += strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
- s += ",json=" + p.JSONName
- }
- if p.proto3 {
- s += ",proto3"
- }
- if p.oneof {
- s += ",oneof"
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- case "fixed32":
- p.WireType = WireFixed32
- case "fixed64":
- p.WireType = WireFixed64
- case "zigzag32":
- p.WireType = WireVarint
- case "zigzag64":
- p.WireType = WireVarint
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
-outer:
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
- p.Optional = true
- case f == "rep":
- p.Repeated = true
- case f == "packed":
- p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
- p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break outer
- }
- case strings.HasPrefix(f, "embedded="):
- p.OrigName = strings.Split(f, "=")[1]
- case strings.HasPrefix(f, "customtype="):
- p.CustomType = strings.Split(f, "=")[1]
- case strings.HasPrefix(f, "casttype="):
- p.CastType = strings.Split(f, "=")[1]
- case f == "stdtime":
- p.StdTime = true
- case f == "stdduration":
- p.StdDuration = true
- case f == "wktptr":
- p.WktPointer = true
- }
- }
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- isMap := typ.Kind() == reflect.Map
- if len(p.CustomType) > 0 && !isMap {
- p.ctype = typ
- p.setTag(lockGetProp)
- return
- }
- if p.StdTime && !isMap {
- p.setTag(lockGetProp)
- return
- }
- if p.StdDuration && !isMap {
- p.setTag(lockGetProp)
- return
- }
- if p.WktPointer && !isMap {
- p.setTag(lockGetProp)
- return
- }
- switch t1 := typ; t1.Kind() {
- case reflect.Struct:
- p.stype = typ
- case reflect.Ptr:
- if t1.Elem().Kind() == reflect.Struct {
- p.stype = t1.Elem()
- }
- case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- case reflect.Struct:
- p.stype = t3
- }
- case reflect.Struct:
- p.stype = t2
- }
-
- case reflect.Map:
-
- p.mtype = t1
- p.MapKeyProp = &Properties{}
- p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.MapValProp = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
-
- p.MapValProp.CustomType = p.CustomType
- p.MapValProp.StdDuration = p.StdDuration
- p.MapValProp.StdTime = p.StdTime
- p.MapValProp.WktPointer = p.WktPointer
- p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
- p.setTag(lockGetProp)
-}
-
-func (p *Properties) setTag(lockGetProp bool) {
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
- }
- }
-}
-
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
-// Init populates the properties from a protocol buffer struct tag.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
- p.Name = name
- p.OrigName = name
- if tag == "" {
- return
- }
- p.Parse(tag)
- p.setFieldProps(typ, f, lockGetProp)
-}
-
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
-func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- return sprop
- }
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
-}
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- return prop
- }
-
- prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
-
- isOneofMessage := false
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
-
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- isOneofMessage = true
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
- }
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
- }
- }
-
- // Re-order prop.order.
- sort.Sort(prop)
-
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
- var oots []interface{}
- _, _, _, oots = om.XXX_OneofFuncs()
-
- // Interpret oneof metadata.
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
- Prop: new(Properties),
- }
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
- }
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
- }
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
- }
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
- }
- prop.reqCount = reqCount
-
- return prop
-}
-
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-var enumStringMaps = make(map[string]map[int32]string)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
- if _, ok := enumStringMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumStringMaps[typeName] = unusedNameMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
- protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypedNils[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
- // Generated code always calls RegisterType with nil x.
- // This check is just for extra safety.
- protoTypedNils[name] = x
- } else {
- protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
- }
- revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
- if reflect.TypeOf(x).Kind() != reflect.Map {
- panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
- }
- if _, ok := protoMapTypes[name]; ok {
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoMapTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
- type xname interface {
- XXX_MessageName() string
- }
- if m, ok := x.(xname); ok {
- return m.XXX_MessageName()
- }
- return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
- if t, ok := protoTypedNils[name]; ok {
- return reflect.TypeOf(t)
- }
- return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
- protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
- protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
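To make the tag format handled by Properties.Parse above concrete, here is a small standalone sketch (simplified: it ignores def= values and the other options, and parseTag is an invented helper, not part of the removed package) that splits a generated protobuf struct tag into wire encoding, field number, and options.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTag splits a tag such as "bytes,1,opt,name=foo" the same way
// Properties.Parse does: wire encoding first, field number second, options after.
func parseTag(tag string) (wire string, num int, opts []string, err error) {
	fields := strings.Split(tag, ",")
	if len(fields) < 2 {
		return "", 0, nil, fmt.Errorf("tag has too few fields: %q", tag)
	}
	num, err = strconv.Atoi(fields[1])
	if err != nil {
		return "", 0, nil, err
	}
	return fields[0], num, fields[2:], nil
}

func main() {
	wire, num, opts, err := parseTag("bytes,1,opt,name=foo,proto3")
	if err != nil {
		panic(err)
	}
	fmt.Println(wire, num, opts) // bytes 1 [opt name=foo proto3]
}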
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
deleted file mode 100644
index 40ea3dd9..00000000
--- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
-)
-
-var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
-var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()
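properties_gogo.go only pins down reflect.Type values for the Sizer and ProtoSizer interfaces; table_marshal.go below checks them with Implements. A brief sketch of that idiom, using a stand-in sizer interface rather than the real proto types:

package main

import (
	"fmt"
	"reflect"
)

// sizer stands in for the proto.Sizer interface referenced above.
type sizer interface{ Size() int }

type msg struct{}

func (*msg) Size() int { return 0 }

func main() {
	// reflect.TypeOf((*sizer)(nil)).Elem() yields the interface type itself,
	// the same trick used to build sizerType and protosizerType.
	sizerType := reflect.TypeOf((*sizer)(nil)).Elem()

	t := reflect.TypeOf(msg{})
	fmt.Println(reflect.PtrTo(t).Implements(sizerType)) // true: *msg has Size()
	fmt.Println(t.Implements(sizerType))                // false: Size() has a pointer receiver
}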
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
deleted file mode 100644
index 5a5fd93f..00000000
--- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "io"
-)
-
-func Skip(data []byte) (n int, err error) {
- l := len(data)
- index := 0
- for index < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[index]
- index++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- index++
- if data[index-1] < 0x80 {
- break
- }
- }
- return index, nil
- case 1:
- index += 8
- return index, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[index]
- index++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- index += length
- return index, nil
- case 3:
- for {
- var innerWire uint64
- var start int = index
- for shift := uint(0); ; shift += 7 {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[index]
- index++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := Skip(data[start:])
- if err != nil {
- return 0, err
- }
- index = start + next
- }
- return index, nil
- case 4:
- return index, nil
- case 5:
- index += 4
- return index, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
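Skip above repeatedly decodes a base-128 varint to find each field's wire type. A self-contained sketch of just that varint loop, mirroring the shift-by-7 logic in the deleted function (decodeVarint is an invented name for illustration):

package main

import (
	"fmt"
	"io"
)

// decodeVarint reads a base-128 varint from data, as Skip's inner loop does:
// 7 payload bits per byte, with the high bit set on every byte but the last.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 0x96 0x01 encodes 150; the low 3 bits of a decoded key would be the wire type.
	v, n, err := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n, err) // 150 2 <nil>
}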
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
deleted file mode 100644
index 9b1538d0..00000000
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go
+++ /dev/null
@@ -1,3006 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// a sizer takes a pointer to a field and the size of its tag, computes the size of
-// the encoded data.
-type sizer func(pointer, int) int
-
-// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
-// marshals the field to the end of the slice, returns the slice and error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
- typ reflect.Type
- fields []*marshalFieldInfo
- unrecognized field // offset of XXX_unrecognized
- extensions field // offset of XXX_InternalExtensions
- v1extensions field // offset of XXX_extensions
- sizecache field // offset of XXX_sizecache
- initialized int32 // 0 -- only typ is set, 1 -- fully initialized
- messageset bool // uses message set wire format
- hasmarshaler bool // has custom marshaler
- sync.RWMutex // protect extElems map, also for initialization
- extElems map[int32]*marshalElemInfo // info of extension elements
-
- hassizer bool // has custom sizer
- hasprotosizer bool // has custom protosizer
-
- bytesExtensions field // offset of XXX_extensions where the field type is []byte
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
- field field
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isPointer bool
- required bool // field is required
- name string // name of the field, for error reporting
- oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
-}
-
-var (
- marshalInfoMap = map[reflect.Type]*marshalInfo{}
- marshalInfoLock sync.Mutex
-
- uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns may not necessarily be initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
- marshalInfoLock.Lock()
- u, ok := marshalInfoMap[t]
- if !ok {
- u = &marshalInfo{typ: t}
- marshalInfoMap[t] = u
- }
- marshalInfoLock.Unlock()
- return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want to crash in this case.
- return 0
- }
- return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want to crash in this case.
- return b, ErrNil
- }
- return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
- // u := a.marshal, but atomically.
- // We use an atomic here to ensure memory consistency.
- u := atomicLoadMarshalInfo(&a.marshal)
- if u == nil {
- // Get marshal information from type of message.
- t := reflect.ValueOf(msg).Type()
- if t.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
- }
- u = getMarshalInfo(t.Elem())
- // Store it in the cache for later users.
- // a.marshal = u, but atomically.
- atomicStoreMarshalInfo(&a.marshal, u)
- }
- return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- // Uses the message's Size method if available
- if u.hassizer {
- s := ptr.asPointerTo(u.typ).Interface().(Sizer)
- return s.Size()
- }
- // Uses the message's ProtoSize method if available
- if u.hasprotosizer {
- s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
- return s.ProtoSize()
- }
-
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b, _ := m.Marshal()
- return len(b)
- }
-
- n := 0
- for _, f := range u.fields {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- n += f.sizer(ptr.offset(f.field), f.tagsize)
- }
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- n += u.sizeMessageSet(e)
- } else {
- n += u.sizeExtensions(e)
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- n += u.sizeV1Extensions(m)
- }
- if u.bytesExtensions.IsValid() {
- s := *ptr.offset(u.bytesExtensions).toBytes()
- n += len(s)
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- n += len(s)
- }
-
- // cache the result for use in marshal
- if u.sizecache.IsValid() {
- atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
- }
- return n
-}
-
-// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
-// fall back to compute the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
- if u.sizecache.IsValid() {
- return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
- }
- return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
-// the encoded data to the end of the slice, returns the slice and error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b1, err := m.Marshal()
- b = append(b, b1...)
- return b, err
- }
-
- var err, errLater error
- // The old marshaler encodes extensions at beginning.
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- b, err = u.appendMessageSet(b, e, deterministic)
- } else {
- b, err = u.appendExtensions(b, e, deterministic)
- }
- if err != nil {
- return b, err
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- b, err = u.appendV1Extensions(b, m, deterministic)
- if err != nil {
- return b, err
- }
- }
- if u.bytesExtensions.IsValid() {
- s := *ptr.offset(u.bytesExtensions).toBytes()
- b = append(b, s...)
- }
- for _, f := range u.fields {
- if f.required {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // Required field is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name}
- }
- continue
- }
- }
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
- if err != nil {
- if err1, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name + "." + err1.field}
- }
- continue
- }
- if err == errRepeatedHasNil {
- err = errors.New("proto: repeated field " + f.name + " has nil element")
- }
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return b, err
- }
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- b = append(b, s...)
- }
- return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
- u.Lock()
- defer u.Unlock()
- if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
- return
- }
-
- t := u.typ
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.v1extensions = invalidField
- u.bytesExtensions = invalidField
- u.sizecache = invalidField
- isOneofMessage := false
-
- if reflect.PtrTo(t).Implements(sizerType) {
- u.hassizer = true
- }
- if reflect.PtrTo(t).Implements(protosizerType) {
- u.hasprotosizer = true
- }
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if reflect.PtrTo(t).Implements(marshalerType) {
- u.hasmarshaler = true
- atomic.StoreInt32(&u.initialized, 1)
- return
- }
-
- n := t.NumField()
-
- // deal with XXX fields first
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Tag.Get("protobuf_oneof") != "" {
- isOneofMessage = true
- }
- if !strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- switch f.Name {
- case "XXX_sizecache":
- u.sizecache = toField(&f)
- case "XXX_unrecognized":
- u.unrecognized = toField(&f)
- case "XXX_InternalExtensions":
- u.extensions = toField(&f)
- u.messageset = f.Tag.Get("protobuf_messageset") == "1"
- case "XXX_extensions":
- if f.Type.Kind() == reflect.Map {
- u.v1extensions = toField(&f)
- } else {
- u.bytesExtensions = toField(&f)
- }
- case "XXX_NoUnkeyedLiteral":
- // nothing to do
- default:
- panic("unknown XXX field: " + f.Name)
- }
- n--
- }
-
- // get oneof implementers
- var oneofImplementers []interface{}
- // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- }
-
- // normal fields
- fields := make([]marshalFieldInfo, n) // batch allocation
- u.fields = make([]*marshalFieldInfo, 0, n)
- for i, j := 0, 0; i < t.NumField(); i++ {
- f := t.Field(i)
-
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- field := &fields[j]
- j++
- field.name = f.Name
- u.fields = append(u.fields, field)
- if f.Tag.Get("protobuf_oneof") != "" {
- field.computeOneofFieldInfo(&f, oneofImplementers)
- continue
- }
- if f.Tag.Get("protobuf") == "" {
- // field has no tag (not in generated message), ignore it
- u.fields = u.fields[:len(u.fields)-1]
- j--
- continue
- }
- field.computeMarshalFieldInfo(&f)
- }
-
- // fields are marshaled in tag order on the wire.
- sort.Sort(byTag(u.fields))
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int { return len(a) }
-func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
- // get from cache first
- u.RLock()
- e, ok := u.extElems[desc.Field]
- u.RUnlock()
- if ok {
- return e
- }
-
- t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
- tags := strings.Split(desc.Tag, ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizr, marshalr := typeMarshaler(t, tags, false, false)
- e = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizr,
- marshaler: marshalr,
- isptr: t.Kind() == reflect.Ptr,
- }
-
- // update cache
- u.Lock()
- if u.extElems == nil {
- u.extElems = make(map[int32]*marshalElemInfo)
- }
- u.extElems[desc.Field] = e
- u.Unlock()
- return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
- // parse protobuf tag of the field.
- // tag has format of "bytes,49,opt,name=foo,def=hello!"
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- if tags[0] == "" {
- return
- }
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if tags[2] == "req" {
- fi.required = true
- }
- fi.setTag(f, tag, wt)
- fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
- fi.field = toField(f)
- fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
- fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
- ityp := f.Type // interface type
- for _, o := range oneofImplementers {
- t := reflect.TypeOf(o)
- if !t.Implements(ityp) {
- continue
- }
- sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
- tags := strings.Split(sf.Tag.Get("protobuf"), ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
- fi.oneofElems[t.Elem()] = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizr,
- marshaler: marshalr,
- }
- }
-}
-
-type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
- switch encoding {
- case "fixed32":
- return WireFixed32
- case "fixed64":
- return WireFixed64
- case "varint", "zigzag32", "zigzag64":
- return WireVarint
- case "bytes":
- return WireBytes
- case "group":
- return WireStartGroup
- }
- panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
- fi.field = toField(f)
- fi.wiretag = uint64(tag)<<3 | wt
- fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
- switch f.Type.Kind() {
- case reflect.Map:
- // map field
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeMapMarshaler(f)
- return
- case reflect.Ptr, reflect.Slice:
- fi.isPointer = true
- }
- fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
- encoding := tags[0]
-
- pointer := false
- slice := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- packed := false
- proto3 := false
- ctype := false
- isTime := false
- isDuration := false
- isWktPointer := false
- validateUTF8 := true
- for i := 2; i < len(tags); i++ {
- if tags[i] == "packed" {
- packed = true
- }
- if tags[i] == "proto3" {
- proto3 = true
- }
- if strings.HasPrefix(tags[i], "customtype=") {
- ctype = true
- }
- if tags[i] == "stdtime" {
- isTime = true
- }
- if tags[i] == "stdduration" {
- isDuration = true
- }
- if tags[i] == "wktptr" {
- isWktPointer = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
- if !proto3 && !pointer && !slice {
- nozero = false
- }
-
- if ctype {
- if reflect.PtrTo(t).Implements(customType) {
- if slice {
- return makeMessageRefSliceMarshaler(getMarshalInfo(t))
- }
- if pointer {
- return makeCustomPtrMarshaler(getMarshalInfo(t))
- }
- return makeCustomMarshaler(getMarshalInfo(t))
- } else {
- panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
- }
- }
-
- if isTime {
- if pointer {
- if slice {
- return makeTimePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeTimePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeTimeSliceMarshaler(getMarshalInfo(t))
- }
- return makeTimeMarshaler(getMarshalInfo(t))
- }
-
- if isDuration {
- if pointer {
- if slice {
- return makeDurationPtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeDurationPtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeDurationSliceMarshaler(getMarshalInfo(t))
- }
- return makeDurationMarshaler(getMarshalInfo(t))
- }
-
- if isWktPointer {
- switch t.Kind() {
- case reflect.Float64:
- if pointer {
- if slice {
- return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdDoubleValueMarshaler(getMarshalInfo(t))
- case reflect.Float32:
- if pointer {
- if slice {
- return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdFloatValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdFloatValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdFloatValueMarshaler(getMarshalInfo(t))
- case reflect.Int64:
- if pointer {
- if slice {
- return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt64ValueMarshaler(getMarshalInfo(t))
- case reflect.Uint64:
- if pointer {
- if slice {
- return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt64ValueMarshaler(getMarshalInfo(t))
- case reflect.Int32:
- if pointer {
- if slice {
- return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt32ValueMarshaler(getMarshalInfo(t))
- case reflect.Uint32:
- if pointer {
- if slice {
- return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt32ValueMarshaler(getMarshalInfo(t))
- case reflect.Bool:
- if pointer {
- if slice {
- return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBoolValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdBoolValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBoolValueMarshaler(getMarshalInfo(t))
- case reflect.String:
- if pointer {
- if slice {
- return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdStringValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdStringValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdStringValueMarshaler(getMarshalInfo(t))
- case uint8SliceType:
- if pointer {
- if slice {
- return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBytesValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdBytesValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBytesValueMarshaler(getMarshalInfo(t))
- default:
- panic(fmt.Sprintf("unknown wktpointer type %#v", t))
- }
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return sizeBoolPtr, appendBoolPtr
- }
- if slice {
- if packed {
- return sizeBoolPackedSlice, appendBoolPackedSlice
- }
- return sizeBoolSlice, appendBoolSlice
- }
- if nozero {
- return sizeBoolValueNoZero, appendBoolValueNoZero
- }
- return sizeBoolValue, appendBoolValue
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixed32Ptr, appendFixed32Ptr
- }
- if slice {
- if packed {
- return sizeFixed32PackedSlice, appendFixed32PackedSlice
- }
- return sizeFixed32Slice, appendFixed32Slice
- }
- if nozero {
- return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
- }
- return sizeFixed32Value, appendFixed32Value
- case "varint":
- if pointer {
- return sizeVarint32Ptr, appendVarint32Ptr
- }
- if slice {
- if packed {
- return sizeVarint32PackedSlice, appendVarint32PackedSlice
- }
- return sizeVarint32Slice, appendVarint32Slice
- }
- if nozero {
- return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
- }
- return sizeVarint32Value, appendVarint32Value
- }
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixedS32Ptr, appendFixedS32Ptr
- }
- if slice {
- if packed {
- return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
- }
- return sizeFixedS32Slice, appendFixedS32Slice
- }
- if nozero {
- return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
- }
- return sizeFixedS32Value, appendFixedS32Value
- case "varint":
- if pointer {
- return sizeVarintS32Ptr, appendVarintS32Ptr
- }
- if slice {
- if packed {
- return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
- }
- return sizeVarintS32Slice, appendVarintS32Slice
- }
- if nozero {
- return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
- }
- return sizeVarintS32Value, appendVarintS32Value
- case "zigzag32":
- if pointer {
- return sizeZigzag32Ptr, appendZigzag32Ptr
- }
- if slice {
- if packed {
- return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
- }
- return sizeZigzag32Slice, appendZigzag32Slice
- }
- if nozero {
- return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
- }
- return sizeZigzag32Value, appendZigzag32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixed64Ptr, appendFixed64Ptr
- }
- if slice {
- if packed {
- return sizeFixed64PackedSlice, appendFixed64PackedSlice
- }
- return sizeFixed64Slice, appendFixed64Slice
- }
- if nozero {
- return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
- }
- return sizeFixed64Value, appendFixed64Value
- case "varint":
- if pointer {
- return sizeVarint64Ptr, appendVarint64Ptr
- }
- if slice {
- if packed {
- return sizeVarint64PackedSlice, appendVarint64PackedSlice
- }
- return sizeVarint64Slice, appendVarint64Slice
- }
- if nozero {
- return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
- }
- return sizeVarint64Value, appendVarint64Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixedS64Ptr, appendFixedS64Ptr
- }
- if slice {
- if packed {
- return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
- }
- return sizeFixedS64Slice, appendFixedS64Slice
- }
- if nozero {
- return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
- }
- return sizeFixedS64Value, appendFixedS64Value
- case "varint":
- if pointer {
- return sizeVarintS64Ptr, appendVarintS64Ptr
- }
- if slice {
- if packed {
- return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
- }
- return sizeVarintS64Slice, appendVarintS64Slice
- }
- if nozero {
- return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
- }
- return sizeVarintS64Value, appendVarintS64Value
- case "zigzag64":
- if pointer {
- return sizeZigzag64Ptr, appendZigzag64Ptr
- }
- if slice {
- if packed {
- return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
- }
- return sizeZigzag64Slice, appendZigzag64Slice
- }
- if nozero {
- return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
- }
- return sizeZigzag64Value, appendZigzag64Value
- }
- case reflect.Float32:
- if pointer {
- return sizeFloat32Ptr, appendFloat32Ptr
- }
- if slice {
- if packed {
- return sizeFloat32PackedSlice, appendFloat32PackedSlice
- }
- return sizeFloat32Slice, appendFloat32Slice
- }
- if nozero {
- return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
- }
- return sizeFloat32Value, appendFloat32Value
- case reflect.Float64:
- if pointer {
- return sizeFloat64Ptr, appendFloat64Ptr
- }
- if slice {
- if packed {
- return sizeFloat64PackedSlice, appendFloat64PackedSlice
- }
- return sizeFloat64Slice, appendFloat64Slice
- }
- if nozero {
- return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
- }
- return sizeFloat64Value, appendFloat64Value
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return sizeStringPtr, appendUTF8StringPtr
- }
- if slice {
- return sizeStringSlice, appendUTF8StringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendUTF8StringValueNoZero
- }
- return sizeStringValue, appendUTF8StringValue
- }
- if pointer {
- return sizeStringPtr, appendStringPtr
- }
- if slice {
- return sizeStringSlice, appendStringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendStringValueNoZero
- }
- return sizeStringValue, appendStringValue
- case reflect.Slice:
- if slice {
- return sizeBytesSlice, appendBytesSlice
- }
- if oneof {
- // Oneof bytes field may also have "proto3" tag.
- // We want to marshal it as a oneof field. Do this
- // check before the proto3 check.
- return sizeBytesOneof, appendBytesOneof
- }
- if proto3 {
- return sizeBytes3, appendBytes3
- }
- return sizeBytes, appendBytes
- case reflect.Struct:
- switch encoding {
- case "group":
- if slice {
- return makeGroupSliceMarshaler(getMarshalInfo(t))
- }
- return makeGroupMarshaler(getMarshalInfo(t))
- case "bytes":
- if pointer {
- if slice {
- return makeMessageSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageMarshaler(getMarshalInfo(t))
- } else {
- if slice {
- return makeMessageRefSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageRefMarshaler(getMarshalInfo(t))
- }
- }
- }
- panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(v) + tagsize
- }
- return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
- }
- return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
- }
- return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeBoolValue(_ pointer, tagsize int) int {
- return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toBool()
- if !v {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
- p := *ptr.toBoolPtr()
- if p == nil {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return 0
- }
- return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- if v == "" {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
- p := *ptr.toStringPtr()
- if p == nil {
- return 0
- }
- v := *p
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
- s := *ptr.toStringSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if v == nil {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBytesSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24))
- return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24),
- byte(v>>32),
- byte(v>>40),
- byte(v>>48),
- byte(v>>56))
- return b
-}
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
- // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
- // have non-leaf inliner.
- switch {
- case v < 1<<7:
- b = append(b, byte(v))
- case v < 1<<14:
- b = append(b,
- byte(v&0x7f|0x80),
- byte(v>>7))
- case v < 1<<21:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte(v>>14))
- case v < 1<<28:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte(v>>21))
- case v < 1<<35:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte(v>>28))
- case v < 1<<42:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte(v>>35))
- case v < 1<<49:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte(v>>42))
- case v < 1<<56:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte(v>>49))
- case v < 1<<63:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte(v>>56))
- default:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte((v>>56)&0x7f|0x80),
- 1)
- }
- return b
-}
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, *p)
- return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(*p))
- return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(*p))
- return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, *p)
- return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(*p))
- return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(*p))
- return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, *p)
- return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- if !v {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = append(b, 1)
- return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toBoolPtr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- if *p {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(len(s)))
- for _, v := range s {
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toStringSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- s := *ptr.toStringSlice()
- for _, v := range s {
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if v == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBytesSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- return u.size(p) + 2*tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- var err error
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, p, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- return b, err
- }
-}
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- n += u.size(v) + 2*tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, v, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.size(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(p)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, p, deterministic)
- }
-}
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
- // figure out key and value type
- t := f.Type
- keyType := t.Key()
- valType := t.Elem()
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- stdOptions := false
- for _, t := range tags {
- if strings.HasPrefix(t, "customtype=") {
- valTags = append(valTags, t)
- }
- if t == "stdtime" {
- valTags = append(valTags, t)
- stdOptions = true
- }
- if t == "stdduration" {
- valTags = append(valTags, t)
- stdOptions = true
- }
- if t == "wktptr" {
- valTags = append(valTags, t)
- }
- }
- keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
- valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
- keyWireTag := 1<<3 | wiretype(keyTags[0])
- valWireTag := 2<<3 | wiretype(valTags[0])
-
- // We create an interface to get the addresses of the map key and value.
- // If value is pointer-typed, the interface is a direct interface, the
- // idata itself is the value. Otherwise, the idata is the pointer to the
- // value.
- // Key cannot be pointer-typed.
- valIsPtr := valType.Kind() == reflect.Ptr
-
- // If value is a message with nested maps, calling
- // valSizer in marshal may be quadratic. We should use
- // cached version in marshal (but not in size).
- // If value is not message type, we don't have size cache,
- // but it cannot be nested either. Just use valSizer.
- valCachedSizer := valSizer
- if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct {
- u := getMarshalInfo(valType.Elem())
- valCachedSizer = func(ptr pointer, tagsize int) int {
- // Same as message sizer, but use cache.
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.cachedsize(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- }
- }
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(t).Elem() // the map
- n := 0
- for _, k := range m.MapKeys() {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(t).Elem() // the map
- var err error
- keys := m.MapKeys()
- if len(keys) > 1 && deterministic {
- sort.Sort(mapKeys(keys))
- }
-
- var nerr nonFatal
- for _, k := range keys {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
- b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- b = appendVarint(b, uint64(siz))
- b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
- // Oneof field is an interface. We need to get the actual data type on the fly.
- t := f.Type
- return func(ptr pointer, _ int) int {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return 0
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- e := fi.oneofElems[telem]
- return e.sizer(p, e.tagsize)
- },
- func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return b, nil
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
- return b, errOneofHasNil
- }
- e := fi.oneofElems[telem]
- return e.marshaler(b, p, e.wiretag, deterministic)
- }
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- n += ei.sizer(p, ei.tagsize)
- }
- mu.Unlock()
- return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- // Not sure this is required, but the old code does it.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// message set format is:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for id, e := range m {
- n += 2 // start group, end group. tag = 1 (size=1)
- n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- siz := len(msgWithLen)
- n += siz + 1 // message, tag = 3 (size=1)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- n += ei.sizer(p, 1) // message, tag = 3 (size=1)
- }
- mu.Unlock()
- return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for id, e := range m {
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b = append(b, 1<<3|WireEndGroup)
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, id := range keys {
- e := m[int32(id)]
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- b = append(b, 1<<3|WireEndGroup)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
- if m == nil {
- return 0
- }
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- n += ei.sizer(p, ei.tagsize)
- }
- return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
- if m == nil {
- return b, nil
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- var err error
- var nerr nonFatal
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
- XXX_Size() int
- XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
- if m, ok := pb.(newMarshaler); ok {
- return m.XXX_Size()
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, _ := m.Marshal()
- return len(b)
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return 0
- }
- var info InternalMessageInfo
- return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- b := make([]byte, 0, siz)
- return m.XXX_Marshal(b, false)
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- return m.Marshal()
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return nil, ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- b := make([]byte, 0, siz)
- return info.Marshal(b, pb, false)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
- var err error
- if p.deterministic {
- if _, ok := pb.(Marshaler); ok {
- return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb)
- }
- }
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
- return err
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- var b []byte
- b, err = m.Marshal()
- p.buf = append(p.buf, b...)
- return err
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
- return err
-}
-
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
- need := len(p.buf) + n
- if need <= cap(p.buf) {
- return
- }
- newCap := len(p.buf) * 2
- if newCap < need {
- newCap = need
- }
- p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
deleted file mode 100644
index 997f57c1..00000000
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
- "time"
-)
-
-// makeMessageRefMarshaler differs a bit from makeMessageMarshaler
-// It marshals a message T instead of a *T
-func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- siz := u.size(ptr)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(ptr)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, ptr, deterministic)
- }
-}
-
-// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler
-// It marshals a slice of messages []T instead of []*T
-func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- e := elem.Interface()
- v := toAddrPointer(&e, false)
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- var err, errreq error
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- e := elem.Interface()
- v := toAddrPointer(&e, false)
- b = appendVarint(b, wiretag)
- siz := u.size(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if err != nil {
- if _, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = err
- }
- continue
- }
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
-
- return b, errreq
- }
-}
-
-func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
- siz := m.Size()
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
- siz := m.Size()
- buf, err := m.Marshal()
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(u.typ).Interface().(custom)
- siz := m.Size()
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(u.typ).Interface().(custom)
- siz := m.Size()
- buf, err := m.Marshal()
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return nil, err
- }
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return nil, err
- }
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(time.Time)
- ts, err := timestampProto(t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(time.Time)
- ts, err := timestampProto(t)
- if err != nil {
- return nil, err
- }
- siz := Size(ts)
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return nil, err
- }
- siz := Size(ts)
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
- dur := durationProto(*d)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
- dur := durationProto(*d)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(time.Duration)
- dur := durationProto(d)
- siz := Size(dur)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(time.Duration)
- dur := durationProto(d)
- siz := Size(dur)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
deleted file mode 100644
index f520106e..00000000
--- a/vendor/github.com/gogo/protobuf/proto/table_merge.go
+++ /dev/null
@@ -1,657 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
- mi := atomicLoadMergeInfo(&a.merge)
- if mi == nil {
- mi = getMergeInfo(reflect.TypeOf(dst).Elem())
- atomicStoreMergeInfo(&a.merge, mi)
- }
- mi.merge(toPointer(&dst), toPointer(&src))
-}
-
-type mergeInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []mergeFieldInfo
- unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
-
- // isPointer reports whether the value in the field is a pointer.
- // This is true for the following situations:
- // * Pointer to struct
- // * Pointer to basic type (proto2 only)
- // * Slice (first value in slice header is a pointer)
- // * String (first value in string header is a pointer)
- isPointer bool
-
- // basicWidth reports the width of the field assuming that it is directly
- // embedded in the struct (as is the case for basic types in proto3).
- // The possible values are:
- // 0: invalid
- // 1: bool
- // 4: int32, uint32, float32
- // 8: int64, uint64, float64
- basicWidth int
-
- // Where dst and src are pointers to the types being merged.
- merge func(dst, src pointer)
-}
-
-var (
- mergeInfoMap = map[reflect.Type]*mergeInfo{}
- mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
- mergeInfoLock.Lock()
- defer mergeInfoLock.Unlock()
- mi := mergeInfoMap[t]
- if mi == nil {
- mi = &mergeInfo{typ: t}
- mergeInfoMap[t] = mi
- }
- return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
- if dst.isNil() {
- panic("proto: nil destination")
- }
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&mi.initialized) == 0 {
- mi.computeMergeInfo()
- }
-
- for _, fi := range mi.fields {
- sfp := src.offset(fi.field)
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
- continue
- }
- if fi.basicWidth > 0 {
- switch {
- case fi.basicWidth == 1 && !*sfp.toBool():
- continue
- case fi.basicWidth == 4 && *sfp.toUint32() == 0:
- continue
- case fi.basicWidth == 8 && *sfp.toUint64() == 0:
- continue
- }
- }
- }
-
- dfp := dst.offset(fi.field)
- fi.merge(dfp, sfp)
- }
-
- // TODO: Make this faster?
- out := dst.asPointerTo(mi.typ).Elem()
- in := src.asPointerTo(mi.typ).Elem()
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- if mi.unrecognized.IsValid() {
- if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
- *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
- }
- }
-}
-
-func (mi *mergeInfo) computeMergeInfo() {
- mi.lock.Lock()
- defer mi.lock.Unlock()
- if mi.initialized != 0 {
- return
- }
- t := mi.typ
- n := t.NumField()
-
- props := GetProperties(t)
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- mfi := mergeFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- switch tf.Kind() {
- case reflect.Ptr, reflect.Slice, reflect.String:
- // As a special case, we assume slices and strings are pointers
- // since we know that the first field in the SliceSlice or
- // StringHeader is a data pointer.
- mfi.isPointer = true
- case reflect.Bool:
- mfi.basicWidth = 1
- case reflect.Int32, reflect.Uint32, reflect.Float32:
- mfi.basicWidth = 4
- case reflect.Int64, reflect.Uint64, reflect.Float64:
- mfi.basicWidth = 8
- }
- }
-
- // Unwrap tf to get at its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + tf.Name())
- }
-
- switch tf.Kind() {
- case reflect.Int32:
- switch {
- case isSlice: // E.g., []int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
- /*
- sfsp := src.toInt32Slice()
- if *sfsp != nil {
- dfsp := dst.toInt32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- */
- sfs := src.getInt32Slice()
- if sfs != nil {
- dfs := dst.getInt32Slice()
- dfs = append(dfs, sfs...)
- if dfs == nil {
- dfs = []int32{}
- }
- dst.setInt32Slice(dfs)
- }
- }
- case isPointer: // E.g., *int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
- /*
- sfpp := src.toInt32Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt32Ptr()
- if *dfpp == nil {
- *dfpp = Int32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- */
- sfp := src.getInt32Ptr()
- if sfp != nil {
- dfp := dst.getInt32Ptr()
- if dfp == nil {
- dst.setInt32Ptr(*sfp)
- } else {
- *dfp = *sfp
- }
- }
- }
- default: // E.g., int32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt32(); v != 0 {
- *dst.toInt32() = v
- }
- }
- }
- case reflect.Int64:
- switch {
- case isSlice: // E.g., []int64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toInt64Slice()
- if *sfsp != nil {
- dfsp := dst.toInt64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- }
- case isPointer: // E.g., *int64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toInt64Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt64Ptr()
- if *dfpp == nil {
- *dfpp = Int64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., int64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt64(); v != 0 {
- *dst.toInt64() = v
- }
- }
- }
- case reflect.Uint32:
- switch {
- case isSlice: // E.g., []uint32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint32Slice()
- if *sfsp != nil {
- dfsp := dst.toUint32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint32{}
- }
- }
- }
- case isPointer: // E.g., *uint32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint32Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint32Ptr()
- if *dfpp == nil {
- *dfpp = Uint32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint32(); v != 0 {
- *dst.toUint32() = v
- }
- }
- }
- case reflect.Uint64:
- switch {
- case isSlice: // E.g., []uint64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint64Slice()
- if *sfsp != nil {
- dfsp := dst.toUint64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint64{}
- }
- }
- }
- case isPointer: // E.g., *uint64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint64Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint64Ptr()
- if *dfpp == nil {
- *dfpp = Uint64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint64(); v != 0 {
- *dst.toUint64() = v
- }
- }
- }
- case reflect.Float32:
- switch {
- case isSlice: // E.g., []float32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat32Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float32{}
- }
- }
- }
- case isPointer: // E.g., *float32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat32Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat32Ptr()
- if *dfpp == nil {
- *dfpp = Float32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat32(); v != 0 {
- *dst.toFloat32() = v
- }
- }
- }
- case reflect.Float64:
- switch {
- case isSlice: // E.g., []float64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat64Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float64{}
- }
- }
- }
- case isPointer: // E.g., *float64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat64Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat64Ptr()
- if *dfpp == nil {
- *dfpp = Float64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat64(); v != 0 {
- *dst.toFloat64() = v
- }
- }
- }
- case reflect.Bool:
- switch {
- case isSlice: // E.g., []bool
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toBoolSlice()
- if *sfsp != nil {
- dfsp := dst.toBoolSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []bool{}
- }
- }
- }
- case isPointer: // E.g., *bool
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toBoolPtr()
- if *sfpp != nil {
- dfpp := dst.toBoolPtr()
- if *dfpp == nil {
- *dfpp = Bool(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., bool
- mfi.merge = func(dst, src pointer) {
- if v := *src.toBool(); v {
- *dst.toBool() = v
- }
- }
- }
- case reflect.String:
- switch {
- case isSlice: // E.g., []string
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toStringSlice()
- if *sfsp != nil {
- dfsp := dst.toStringSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []string{}
- }
- }
- }
- case isPointer: // E.g., *string
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toStringPtr()
- if *sfpp != nil {
- dfpp := dst.toStringPtr()
- if *dfpp == nil {
- *dfpp = String(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., string
- mfi.merge = func(dst, src pointer) {
- if v := *src.toString(); v != "" {
- *dst.toString() = v
- }
- }
- }
- case reflect.Slice:
- isProto3 := props.Prop[i].proto3
- switch {
- case isPointer:
- panic("bad pointer in byte slice case in " + tf.Name())
- case tf.Elem().Kind() != reflect.Uint8:
- panic("bad element kind in byte slice case in " + tf.Name())
- case isSlice: // E.g., [][]byte
- mfi.merge = func(dst, src pointer) {
- sbsp := src.toBytesSlice()
- if *sbsp != nil {
- dbsp := dst.toBytesSlice()
- for _, sb := range *sbsp {
- if sb == nil {
- *dbsp = append(*dbsp, nil)
- } else {
- *dbsp = append(*dbsp, append([]byte{}, sb...))
- }
- }
- if *dbsp == nil {
- *dbsp = [][]byte{}
- }
- }
- }
- default: // E.g., []byte
- mfi.merge = func(dst, src pointer) {
- sbp := src.toBytes()
- if *sbp != nil {
- dbp := dst.toBytes()
- if !isProto3 || len(*sbp) > 0 {
- *dbp = append([]byte{}, *sbp...)
- }
- }
- }
- }
- case reflect.Struct:
- switch {
- case !isPointer:
- mergeInfo := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- mergeInfo.merge(dst, src)
- }
- case isSlice: // E.g., []*pb.T
- mergeInfo := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sps := src.getPointerSlice()
- if sps != nil {
- dps := dst.getPointerSlice()
- for _, sp := range sps {
- var dp pointer
- if !sp.isNil() {
- dp = valToPointer(reflect.New(tf))
- mergeInfo.merge(dp, sp)
- }
- dps = append(dps, dp)
- }
- if dps == nil {
- dps = []pointer{}
- }
- dst.setPointerSlice(dps)
- }
- }
- default: // E.g., *pb.T
- mergeInfo := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- dp := dst.getPointer()
- if dp.isNil() {
- dp = valToPointer(reflect.New(tf))
- dst.setPointer(dp)
- }
- mergeInfo.merge(dp, sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in map case in " + tf.Name())
- default: // E.g., map[K]V
- mfi.merge = func(dst, src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- dm := dst.asPointerTo(tf).Elem()
- if dm.IsNil() {
- dm.Set(reflect.MakeMap(tf))
- }
-
- switch tf.Elem().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(Clone(val.Interface().(Message)))
- dm.SetMapIndex(key, val)
- }
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- dm.SetMapIndex(key, val)
- }
- default: // Basic type (e.g., string)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- dm.SetMapIndex(key, val)
- }
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in interface case in " + tf.Name())
- default: // E.g., interface{}
- // TODO: Make this faster?
- mfi.merge = func(dst, src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- du := dst.asPointerTo(tf).Elem()
- typ := su.Elem().Type()
- if du.IsNil() || du.Elem().Type() != typ {
- du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
- }
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- dv := du.Elem().Elem().Field(0)
- if dv.Kind() == reflect.Ptr && dv.IsNil() {
- dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- Merge(dv.Interface().(Message), sv.Interface().(Message))
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
- default: // Basic type (e.g., string)
- dv.Set(sv)
- }
- }
- }
- }
- default:
- panic(fmt.Sprintf("merger not found for type:%s", tf))
- }
- mi.fields = append(mi.fields, mfi)
- }
-
- mi.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- mi.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&mi.initialized, 1)
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index bb2622f2..00000000
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2245 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
- // Load the unmarshal information for this message type.
- // The atomic load ensures memory consistency.
- u := atomicLoadUnmarshalInfo(&a.unmarshal)
- if u == nil {
- // Slow path: find unmarshal info for msg, update a with it.
- u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
- atomicStoreUnmarshalInfo(&a.unmarshal, u)
- }
- // Then do the unmarshaling.
- err := u.unmarshal(toPointer(&msg), b)
- return err
-}
-
-type unmarshalInfo struct {
- typ reflect.Type // type of the protobuf struct
-
- // 0 = only typ field is initialized
- // 1 = completely initialized
- initialized int32
- lock sync.Mutex // prevents double initialization
- dense []unmarshalFieldInfo // fields indexed by tag #
- sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
- reqFields []string // names of required fields
- reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all set; else 0.
- for len(b) > 0 {
- // Read tag and wire type.
- // Special case 1 and 2 byte varints.
- var x uint64
- if b[0] < 128 {
- x = uint64(b[0])
- b = b[1:]
- } else if len(b) >= 2 && b[1] < 128 {
- x = uint64(b[0]&0x7f) + uint64(b[1])<<7
- b = b[2:]
- } else {
- var n int
- x, n = decodeVarint(b)
- if n == 0 {
- return io.ErrUnexpectedEOF
- }
- b = b[n:]
- }
- tag := x >> 3
- wire := int(x) & 7
-
- // Dispatch on the tag to one of the unmarshal* functions below.
- var f unmarshalFieldInfo
- if tag < uint64(len(u.dense)) {
- f = u.dense[tag]
- } else {
- f = u.sparse[tag]
- }
- if fn := f.unmarshal; fn != nil {
- var err error
- b, err = fn(b, m.offset(f.field), wire)
- if err == nil {
- reqMask |= f.reqMask
- continue
- }
- if r, ok := err.(*RequiredNotSetError); ok {
- // Remember this error, but keep parsing. We need to produce
- // a full parse even if a required field is missing.
- if errLater == nil {
- errLater = r
- }
- reqMask |= f.reqMask
- continue
- }
- if err != errInternalBadWireType {
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return err
- }
- // Fragments with bad wire type are treated as unknown fields.
- }
-
- // Unknown tag.
- if !u.unrecognized.IsValid() {
- // Don't keep unrecognized data; just skip it.
- var err error
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- continue
- }
- // Keep unrecognized data around.
- // maybe in extensions, maybe in the unrecognized field.
- z := m.offset(u.unrecognized).toBytes()
- var emap map[int32]Extension
- var e Extension
- for _, r := range u.extensionRanges {
- if uint64(r.Start) <= tag && tag <= uint64(r.End) {
- if u.extensions.IsValid() {
- mp := m.offset(u.extensions).toExtensions()
- emap = mp.extensionsWrite()
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.oldExtensions.IsValid() {
- p := m.offset(u.oldExtensions).toOldExtensions()
- emap = *p
- if emap == nil {
- emap = map[int32]Extension{}
- *p = emap
- }
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.bytesExtensions.IsValid() {
- z = m.offset(u.bytesExtensions).toBytes()
- break
- }
- panic("no extensions field available")
- }
- }
- // Use wire type to skip data.
- var err error
- b0 := b
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- *z = encodeVarint(*z, tag<<3|uint64(wire))
- *z = append(*z, b0[:len(b0)-len(b)]...)
-
- if emap != nil {
- emap[int32(tag)] = e
- }
- }
- if reqMask != u.reqMask && errLater == nil {
- // A required field of this message is missing.
- for _, n := range u.reqFields {
- if reqMask&1 == 0 {
- errLater = &RequiredNotSetError{n}
- }
- reqMask >>= 1
- }
- }
- return errLater
-}
-
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
- u.lock.Lock()
- defer u.lock.Unlock()
- if u.initialized != 0 {
- return
- }
- t := u.typ
- n := t.NumField()
-
- // Set up the "not found" value for the unrecognized byte buffer.
- // This is the default for proto3.
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.oldExtensions = invalidField
- u.bytesExtensions = invalidField
-
- // List of the generated type and offset for each oneof field.
- type oneofField struct {
- ityp reflect.Type // interface type of oneof field
- field field // offset in containing message
- }
- var oneofFields []oneofField
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if f.Name == "XXX_unrecognized" {
- // The byte slice used to hold unrecognized input is special.
- if f.Type != reflect.TypeOf(([]byte)(nil)) {
- panic("bad type for XXX_unrecognized field: " + f.Type.Name())
- }
- u.unrecognized = toField(&f)
- continue
- }
- if f.Name == "XXX_InternalExtensions" {
- // Ditto here.
- if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
- panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
- }
- u.extensions = toField(&f)
- if f.Tag.Get("protobuf_messageset") == "1" {
- u.isMessageSet = true
- }
- continue
- }
- if f.Name == "XXX_extensions" {
- // An older form of the extensions field.
- if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) {
- u.oldExtensions = toField(&f)
- continue
- } else if f.Type == reflect.TypeOf(([]byte)(nil)) {
- u.bytesExtensions = toField(&f)
- continue
- }
- panic("bad type for XXX_extensions field: " + f.Type.Name())
- }
- if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
- continue
- }
-
- oneof := f.Tag.Get("protobuf_oneof")
- if oneof != "" {
- oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
- // The rest of oneof processing happens below.
- continue
- }
-
- tags := f.Tag.Get("protobuf")
- tagArray := strings.Split(tags, ",")
- if len(tagArray) < 2 {
- panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
- }
- tag, err := strconv.Atoi(tagArray[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tagArray[1])
- }
-
- name := ""
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- }
-
- // Extract unmarshaling function from the field (its type and tags).
- unmarshal := fieldUnmarshaler(&f)
-
- // Required field?
- var reqMask uint64
- if tagArray[2] == "req" {
- bit := len(u.reqFields)
- u.reqFields = append(u.reqFields, name)
- reqMask = uint64(1) << uint(bit)
- // TODO: if we have more than 64 required fields, we end up
- // not verifying that all required fields are present.
- // Fix this, perhaps using a count of required fields?
- }
-
- // Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask, name)
- }
-
- // Find any types associated with oneof fields.
- // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
- if fn.IsValid() && len(oneofFields) > 0 {
- res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
- for i := res.Len() - 1; i >= 0; i-- {
- v := res.Index(i) // interface{}
- tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
- }
-
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
- }
- }
- }
-
- // Get extension ranges, if any.
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
- if fn.IsValid() {
- if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
- panic("a message with extensions, but no extensions field in " + t.Name())
- }
- u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
- }
-
- // Explicitly disallow tag 0. This will ensure we flag an error
- // when decoding a buffer of all zeros. Without this code, we
- // would decode and skip an all-zero buffer of even length.
- // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
- u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
- return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0, "")
-
- // Set mask for required field check.
- u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// setTag stores the unmarshal information for the given tag.
-// tag = tag # for field
-// field/unmarshal = unmarshal info for that field.
-// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-// name = short name of the field.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
- i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
- n := u.typ.NumField()
- if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
- for len(u.dense) <= tag {
- u.dense = append(u.dense, unmarshalFieldInfo{})
- }
- u.dense[tag] = i
- return
- }
- if u.sparse == nil {
- u.sparse = map[uint64]unmarshalFieldInfo{}
- }
- u.sparse[uint64(tag)] = i
-}
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
- if f.Type.Kind() == reflect.Map {
- return makeUnmarshalMap(f)
- }
- return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
- tagArray := strings.Split(tags, ",")
- encoding := tagArray[0]
- name := "unknown"
- ctype := false
- isTime := false
- isDuration := false
- isWktPointer := false
- proto3 := false
- validateUTF8 := true
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- if tag == "proto3" {
- proto3 = true
- }
- if strings.HasPrefix(tag, "customtype=") {
- ctype = true
- }
- if tag == "stdtime" {
- isTime = true
- }
- if tag == "stdduration" {
- isDuration = true
- }
- if tag == "wktptr" {
- isWktPointer = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- // Figure out packaging (pointer, slice, or both)
- slice := false
- pointer := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- if ctype {
- if reflect.PtrTo(t).Implements(customType) {
- if slice {
- return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name)
- }
- if pointer {
- return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalCustom(getUnmarshalInfo(t), name)
- } else {
- panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
- }
- }
-
- if isTime {
- if pointer {
- if slice {
- return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalTimePtr(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalTime(getUnmarshalInfo(t), name)
- }
-
- if isDuration {
- if pointer {
- if slice {
- return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalDuration(getUnmarshalInfo(t), name)
- }
-
- if isWktPointer {
- switch t.Kind() {
- case reflect.Float64:
- if pointer {
- if slice {
- return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Float32:
- if pointer {
- if slice {
- return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Int64:
- if pointer {
- if slice {
- return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Uint64:
- if pointer {
- if slice {
- return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Int32:
- if pointer {
- if slice {
- return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Uint32:
- if pointer {
- if slice {
- return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Bool:
- if pointer {
- if slice {
- return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.String:
- if pointer {
- if slice {
- return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name)
- case uint8SliceType:
- if pointer {
- if slice {
- return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name)
- default:
- panic(fmt.Sprintf("unknown wktpointer type %#v", t))
- }
- }
-
- // We'll never have both pointer and slice for basic types.
- if pointer && slice && t.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + t.Name())
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return unmarshalBoolPtr
- }
- if slice {
- return unmarshalBoolSlice
- }
- return unmarshalBoolValue
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixedS32Ptr
- }
- if slice {
- return unmarshalFixedS32Slice
- }
- return unmarshalFixedS32Value
- case "varint":
- // this could be int32 or enum
- if pointer {
- return unmarshalInt32Ptr
- }
- if slice {
- return unmarshalInt32Slice
- }
- return unmarshalInt32Value
- case "zigzag32":
- if pointer {
- return unmarshalSint32Ptr
- }
- if slice {
- return unmarshalSint32Slice
- }
- return unmarshalSint32Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixedS64Ptr
- }
- if slice {
- return unmarshalFixedS64Slice
- }
- return unmarshalFixedS64Value
- case "varint":
- if pointer {
- return unmarshalInt64Ptr
- }
- if slice {
- return unmarshalInt64Slice
- }
- return unmarshalInt64Value
- case "zigzag64":
- if pointer {
- return unmarshalSint64Ptr
- }
- if slice {
- return unmarshalSint64Slice
- }
- return unmarshalSint64Value
- }
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixed32Ptr
- }
- if slice {
- return unmarshalFixed32Slice
- }
- return unmarshalFixed32Value
- case "varint":
- if pointer {
- return unmarshalUint32Ptr
- }
- if slice {
- return unmarshalUint32Slice
- }
- return unmarshalUint32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixed64Ptr
- }
- if slice {
- return unmarshalFixed64Slice
- }
- return unmarshalFixed64Value
- case "varint":
- if pointer {
- return unmarshalUint64Ptr
- }
- if slice {
- return unmarshalUint64Slice
- }
- return unmarshalUint64Value
- }
- case reflect.Float32:
- if pointer {
- return unmarshalFloat32Ptr
- }
- if slice {
- return unmarshalFloat32Slice
- }
- return unmarshalFloat32Value
- case reflect.Float64:
- if pointer {
- return unmarshalFloat64Ptr
- }
- if slice {
- return unmarshalFloat64Slice
- }
- return unmarshalFloat64Value
- case reflect.Map:
- panic("map type in typeUnmarshaler in " + t.Name())
- case reflect.Slice:
- if pointer {
- panic("bad pointer in slice case in " + t.Name())
- }
- if slice {
- return unmarshalBytesSlice
- }
- return unmarshalBytesValue
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return unmarshalUTF8StringPtr
- }
- if slice {
- return unmarshalUTF8StringSlice
- }
- return unmarshalUTF8StringValue
- }
- if pointer {
- return unmarshalStringPtr
- }
- if slice {
- return unmarshalStringSlice
- }
- return unmarshalStringValue
- case reflect.Struct:
- // message or group field
- if !pointer {
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessage(getUnmarshalInfo(t), name)
- }
- }
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
- case "group":
- if slice {
- return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
- }
- }
- panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64() = v
- return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32() = v
- return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64() = v
- return b[8:], nil
-}
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64() = v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32() = v
- return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- *f.toInt32() = v
- return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.setInt32Ptr(v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- // Note: any length varint is allowed, even though any sane
- // encoder will use one byte.
- // See https://github.com/golang/protobuf/issues/76
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- // TODO: check if x>1? Tests seem to indicate no.
- v := x != 0
- *f.toBool() = v
- return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- *f.toBoolPtr() = &v
- return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- b = b[n:]
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64() = v
- return b[8:], nil
-}
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32() = v
- return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // The use of append here is a trick which avoids the zeroing
- // that would be required if we used a make/copy pair.
- // We append to emptyBuf instead of nil because we want
- // a non-nil result even when the length is 0.
- v := append(emptyBuf[:], b[:x]...)
- *f.toBytes() = v
- return b[x:], nil
-}
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := append(emptyBuf[:], b[:x]...)
- s := f.toBytesSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[y:], err
- }
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[y:], err
- }
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
- t := f.Type
- kt := t.Key()
- vt := t.Elem()
- tagArray := strings.Split(f.Tag.Get("protobuf"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- for _, t := range tagArray {
- if strings.HasPrefix(t, "customtype=") {
- valTags = append(valTags, t)
- }
- if t == "stdtime" {
- valTags = append(valTags, t)
- }
- if t == "stdduration" {
- valTags = append(valTags, t)
- }
- if t == "wktptr" {
- valTags = append(valTags, t)
- }
- }
- unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
- unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ","))
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // The map entry is a submessage. Figure out how big it is.
- if w != WireBytes {
- return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- r := b[x:] // unused data to return
- b = b[:x] // data for map entry
-
- // Note: we could use #keys * #values ~= 200 functions
- // to do map decoding without reflection. Probably not worth it.
- // Maps will be somewhat slow. Oh well.
-
- // Read key and value from data.
- var nerr nonFatal
- k := reflect.New(kt)
- v := reflect.New(vt)
- for len(b) > 0 {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- wire := int(x) & 7
- b = b[n:]
-
- var err error
- switch x >> 3 {
- case 1:
- b, err = unmarshalKey(b, valToPointer(k), wire)
- case 2:
- b, err = unmarshalVal(b, valToPointer(v), wire)
- default:
- err = errInternalBadWireType // skip unknown tag
- }
-
- if nerr.Merge(err) {
- continue
- }
- if err != errInternalBadWireType {
- return nil, err
- }
-
- // Skip past unknown fields.
- b, err = skipField(b, wire)
- if err != nil {
- return nil, err
- }
- }
-
- // Get map, allocate if needed.
- m := f.asPointerTo(t).Elem() // an addressable map[K]T
- if m.IsNil() {
- m.Set(reflect.MakeMap(t))
- }
-
- // Insert into map.
- m.SetMapIndex(k.Elem(), v.Elem())
-
- return r, nerr.E
- }
-}
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-// oneof F {
-// int64 X = 1;
-// float64 Y = 2;
-// }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
- sf := typ.Field(0)
- field0 := toField(&sf)
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // Allocate holder for value.
- v := reflect.New(typ)
-
- // Unmarshal data into holder.
- // We unmarshal into the first field of the holder object.
- var err error
- var nerr nonFatal
- b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if !nerr.Merge(err) {
- return nil, err
- }
-
- // Write pointer to holder into target field.
- f.asPointerTo(ityp).Elem().Set(v)
-
- return b, nerr.E
- }
-}
-
-// Error used by decode internally.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
- switch wire {
- case WireVarint:
- _, k := decodeVarint(b)
- if k == 0 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[k:]
- case WireFixed32:
- if len(b) < 4 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[4:]
- case WireFixed64:
- if len(b) < 8 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[8:]
- case WireBytes:
- m, k := decodeVarint(b)
- if k == 0 || uint64(len(b)-k) < m {
- return b, io.ErrUnexpectedEOF
- }
- b = b[uint64(k)+m:]
- case WireStartGroup:
- _, i := findEndGroup(b)
- if i == -1 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[i:]
- default:
- return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
- }
- return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
- depth := 1
- i := 0
- for {
- x, n := decodeVarint(b[i:])
- if n == 0 {
- return -1, -1
- }
- j := i
- i += n
- switch x & 7 {
- case WireVarint:
- _, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- case WireFixed32:
- if len(b)-4 < i {
- return -1, -1
- }
- i += 4
- case WireFixed64:
- if len(b)-8 < i {
- return -1, -1
- }
- i += 8
- case WireBytes:
- m, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- if uint64(len(b)-i) < m {
- return -1, -1
- }
- i += int(m)
- case WireStartGroup:
- depth++
- case WireEndGroup:
- depth--
- if depth == 0 {
- return j, i
- }
- default:
- return -1, -1
- }
- }
-}
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
- for x >= 1<<7 {
- b = append(b, byte(x&0x7f|0x80))
- x >>= 7
- }
- return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
- var x, y uint64
- if len(b) == 0 {
- goto bad
- }
- x = uint64(b[0])
- if x < 0x80 {
- return x, 1
- }
- x -= 0x80
-
- if len(b) <= 1 {
- goto bad
- }
- y = uint64(b[1])
- x += y << 7
- if y < 0x80 {
- return x, 2
- }
- x -= 0x80 << 7
-
- if len(b) <= 2 {
- goto bad
- }
- y = uint64(b[2])
- x += y << 14
- if y < 0x80 {
- return x, 3
- }
- x -= 0x80 << 14
-
- if len(b) <= 3 {
- goto bad
- }
- y = uint64(b[3])
- x += y << 21
- if y < 0x80 {
- return x, 4
- }
- x -= 0x80 << 21
-
- if len(b) <= 4 {
- goto bad
- }
- y = uint64(b[4])
- x += y << 28
- if y < 0x80 {
- return x, 5
- }
- x -= 0x80 << 28
-
- if len(b) <= 5 {
- goto bad
- }
- y = uint64(b[5])
- x += y << 35
- if y < 0x80 {
- return x, 6
- }
- x -= 0x80 << 35
-
- if len(b) <= 6 {
- goto bad
- }
- y = uint64(b[6])
- x += y << 42
- if y < 0x80 {
- return x, 7
- }
- x -= 0x80 << 42
-
- if len(b) <= 7 {
- goto bad
- }
- y = uint64(b[7])
- x += y << 49
- if y < 0x80 {
- return x, 8
- }
- x -= 0x80 << 49
-
- if len(b) <= 8 {
- goto bad
- }
- y = uint64(b[8])
- x += y << 56
- if y < 0x80 {
- return x, 9
- }
- x -= 0x80 << 56
-
- if len(b) <= 9 {
- goto bad
- }
- y = uint64(b[9])
- x += y << 63
- if y < 2 {
- return x, 10
- }
-
-bad:
- return 0, 0
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
deleted file mode 100644
index 00d6c7ad..00000000
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
+++ /dev/null
@@ -1,385 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "io"
- "reflect"
-)
-
-func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f // gogo: changed from v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.New(sub.typ))
- m := s.Interface().(custom)
- if err := m.Unmarshal(b[:x]); err != nil {
- return nil, err
- }
- return b[x:], nil
- }
-}
-
-func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := reflect.New(sub.typ)
- c := m.Interface().(custom)
- if err := c.Unmarshal(b[:x]); err != nil {
- return nil, err
- }
- v := valToPointer(m)
- f.appendRef(v, sub.typ)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-
- m := f.asPointerTo(sub.typ).Interface().(custom)
- if err := m.Unmarshal(b[:x]); err != nil {
- return nil, err
- }
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(t))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&t))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&t))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(t))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&d))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(d))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&d))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(d))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
deleted file mode 100644
index 0407ba85..00000000
--- a/vendor/github.com/gogo/protobuf/proto/text.go
+++ /dev/null
@@ -1,928 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Print("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if name == "XXX_NoUnkeyedLiteral" {
- continue
- }
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("\n")); err != nil {
- return err
- }
- continue
- }
- if len(props.Enum) > 0 {
- if err := tm.writeEnum(w, v, props); err != nil {
- return err
- }
- } else if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.MapValProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
-
- if len(props.Enum) > 0 {
- if err := tm.writeEnum(w, fv, props); err != nil {
- return err
- }
- } else if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv
- if pv.CanAddr() {
- pv = sv.Addr()
- } else {
- pv = reflect.New(sv.Type())
- pv.Elem().Set(sv)
- }
- if _, err := extendable(pv.Interface()); err == nil {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- if props != nil {
- if len(props.CustomType) > 0 {
- custom, ok := v.Interface().(Marshaler)
- if ok {
- data, err := custom.Marshal()
- if err != nil {
- return err
- }
- if err := writeString(w, string(data)); err != nil {
- return err
- }
- return nil
- }
- } else if len(props.CastType) > 0 {
- if _, ok := v.Interface().(interface {
- String() string
- }); ok {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- _, err := fmt.Fprintf(w, "%d", v.Interface())
- return err
- }
- }
- } else if props.StdTime {
- t, ok := v.Interface().(time.Time)
- if !ok {
- return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
- }
- tproto, err := timestampProto(t)
- if err != nil {
- return err
- }
- propsCopy := *props // Make a copy so that this is goroutine-safe
- propsCopy.StdTime = false
- err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
- return err
- } else if props.StdDuration {
- d, ok := v.Interface().(time.Duration)
- if !ok {
- return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
- }
- dproto := durationProto(d)
- propsCopy := *props // Make a copy so that this is goroutine-safe
- propsCopy.StdDuration = false
- err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
- return err
- }
- }
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Bytes())); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if v.CanAddr() {
- // Calling v.Interface on a struct causes the reflect package to
- // copy the entire struct. This is racy with the new Marshaler
- // since we atomically update the XXX_sizecache.
- //
- // Thus, we retrieve a pointer to the struct if possible to avoid
- // a race since v.Interface on the pointer doesn't copy the struct.
- //
- // If v is not addressable, then we are not worried about a race
- // since it implies that the binary Marshaler cannot possibly be
- // mutating this value.
- v = v.Addr()
- }
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
- return ferr
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, werr := w.Write(endBraceNewline); werr != nil {
- return werr
- }
- continue
- }
- if _, ferr := fmt.Fprint(w, tag); ferr != nil {
- return ferr
- }
- if wire != WireStartGroup {
- if err = w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err = w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- e := pv.Interface().(Message)
-
- var m map[int32]Extension
- var mu sync.Locker
- if em, ok := e.(extensionsBytes); ok {
- eb := em.GetExtensions()
- var err error
- m, err = BytesToExtensionsMap(*eb)
- if err != nil {
- return err
- }
- mu = notLocker{}
- } else if _, ok := e.(extendableProto); ok {
- ep, _ := extendable(e)
- m, mu = ep.extensionsRead()
- if m == nil {
- return nil
- }
- }
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
-
- mu.Lock()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- mu.Unlock()
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(e, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte(""))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
deleted file mode 100644
index 1d6c6aa0..00000000
--- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
-)
-
-func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
- m, ok := enumStringMaps[props.Enum]
- if !ok {
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- }
- key := int32(0)
- if v.Kind() == reflect.Ptr {
- key = int32(v.Elem().Int())
- } else {
- key = int32(v.Int())
- }
- s, ok := m[key]
- if !ok {
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- }
- _, err := fmt.Fprint(w, s)
- return err
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
deleted file mode 100644
index 1ce0be2f..00000000
--- a/vendor/github.com/gogo/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,1018 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(i), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- if fieldSet["type_url"] {
- return p.errorf(anyRepeatedlyUnpacked, "type_url")
- }
- if fieldSet["value"] {
- return p.errorf(anyRepeatedlyUnpacked, "value")
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- fieldSet["type_url"] = true
- fieldSet["value"] = true
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(Message)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- field := sv.Field(oop.Field)
- if !field.IsNil() {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
- }
- field.Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order. See b/28924776 for a time
- // this went wrong.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.MapKeyProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- case "value":
- if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.MapValProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- default:
- p.back()
- return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
- if len(props.CustomType) > 0 {
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- tc := reflect.TypeOf(new(Marshaler))
- ok := t.Elem().Implements(tc.Elem())
- if ok {
- fv := v
- flen := fv.Len()
- if flen == fv.Cap() {
- nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
- reflect.Copy(nav, fv)
- fv.Set(nav)
- }
- fv.SetLen(flen + 1)
-
- // Read one.
- p.back()
- return p.readAny(fv.Index(flen), props)
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
- err := custom.Unmarshal([]byte(tok.unquoted))
- if err != nil {
- return p.errorf("%v %v: %v", err, v.Type(), tok.value)
- }
- v.Set(reflect.ValueOf(custom))
- } else {
- custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
- err := custom.Unmarshal([]byte(tok.unquoted))
- if err != nil {
- return p.errorf("%v %v: %v", err, v.Type(), tok.value)
- }
- v.Set(reflect.Indirect(reflect.ValueOf(custom)))
- }
- return nil
- }
- if props.StdTime {
- fv := v
- p.back()
- props.StdTime = false
- tproto := &timestamp{}
- err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
- props.StdTime = true
- if err != nil {
- return err
- }
- tim, err := timestampFromProto(tproto)
- if err != nil {
- return err
- }
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- if t.Elem().Kind() == reflect.Ptr {
- ts := fv.Interface().([]*time.Time)
- ts = append(ts, &tim)
- fv.Set(reflect.ValueOf(ts))
- return nil
- } else {
- ts := fv.Interface().([]time.Time)
- ts = append(ts, tim)
- fv.Set(reflect.ValueOf(ts))
- return nil
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- v.Set(reflect.ValueOf(&tim))
- } else {
- v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
- }
- return nil
- }
- if props.StdDuration {
- fv := v
- p.back()
- props.StdDuration = false
- dproto := &duration{}
- err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
- props.StdDuration = true
- if err != nil {
- return err
- }
- dur, err := durationFromProto(dproto)
- if err != nil {
- return err
- }
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- if t.Elem().Kind() == reflect.Ptr {
- ds := fv.Interface().([]*time.Duration)
- ds = append(ds, &dur)
- fv.Set(reflect.ValueOf(ds))
- return nil
- } else {
- ds := fv.Interface().([]time.Duration)
- ds = append(ds, dur)
- fv.Set(reflect.ValueOf(ds))
- return nil
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- v.Set(reflect.ValueOf(&dur))
- } else {
- v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
- }
- return nil
- }
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- ntok := p.next()
- if ntok.err != nil {
- return ntok.err
- }
- if ntok.value == "]" {
- break
- }
- if ntok.value != "," {
- return p.errorf("Expected ']' or ',' found %q", ntok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // true/1/t/True or false/f/0/False.
- switch tok.value {
- case "true", "1", "t", "True":
- fv.SetBool(true)
- return nil
- case "false", "0", "f", "False":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int8:
- if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
- fv.SetInt(x)
- return nil
- }
- case reflect.Int16:
- if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
- fv.SetInt(x)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint8:
- if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
- fv.SetUint(x)
- return nil
- }
- case reflect.Uint16:
- if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
- fv.SetUint(x)
- return nil
- }
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- return um.UnmarshalText([]byte(s))
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- return newTextParser(s).readStruct(v.Elem(), "")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
deleted file mode 100644
index 9324f654..00000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// This file implements operations on google.protobuf.Timestamp.
-
-import (
- "errors"
- "fmt"
- "time"
-)
-
-const (
- // Seconds field of the earliest valid Timestamp.
- // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- minValidSeconds = -62135596800
- // Seconds field just after the latest valid Timestamp.
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- maxValidSeconds = 253402300800
-)
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
-//
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
-
-// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
- // Don't return the zero value on error, because it corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
- seconds := t.Unix()
- nanos := int32(t.Sub(time.Unix(seconds, 0)))
- ts := &timestamp{
- Seconds: seconds,
- Nanos: nanos,
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index 38439fa9..00000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
- "time"
-)
-
-var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
-
-type timestamp struct {
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *timestamp) Reset() { *m = timestamp{} }
-func (*timestamp) ProtoMessage() {}
-func (*timestamp) String() string { return "timestamp" }
-
-func init() {
- RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
deleted file mode 100644
index b175d1b6..00000000
--- a/vendor/github.com/gogo/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,1888 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "io"
- "reflect"
-)
-
-func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*float64)
- v := &float64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
- v := &float64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float64)
- v := &float64Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float64)
- v := &float64Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*float32)
- v := &float32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
- v := &float32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float32)
- v := &float32Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float32)
- v := &float32Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*int64)
- v := &int64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
- v := &int64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int64)
- v := &int64Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int64)
- v := &int64Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*uint64)
- v := &uint64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
- v := &uint64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint64)
- v := &uint64Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint64)
- v := &uint64Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*int32)
- v := &int32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
- v := &int32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int32)
- v := &int32Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int32)
- v := &int32Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*uint32)
- v := &uint32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
- v := &uint32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint32)
- v := &uint32Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint32)
- v := &uint32Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*bool)
- v := &boolValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
- v := &boolValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(bool)
- v := &boolValue{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(bool)
- v := &boolValue{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*string)
- v := &stringValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
- v := &stringValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(string)
- v := &stringValue{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(string)
- v := &stringValue{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
- v := &bytesValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
- v := &bytesValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().([]byte)
- v := &bytesValue{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().([]byte)
- v := &bytesValue{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
deleted file mode 100644
index c1cf7bf8..00000000
--- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-type float64Value struct {
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *float64Value) Reset() { *m = float64Value{} }
-func (*float64Value) ProtoMessage() {}
-func (*float64Value) String() string { return "float64" }
-
-type float32Value struct {
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *float32Value) Reset() { *m = float32Value{} }
-func (*float32Value) ProtoMessage() {}
-func (*float32Value) String() string { return "float32" }
-
-type int64Value struct {
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *int64Value) Reset() { *m = int64Value{} }
-func (*int64Value) ProtoMessage() {}
-func (*int64Value) String() string { return "int64" }
-
-type uint64Value struct {
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *uint64Value) Reset() { *m = uint64Value{} }
-func (*uint64Value) ProtoMessage() {}
-func (*uint64Value) String() string { return "uint64" }
-
-type int32Value struct {
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *int32Value) Reset() { *m = int32Value{} }
-func (*int32Value) ProtoMessage() {}
-func (*int32Value) String() string { return "int32" }
-
-type uint32Value struct {
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *uint32Value) Reset() { *m = uint32Value{} }
-func (*uint32Value) ProtoMessage() {}
-func (*uint32Value) String() string { return "uint32" }
-
-type boolValue struct {
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *boolValue) Reset() { *m = boolValue{} }
-func (*boolValue) ProtoMessage() {}
-func (*boolValue) String() string { return "bool" }
-
-type stringValue struct {
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *stringValue) Reset() { *m = stringValue{} }
-func (*stringValue) ProtoMessage() {}
-func (*stringValue) String() string { return "string" }
-
-type bytesValue struct {
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *bytesValue) Reset() { *m = bytesValue{} }
-func (*bytesValue) ProtoMessage() {}
-func (*bytesValue) String() string { return "[]byte" }
-
-func init() {
- RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
- RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
- RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
- RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
- RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
- RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
- RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
- RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
- RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
-}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go
index ae5b7da1..51c75a77 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go
@@ -81,7 +81,7 @@ func (c *Cache) fileName(id [HashSize]byte, key string) string {
var errMissing = errors.New("cache entry not found")
func IsErrMissing(err error) bool {
- return err == errMissing
+ return errors.Cause(err) == errMissing
}
const (
@@ -199,26 +199,6 @@ func (c *Cache) get(id ActionID) (Entry, error) {
return Entry{buf, size, time.Unix(0, tm)}, nil
}
-// GetFile looks up the action ID in the cache and returns
-// the name of the corresponding data file.
-func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
- entry, err = c.Get(id)
- if err != nil {
- return "", Entry{}, err
- }
-
- file, err = c.OutputFile(entry.OutputID)
- if err != nil {
- return "", Entry{}, err
- }
-
- info, err := os.Stat(file)
- if err != nil || info.Size() != entry.Size {
- return "", Entry{}, errMissing
- }
- return file, entry, nil
-}
-
// GetBytes looks up the action ID in the cache and returns
// the corresponding output bytes.
// GetBytes should only be used for data that can be expected to fit in memory.
@@ -282,6 +262,9 @@ const (
func (c *Cache) used(file string) error {
info, err := os.Stat(file)
if err != nil {
+ if os.IsNotExist(err) {
+ return errMissing
+ }
return errors.Wrapf(err, "failed to stat file %s", file)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go
index 9581acab..0becc990 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go
@@ -218,11 +218,15 @@ func (e *Executor) acquireFileLock() bool {
lockFile := filepath.Join(os.TempDir(), "golangci-lint.lock")
e.debugf("Locking on file %s...", lockFile)
f := flock.New(lockFile)
- const totalTimeout = 5 * time.Second
const retryDelay = time.Second
- ctx, finish := context.WithTimeout(context.Background(), totalTimeout)
- defer finish()
+ ctx := context.Background()
+ if !e.cfg.Run.AllowSerialRunners {
+ const totalTimeout = 5 * time.Second
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, totalTimeout)
+ defer cancel()
+ }
if ok, _ := f.TryLockContext(ctx, retryDelay); !ok {
return false
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
index e4887d58..57cb5471 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
@@ -80,7 +80,9 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is
fs.BoolVar(&oc.PrintIssuedLine, "print-issued-lines", true, wh("Print lines of code with issue"))
fs.BoolVar(&oc.PrintLinterName, "print-linter-name", true, wh("Print linter name in issue line"))
fs.BoolVar(&oc.UniqByLine, "uniq-by-line", true, wh("Make issues output unique by line"))
+ fs.BoolVar(&oc.SortResults, "sort-results", false, wh("Sort linter results"))
fs.BoolVar(&oc.PrintWelcomeMessage, "print-welcome", false, wh("Print welcome message"))
+ fs.StringVar(&oc.PathPrefix, "path-prefix", "", wh("Path prefix to add to output"))
hideFlag("print-welcome") // no longer used
// Run config
@@ -109,6 +111,9 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is
const allowParallelDesc = "Allow multiple parallel golangci-lint instances running. " +
"If false (default) - golangci-lint acquires file lock on start."
fs.BoolVar(&rc.AllowParallelRunners, "allow-parallel-runners", false, wh(allowParallelDesc))
+ const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " +
+ "If false (default) - golangci-lint exits with an error if it fails to acquire file lock on start."
+ fs.BoolVar(&rc.AllowSerialRunners, "allow-serial-runners", false, wh(allowSerialDesc))
// Linters settings config
lsc := &cfg.LintersSettings
@@ -232,6 +237,7 @@ func (e *Executor) getConfigForCommandLine() (*config.Config, error) {
// Use another config variable here, not e.cfg, to not
// affect main parsing by this parsing of only config option.
initFlagSet(fs, &cfg, e.DBManager, false)
+ initVersionFlagSet(fs, &cfg)
// Parse max options, even force version option: don't want
// to get access to Executor here: it's error-prone to use
@@ -446,8 +452,8 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) {
// to be removed when deadline is finally decommissioned
func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() {
- //lint:ignore SA1019 We want to promoted the deprecated config value when needed
- deadlineValue := e.cfg.Run.Deadline // nolint:staticcheck
+ // nolint:staticcheck
+ deadlineValue := e.cfg.Run.Deadline
if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout {
e.cfg.Run.Timeout = deadlineValue
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go
index fdb5aa88..3918d6b7 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go
@@ -1,17 +1,59 @@
package commands
import (
+ "encoding/json"
+ "strings"
+
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "github.com/golangci/golangci-lint/pkg/config"
)
+type jsonVersion struct {
+ Version string `json:"version"`
+ Commit string `json:"commit"`
+ Date string `json:"date"`
+}
+
+func (e *Executor) initVersionConfiguration(cmd *cobra.Command) {
+ fs := cmd.Flags()
+ fs.SortFlags = false // sort them as they are defined here
+ initVersionFlagSet(fs, e.cfg)
+}
+
+func initVersionFlagSet(fs *pflag.FlagSet, cfg *config.Config) {
+ // Version config
+ vc := &cfg.Version
+ fs.StringVar(&vc.Format, "format", "", wh("The version's format can be: 'short', 'json'"))
+}
+
func (e *Executor) initVersion() {
versionCmd := &cobra.Command{
Use: "version",
Short: "Version",
- Run: func(cmd *cobra.Command, _ []string) {
- cmd.Printf("golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date)
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ switch strings.ToLower(e.cfg.Version.Format) {
+ case "short":
+ cmd.Println(e.version)
+ case "json":
+ ver := jsonVersion{
+ Version: e.version,
+ Commit: e.commit,
+ Date: e.date,
+ }
+ data, err := json.Marshal(&ver)
+ if err != nil {
+ return err
+ }
+ cmd.Println(string(data))
+ default:
+ cmd.Printf("golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date)
+ }
+ return nil
},
}
e.rootCmd.AddCommand(versionCmd)
+ e.initVersionConfiguration(versionCmd)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
index 89e3559c..9689ea09 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
@@ -152,9 +152,13 @@ type Run struct {
UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"`
AllowParallelRunners bool `mapstructure:"allow-parallel-runners"`
+ AllowSerialRunners bool `mapstructure:"allow-serial-runners"`
}
type LintersSettings struct {
+ Gci struct {
+ LocalPrefixes string `mapstructure:"local-prefixes"`
+ }
Govet GovetSettings
Golint struct {
MinConfidence float64 `mapstructure:"min-confidence"`
@@ -221,6 +225,10 @@ type LintersSettings struct {
Recommendations []string `mapstructure:"recommendations"`
Reason string `mapstructure:"reason"`
} `mapstructure:"modules"`
+ Versions []map[string]struct {
+ Version string `mapstructure:"version"`
+ Reason string `mapstructure:"reason"`
+ } `mapstructure:"versions"`
} `mapstructure:"blocked"`
}
@@ -235,13 +243,22 @@ type LintersSettings struct {
Dogsled DogsledSettings
Gocognit GocognitSettings
Godot GodotSettings
+ Goheader GoHeaderSettings
Testpackage TestpackageSettings
Nestif NestifSettings
NoLintLint NoLintLintSettings
+ Exhaustive ExhaustiveSettings
+ Gofumpt GofumptSettings
Custom map[string]CustomLinterSettings
}
+type GoHeaderSettings struct {
+ Values map[string]map[string]string `mapstructure:"values"`
+ Template string `mapstructure:"template"`
+ TemplatePath string `mapstructure:"template-path"`
+}
+
type GovetSettings struct {
CheckShadowing bool `mapstructure:"check-shadowing"`
Settings map[string]map[string]interface{}
@@ -335,6 +352,14 @@ type NestifSettings struct {
MinComplexity int `mapstructure:"min-complexity"`
}
+type ExhaustiveSettings struct {
+ DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"`
+}
+
+type GofumptSettings struct {
+ ExtraRules bool `mapstructure:"extra-rules"`
+}
+
var defaultLintersSettings = LintersSettings{
Lll: LllSettings{
LineLength: 120,
@@ -385,6 +410,12 @@ var defaultLintersSettings = LintersSettings{
Nestif: NestifSettings{
MinComplexity: 5,
},
+ Exhaustive: ExhaustiveSettings{
+ DefaultSignifiesExhaustive: false,
+ },
+ Gofumpt: GofumptSettings{
+ ExtraRules: false,
+ },
}
type CustomLinterSettings struct {
@@ -403,13 +434,48 @@ type Linters struct {
Presets []string
}
-type ExcludeRule struct {
+type BaseRule struct {
Linters []string
Path string
Text string
Source string
}
+func (b BaseRule) Validate(minConditionsCount int) error {
+ if err := validateOptionalRegex(b.Path); err != nil {
+ return fmt.Errorf("invalid path regex: %v", err)
+ }
+ if err := validateOptionalRegex(b.Text); err != nil {
+ return fmt.Errorf("invalid text regex: %v", err)
+ }
+ if err := validateOptionalRegex(b.Source); err != nil {
+ return fmt.Errorf("invalid source regex: %v", err)
+ }
+ nonBlank := 0
+ if len(b.Linters) > 0 {
+ nonBlank++
+ }
+ if b.Path != "" {
+ nonBlank++
+ }
+ if b.Text != "" {
+ nonBlank++
+ }
+ if b.Source != "" {
+ nonBlank++
+ }
+ if nonBlank < minConditionsCount {
+ return fmt.Errorf("at least %d of (text, source, path, linters) should be set", minConditionsCount)
+ }
+ return nil
+}
+
+const excludeRuleMinConditionsCount = 2
+
+type ExcludeRule struct {
+ BaseRule `mapstructure:",squash"`
+}
+
func validateOptionalRegex(value string) error {
if value == "" {
return nil
@@ -419,33 +485,18 @@ func validateOptionalRegex(value string) error {
}
func (e ExcludeRule) Validate() error {
- if err := validateOptionalRegex(e.Path); err != nil {
- return fmt.Errorf("invalid path regex: %v", err)
- }
- if err := validateOptionalRegex(e.Text); err != nil {
- return fmt.Errorf("invalid text regex: %v", err)
- }
- if err := validateOptionalRegex(e.Source); err != nil {
- return fmt.Errorf("invalid source regex: %v", err)
- }
- nonBlank := 0
- if len(e.Linters) > 0 {
- nonBlank++
- }
- if e.Path != "" {
- nonBlank++
- }
- if e.Text != "" {
- nonBlank++
- }
- if e.Source != "" {
- nonBlank++
- }
- const minConditionsCount = 2
- if nonBlank < minConditionsCount {
- return errors.New("at least 2 of (text, source, path, linters) should be set")
- }
- return nil
+ return e.BaseRule.Validate(excludeRuleMinConditionsCount)
+}
+
+const severityRuleMinConditionsCount = 1
+
+type SeverityRule struct {
+ BaseRule `mapstructure:",squash"`
+ Severity string
+}
+
+func (s *SeverityRule) Validate() error {
+ return s.BaseRule.Validate(severityRuleMinConditionsCount)
}
type Issues struct {
@@ -465,21 +516,35 @@ type Issues struct {
NeedFix bool `mapstructure:"fix"`
}
+type Severity struct {
+ Default string `mapstructure:"default-severity"`
+ CaseSensitive bool `mapstructure:"case-sensitive"`
+ Rules []SeverityRule `mapstructure:"rules"`
+}
+
+type Version struct {
+ Format string `mapstructure:"format"`
+}
+
type Config struct {
Run Run
Output struct {
Format string
Color string
- PrintIssuedLine bool `mapstructure:"print-issued-lines"`
- PrintLinterName bool `mapstructure:"print-linter-name"`
- UniqByLine bool `mapstructure:"uniq-by-line"`
- PrintWelcomeMessage bool `mapstructure:"print-welcome"`
+ PrintIssuedLine bool `mapstructure:"print-issued-lines"`
+ PrintLinterName bool `mapstructure:"print-linter-name"`
+ UniqByLine bool `mapstructure:"uniq-by-line"`
+ SortResults bool `mapstructure:"sort-results"`
+ PrintWelcomeMessage bool `mapstructure:"print-welcome"`
+ PathPrefix string `mapstructure:"path-prefix"`
}
LintersSettings LintersSettings `mapstructure:"linters-settings"`
Linters Linters
Issues Issues
+ Severity Severity
+ Version Version
InternalTest bool // Option is used only for testing golangci-lint code, don't use it
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config_gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config_gocritic.go
index bbfd7bb1..26317a67 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/config_gocritic.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config_gocritic.go
@@ -5,7 +5,7 @@ import (
"sort"
"strings"
- "github.com/go-lintpack/lintpack"
+ "github.com/go-critic/go-critic/framework/linter"
"github.com/pkg/errors"
_ "github.com/go-critic/go-critic/checkers" // this import register checkers
@@ -18,9 +18,9 @@ const gocriticDebugKey = "gocritic"
var (
gocriticDebugf = logutils.Debug(gocriticDebugKey)
isGocriticDebug = logutils.HaveDebugTag(gocriticDebugKey)
- allGocriticCheckers = lintpack.GetCheckersInfo()
- allGocriticCheckerMap = func() map[string]*lintpack.CheckerInfo {
- checkInfoMap := make(map[string]*lintpack.CheckerInfo)
+ allGocriticCheckers = linter.GetCheckersInfo()
+ allGocriticCheckerMap = func() map[string]*linter.CheckerInfo {
+ checkInfoMap := make(map[string]*linter.CheckerInfo)
for _, checkInfo := range allGocriticCheckers {
checkInfoMap[checkInfo.Name] = checkInfo
}
@@ -281,7 +281,7 @@ func getAllCheckerNames() map[string]bool {
return allCheckerNames
}
-func isEnabledByDefaultGocriticCheck(info *lintpack.CheckerInfo) bool {
+func isEnabledByDefaultGocriticCheck(info *linter.CheckerInfo) bool {
return !info.HasTag("experimental") &&
!info.HasTag("opinionated") &&
!info.HasTag("performance")
@@ -290,9 +290,6 @@ func isEnabledByDefaultGocriticCheck(info *lintpack.CheckerInfo) bool {
func getDefaultEnabledGocriticCheckersNames() []string {
var enabled []string
for _, info := range allGocriticCheckers {
- // get in sync with lintpack behavior in bindDefaultEnabledList
- // in https://github.com/go-lintpack/lintpack/blob/master/linter/lintmain/internal/check/check.go#L317
-
enable := isEnabledByDefaultGocriticCheck(info)
if enable {
enabled = append(enabled, info.Name)
@@ -305,9 +302,6 @@ func getDefaultEnabledGocriticCheckersNames() []string {
func getDefaultDisabledGocriticCheckersNames() []string {
var disabled []string
for _, info := range allGocriticCheckers {
- // get in sync with lintpack behavior in bindDefaultEnabledList
- // in https://github.com/go-lintpack/lintpack/blob/master/linter/lintmain/internal/check/check.go#L317
-
enable := isEnabledByDefaultGocriticCheck(info)
if !enable {
disabled = append(disabled, info.Name)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go
index 1e355e72..00722ba6 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go
@@ -12,6 +12,7 @@ import (
"github.com/golangci/golangci-lint/pkg/fsutils"
"github.com/golangci/golangci-lint/pkg/logutils"
+ "github.com/golangci/golangci-lint/pkg/sliceutil"
)
type FileReader struct {
@@ -113,6 +114,14 @@ func (r *FileReader) validateConfig() error {
return fmt.Errorf("error in exclude rule #%d: %v", i, err)
}
}
+ if len(c.Severity.Rules) > 0 && c.Severity.Default == "" {
+ return errors.New("can't set severity rule option: no default severity defined")
+ }
+ for i, rule := range c.Severity.Rules {
+ if err := rule.Validate(); err != nil {
+ return fmt.Errorf("error in severity rule #%d: %v", i, err)
+ }
+ }
if err := c.LintersSettings.Govet.Validate(); err != nil {
return fmt.Errorf("error in govet config: %v", err)
}
@@ -162,6 +171,7 @@ func (r *FileReader) setupConfigFileSearch() {
// find all dirs from it up to the root
configSearchPaths := []string{"./"}
+
for {
configSearchPaths = append(configSearchPaths, curDir)
newCurDir := filepath.Dir(curDir)
@@ -171,6 +181,13 @@ func (r *FileReader) setupConfigFileSearch() {
curDir = newCurDir
}
+ // find home directory for global config
+ if home, err := homedir.Dir(); err != nil {
+ r.log.Warnf("Can't get user's home directory: %s", err.Error())
+ } else if !sliceutil.Contains(configSearchPaths, home) {
+ configSearchPaths = append(configSearchPaths, home)
+ }
+
r.log.Infof("Config search paths: %s", configSearchPaths)
viper.SetConfigName(".golangci")
for _, p := range configSearchPaths {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go
new file mode 100644
index 00000000..cae37ecc
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go
@@ -0,0 +1,25 @@
+package golinters
+
+import (
+ "github.com/nishanths/exhaustive"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+)
+
+func NewExhaustive(settings *config.ExhaustiveSettings) *goanalysis.Linter {
+ a := exhaustive.Analyzer
+
+ var cfg map[string]map[string]interface{}
+ if settings != nil {
+ cfg = map[string]map[string]interface{}{
+ a.Name: {
+ exhaustive.DefaultSignifiesExhaustiveFlag: settings.DefaultSignifiesExhaustive,
+ },
+ }
+ }
+
+ return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg).
+ WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go
new file mode 100644
index 00000000..1131c575
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exportloopref.go
@@ -0,0 +1,19 @@
+package golinters
+
+import (
+ "github.com/kyoh86/exportloopref"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+)
+
+func NewExportLoopRef() *goanalysis.Linter {
+ a := exportloopref.Analyzer
+
+ return goanalysis.NewLinter(
+ a.Name,
+ a.Doc,
+ []*analysis.Analyzer{a},
+ nil,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go
new file mode 100644
index 00000000..6fa43544
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go
@@ -0,0 +1,92 @@
+package golinters
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ "github.com/daixiang0/gci/pkg/gci"
+ "github.com/pkg/errors"
+ "github.com/shazow/go-diff/difflib"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+ "github.com/golangci/golangci-lint/pkg/lint/linter"
+)
+
+const gciName = "gci"
+
+func NewGci() *goanalysis.Linter {
+ var mu sync.Mutex
+ var resIssues []goanalysis.Issue
+ differ := difflib.New()
+
+ analyzer := &analysis.Analyzer{
+ Name: gciName,
+ Doc: goanalysis.TheOnlyanalyzerDoc,
+ }
+ return goanalysis.NewLinter(
+ gciName,
+ "Gci control golang package import order and make it always deterministic.",
+ []*analysis.Analyzer{analyzer},
+ nil,
+ ).WithContextSetter(func(lintCtx *linter.Context) {
+ localFlag := lintCtx.Settings().Gci.LocalPrefixes
+ goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes
+ if localFlag == "" && goimportsFlag != "" {
+ localFlag = goimportsFlag
+ }
+
+ analyzer.Run = func(pass *analysis.Pass) (interface{}, error) {
+ var fileNames []string
+ for _, f := range pass.Files {
+ pos := pass.Fset.PositionFor(f.Pos(), false)
+ fileNames = append(fileNames, pos.Filename)
+ }
+
+ var issues []goanalysis.Issue
+
+ for _, f := range fileNames {
+ source, result, err := gci.Run(f, &gci.FlagSet{LocalFlag: localFlag})
+ if err != nil {
+ return nil, err
+ }
+ if result == nil {
+ continue
+ }
+
+ diff := bytes.Buffer{}
+ _, err = diff.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f))
+ if err != nil {
+ return nil, fmt.Errorf("can't write diff header: %v", err)
+ }
+
+ err = differ.Diff(&diff, bytes.NewReader(source), bytes.NewReader(result))
+ if err != nil {
+ return nil, fmt.Errorf("can't get gci diff output: %v", err)
+ }
+
+ is, err := extractIssuesFromPatch(diff.String(), lintCtx.Log, lintCtx, gciName)
+ if err != nil {
+ return nil, errors.Wrapf(err, "can't extract issues from gci diff output %q", diff.String())
+ }
+
+ for i := range is {
+ issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+ }
+ }
+
+ if len(issues) == 0 {
+ return nil, nil
+ }
+
+ mu.Lock()
+ resIssues = append(resIssues, issues...)
+ mu.Unlock()
+
+ return nil, nil
+ }
+ }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+ return resIssues
+ }).WithLoadMode(goanalysis.LoadModeSyntax)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go
index d0729424..db193f37 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go
@@ -935,7 +935,8 @@ func sizeOfReflectValueTreeBytes(rv reflect.Value, visitedPtrs map[uintptr]struc
return rv.Len()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64, reflect.UnsafePointer:
+ reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64,
+ reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer:
return int(rv.Type().Size())
case reflect.Invalid:
return 0
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go
index fb292520..7181c486 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go
@@ -10,7 +10,7 @@ import (
"strings"
"sync"
- "github.com/go-lintpack/lintpack"
+ gocriticlinter "github.com/go-critic/go-critic/framework/linter"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
@@ -38,15 +38,15 @@ func NewGocritic() *goanalysis.Linter {
nil,
).WithContextSetter(func(lintCtx *linter.Context) {
analyzer.Run = func(pass *analysis.Pass) (interface{}, error) {
- lintpackCtx := lintpack.NewContext(pass.Fset, sizes)
- enabledCheckers, err := buildEnabledCheckers(lintCtx, lintpackCtx)
+ linterCtx := gocriticlinter.NewContext(pass.Fset, sizes)
+ enabledCheckers, err := buildEnabledCheckers(lintCtx, linterCtx)
if err != nil {
return nil, err
}
- lintpackCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg)
+ linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg)
var res []goanalysis.Issue
- pkgIssues := runGocriticOnPackage(lintpackCtx, enabledCheckers, pass.Files)
+ pkgIssues := runGocriticOnPackage(linterCtx, enabledCheckers, pass.Files)
for i := range pkgIssues {
res = append(res, goanalysis.NewIssue(&pkgIssues[i], pass))
}
@@ -65,9 +65,9 @@ func NewGocritic() *goanalysis.Linter {
}).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
-func normalizeCheckerInfoParams(info *lintpack.CheckerInfo) lintpack.CheckerParams {
+func normalizeCheckerInfoParams(info *gocriticlinter.CheckerInfo) gocriticlinter.CheckerParams {
// lowercase info param keys here because golangci-lint's config parser lowercases all strings
- ret := lintpack.CheckerParams{}
+ ret := gocriticlinter.CheckerParams{}
for k, v := range info.Params {
ret[strings.ToLower(k)] = v
}
@@ -75,7 +75,7 @@ func normalizeCheckerInfoParams(info *lintpack.CheckerInfo) lintpack.CheckerPara
return ret
}
-func configureCheckerInfo(info *lintpack.CheckerInfo, allParams map[string]config.GocriticCheckSettings) error {
+func configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string]config.GocriticCheckSettings) error {
params := allParams[strings.ToLower(info.Name)]
if params == nil { // no config for this checker
return nil
@@ -108,12 +108,12 @@ func configureCheckerInfo(info *lintpack.CheckerInfo, allParams map[string]confi
return nil
}
-func buildEnabledCheckers(lintCtx *linter.Context, lintpackCtx *lintpack.Context) ([]*lintpack.Checker, error) {
+func buildEnabledCheckers(lintCtx *linter.Context, linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) {
s := lintCtx.Settings().Gocritic
allParams := s.GetLowercasedParams()
- var enabledCheckers []*lintpack.Checker
- for _, info := range lintpack.GetCheckersInfo() {
+ var enabledCheckers []*gocriticlinter.Checker
+ for _, info := range gocriticlinter.GetCheckersInfo() {
if !s.IsCheckEnabled(info.Name) {
continue
}
@@ -122,27 +122,27 @@ func buildEnabledCheckers(lintCtx *linter.Context, lintpackCtx *lintpack.Context
return nil, err
}
- c := lintpack.NewChecker(lintpackCtx, info)
+ c := gocriticlinter.NewChecker(linterCtx, info)
enabledCheckers = append(enabledCheckers, c)
}
return enabledCheckers, nil
}
-func runGocriticOnPackage(lintpackCtx *lintpack.Context, checkers []*lintpack.Checker,
+func runGocriticOnPackage(linterCtx *gocriticlinter.Context, checkers []*gocriticlinter.Checker,
files []*ast.File) []result.Issue {
var res []result.Issue
for _, f := range files {
- filename := filepath.Base(lintpackCtx.FileSet.Position(f.Pos()).Filename)
- lintpackCtx.SetFileInfo(filename, f)
+ filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename)
+ linterCtx.SetFileInfo(filename, f)
- issues := runGocriticOnFile(lintpackCtx, f, checkers)
+ issues := runGocriticOnFile(linterCtx, f, checkers)
res = append(res, issues...)
}
return res
}
-func runGocriticOnFile(ctx *lintpack.Context, f *ast.File, checkers []*lintpack.Checker) []result.Issue {
+func runGocriticOnFile(ctx *gocriticlinter.Context, f *ast.File, checkers []*gocriticlinter.Checker) []result.Issue {
var res []result.Issue
for _, c := range checkers {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go
index 7e5b233a..5139713c 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go
@@ -31,7 +31,7 @@ func NewGodot() *goanalysis.Linter {
settings := godot.Settings{CheckAll: cfg.CheckAll}
analyzer.Run = func(pass *analysis.Pass) (interface{}, error) {
- var issues []godot.Message
+ var issues []godot.Issue
for _, file := range pass.Files {
issues = append(issues, godot.Run(file, pass.Fset, settings)...)
}
@@ -46,6 +46,9 @@ func NewGodot() *goanalysis.Linter {
Pos: i.Pos,
Text: i.Message,
FromLinter: godotName,
+ Replacement: &result.Replacement{
+ NewLines: []string{i.Replacement},
+ },
}
res[k] = goanalysis.NewIssue(&issue, pass)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go
index a7bd88a7..aa340dcf 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go
@@ -46,7 +46,7 @@ func NewGofmt() *goanalysis.Linter {
continue
}
- is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, false)
+ is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, gofmtName)
if err != nil {
return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff))
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go
index fb1e3f66..3235622e 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go
@@ -207,7 +207,40 @@ func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change {
return p.ret
}
-func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Context, isGoimports bool) ([]result.Issue, error) {
+func getErrorTextForLinter(lintCtx *linter.Context, linterName string) string {
+ text := "File is not formatted"
+ switch linterName {
+ case gofumptName:
+ text = "File is not `gofumpt`-ed"
+ if lintCtx.Settings().Gofumpt.ExtraRules {
+ text += " with `-extra`"
+ }
+ case gofmtName:
+ text = "File is not `gofmt`-ed"
+ if lintCtx.Settings().Gofmt.Simplify {
+ text += " with `-s`"
+ }
+ case goimportsName:
+ text = "File is not `goimports`-ed"
+ if lintCtx.Settings().Goimports.LocalPrefixes != "" {
+ text += " with -local " + lintCtx.Settings().Goimports.LocalPrefixes
+ }
+ case gciName:
+ text = "File is not `gci`-ed"
+ localPrefixes := lintCtx.Settings().Gci.LocalPrefixes
+ goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes
+ if localPrefixes == "" && goimportsFlag != "" {
+ localPrefixes = goimportsFlag
+ }
+
+ if localPrefixes != "" {
+ text += " with -local " + localPrefixes
+ }
+ }
+ return text
+}
+
+func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Context, linterName string) ([]result.Issue, error) {
diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch))
if err != nil {
return nil, errors.Wrap(err, "can't parse patch")
@@ -225,35 +258,19 @@ func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Cont
}
for _, hunk := range d.Hunks {
- var text string
- if isGoimports {
- text = "File is not `goimports`-ed"
- if lintCtx.Settings().Goimports.LocalPrefixes != "" {
- text += " with -local " + lintCtx.Settings().Goimports.LocalPrefixes
- }
- } else {
- text = "File is not `gofmt`-ed"
- if lintCtx.Settings().Gofmt.Simplify {
- text += " with `-s`"
- }
- }
p := hunkChangesParser{
log: log,
}
changes := p.parse(hunk)
for _, change := range changes {
change := change // fix scope
- linterName := gofmtName
- if isGoimports {
- linterName = goimportsName
- }
i := result.Issue{
FromLinter: linterName,
Pos: token.Position{
Filename: d.NewName,
Line: change.LineRange.From,
},
- Text: text,
+ Text: getErrorTextForLinter(lintCtx, linterName),
Replacement: &change.Replacement,
}
if change.LineRange.From != change.LineRange.To {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go
new file mode 100644
index 00000000..e91e54ee
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go
@@ -0,0 +1,92 @@
+package golinters
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "sync"
+
+ "github.com/pkg/errors"
+ "github.com/shazow/go-diff/difflib"
+ "golang.org/x/tools/go/analysis"
+ "mvdan.cc/gofumpt/format"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+ "github.com/golangci/golangci-lint/pkg/lint/linter"
+)
+
+const gofumptName = "gofumpt"
+
+func NewGofumpt() *goanalysis.Linter {
+ var mu sync.Mutex
+ var resIssues []goanalysis.Issue
+ differ := difflib.New()
+
+ analyzer := &analysis.Analyzer{
+ Name: gofumptName,
+ Doc: goanalysis.TheOnlyanalyzerDoc,
+ }
+ return goanalysis.NewLinter(
+ gofumptName,
+ "Gofumpt checks whether code was gofumpt-ed.",
+ []*analysis.Analyzer{analyzer},
+ nil,
+ ).WithContextSetter(func(lintCtx *linter.Context) {
+ analyzer.Run = func(pass *analysis.Pass) (interface{}, error) {
+ var fileNames []string
+ for _, f := range pass.Files {
+ pos := pass.Fset.PositionFor(f.Pos(), false)
+ fileNames = append(fileNames, pos.Filename)
+ }
+
+ var issues []goanalysis.Issue
+
+ for _, f := range fileNames {
+ input, err := ioutil.ReadFile(f)
+ if err != nil {
+ return nil, fmt.Errorf("unable to open file %s: %w", f, err)
+ }
+ output, err := format.Source(input, format.Options{
+ ExtraRules: lintCtx.Settings().Gofumpt.ExtraRules,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error while running gofumpt: %w", err)
+ }
+ if !bytes.Equal(input, output) {
+ out := bytes.Buffer{}
+ _, err = out.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f))
+ if err != nil {
+ return nil, fmt.Errorf("error while running gofumpt: %w", err)
+ }
+
+ err = differ.Diff(&out, bytes.NewReader(input), bytes.NewReader(output))
+ if err != nil {
+ return nil, fmt.Errorf("error while running gofumpt: %w", err)
+ }
+
+ diff := out.String()
+ is, err := extractIssuesFromPatch(diff, lintCtx.Log, lintCtx, gofumptName)
+ if err != nil {
+ return nil, errors.Wrapf(err, "can't extract issues from gofumpt diff output %q", diff)
+ }
+
+ for i := range is {
+ issues = append(issues, goanalysis.NewIssue(&is[i], pass))
+ }
+ }
+ }
+
+ if len(issues) == 0 {
+ return nil, nil
+ }
+
+ mu.Lock()
+ resIssues = append(resIssues, issues...)
+ mu.Unlock()
+
+ return nil, nil
+ }
+ }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+ return resIssues
+ }).WithLoadMode(goanalysis.LoadModeSyntax)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go
new file mode 100644
index 00000000..8517e173
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go
@@ -0,0 +1,81 @@
+package golinters
+
+import (
+ "go/token"
+ "sync"
+
+ goheader "github.com/denis-tingajkin/go-header"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+ "github.com/golangci/golangci-lint/pkg/lint/linter"
+ "github.com/golangci/golangci-lint/pkg/result"
+)
+
+const goHeaderName = "goheader"
+
+func NewGoHeader() *goanalysis.Linter {
+ var mu sync.Mutex
+ var issues []goanalysis.Issue
+
+ analyzer := &analysis.Analyzer{
+ Name: goHeaderName,
+ Doc: goanalysis.TheOnlyanalyzerDoc,
+ }
+ return goanalysis.NewLinter(
+ goHeaderName,
+ "Checks is file header matches to pattern",
+ []*analysis.Analyzer{analyzer},
+ nil,
+ ).WithContextSetter(func(lintCtx *linter.Context) {
+ cfg := lintCtx.Cfg.LintersSettings.Goheader
+ c := &goheader.Configuration{
+ Values: cfg.Values,
+ Template: cfg.Template,
+ TemplatePath: cfg.TemplatePath,
+ }
+ analyzer.Run = func(pass *analysis.Pass) (interface{}, error) {
+ if c.TemplatePath == "" && c.Template == "" {
+ // The user did not pass a template, so do not run the go-header linter
+ return nil, nil
+ }
+ template, err := c.GetTemplate()
+ if err != nil {
+ return nil, err
+ }
+ values, err := c.GetValues()
+ if err != nil {
+ return nil, err
+ }
+ a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values))
+ var res []goanalysis.Issue
+ for _, file := range pass.Files {
+ i := a.Analyze(file)
+ if i == nil {
+ continue
+ }
+ issue := result.Issue{
+ Pos: token.Position{
+ Line: i.Location().Line + 1,
+ Column: i.Location().Position,
+ Filename: pass.Fset.Position(file.Pos()).Filename,
+ },
+ Text: i.Message(),
+ FromLinter: goHeaderName,
+ }
+ res = append(res, goanalysis.NewIssue(&issue, pass))
+ }
+ if len(res) == 0 {
+ return nil, nil
+ }
+
+ mu.Lock()
+ issues = append(issues, res...)
+ mu.Unlock()
+
+ return nil, nil
+ }
+ }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
+ return issues
+ }).WithLoadMode(goanalysis.LoadModeSyntax)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go
index 97767db8..9ea4558f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go
@@ -47,7 +47,7 @@ func NewGoimports() *goanalysis.Linter {
continue
}
- is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, true)
+ is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, goimportsName)
if err != nil {
return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff))
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go
index 5b140631..1376ad15 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go
@@ -30,7 +30,9 @@ func NewGomodguard() *goanalysis.Linter {
return goanalysis.NewLinter(
gomodguardName,
- "Allow and block list linter for direct Go module dependencies.",
+ "Allow and block list linter for direct Go module dependencies. "+
+ "This is different from depguard where there are different block "+
+ "types for example version constraints and module recommendations.",
[]*analysis.Analyzer{analyzer},
nil,
).WithContextSetter(func(lintCtx *linter.Context) {
@@ -44,7 +46,7 @@ func NewGomodguard() *goanalysis.Linter {
processorCfg.Allowed.Domains = linterCfg.Allowed.Domains
for n := range linterCfg.Blocked.Modules {
for k, v := range linterCfg.Blocked.Modules[n] {
- m := gomodguard.BlockedModule{k: gomodguard.Recommendations{
+ m := map[string]gomodguard.BlockedModule{k: {
Recommendations: v.Recommendations,
Reason: v.Reason,
}}
@@ -53,6 +55,17 @@ func NewGomodguard() *goanalysis.Linter {
}
}
+ for n := range linterCfg.Blocked.Versions {
+ for k, v := range linterCfg.Blocked.Versions[n] {
+ m := map[string]gomodguard.BlockedVersion{k: {
+ Version: v.Version,
+ Reason: v.Reason,
+ }}
+ processorCfg.Blocked.Versions = append(processorCfg.Blocked.Versions, m)
+ break
+ }
+ }
+
for _, file := range pass.Files {
files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go
new file mode 100644
index 00000000..3b661c64
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go
@@ -0,0 +1,19 @@
+package golinters
+
+import (
+ "github.com/ssgreg/nlreturn/v2/pkg/nlreturn"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+)
+
+func NewNLReturn() *goanalysis.Linter {
+ return goanalysis.NewLinter(
+ "nlreturn",
+ "nlreturn checks for a new line before return and branch statements to increase code clarity",
+ []*analysis.Analyzer{
+ nlreturn.NewAnalyzer(),
+ },
+ nil,
+ ).WithLoadMode(goanalysis.LoadModeSyntax)
+}
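
Based only on the one-line description above, nlreturn asks for a blank line before return and branch statements that follow other statements in a block; a rough before/after sketch:

```go
package example

func abs(x int) int {
	if x < 0 {
		x = -x
	}
	return x // likely flagged: no blank line before the return
}

func absFixed(x int) int {
	if x < 0 {
		x = -x
	}

	return x // a blank line before the return satisfies the linter
}
```
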
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go
new file mode 100644
index 00000000..b5c4a4be
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go
@@ -0,0 +1,21 @@
+package golinters
+
+import (
+ "github.com/sonatard/noctx"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+)
+
+func NewNoctx() *goanalysis.Linter {
+ analyzers := []*analysis.Analyzer{
+ noctx.Analyzer,
+ }
+
+ return goanalysis.NewLinter(
+ "noctx",
+ "noctx finds sending http request without context.Context",
+ analyzers,
+ nil,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
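
noctx targets HTTP requests sent without a context.Context; a sketch of the pattern it reports and the usual context-aware replacement (standard library only):

```go
package example

import (
	"context"
	"net/http"
)

// fetch sends a request that carries no context, so the caller cannot
// cancel it or bound it with a deadline; noctx reports this.
func fetch(url string) (*http.Response, error) {
	return http.Get(url)
}

// fetchCtx binds the request to a caller-supplied context instead.
func fetchCtx(ctx context.Context, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req)
}
```
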
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go
new file mode 100644
index 00000000..48ca246e
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go
@@ -0,0 +1,21 @@
+package golinters
+
+import (
+ "github.com/ryanrolds/sqlclosecheck/pkg/analyzer"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/golinters/goanalysis"
+)
+
+func NewSQLCloseCheck() *goanalysis.Linter {
+ analyzers := []*analysis.Analyzer{
+ analyzer.NewAnalyzer(),
+ }
+
+ return goanalysis.NewLinter(
+ "sqlclosecheck",
+ "Checks that sql.Rows and sql.Stmt are closed.",
+ analyzers,
+ nil,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
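
The property sqlclosecheck enforces is simply that sql.Rows (and prepared statements) are closed on every path; a minimal compliant sketch using only database/sql:

```go
package example

import (
	"context"
	"database/sql"
)

func userNames(ctx context.Context, db *sql.DB) ([]string, error) {
	rows, err := db.QueryContext(ctx, "SELECT name FROM users")
	if err != nil {
		return nil, err
	}
	defer rows.Close() // sqlclosecheck reports paths where this is missing

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}
```
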
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go
index 1ec78de2..7ffaf9c2 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go
@@ -87,9 +87,11 @@ func enableLinterConfigs(lcs []*linter.Config, isEnabled func(lc *linter.Config)
func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
var govetCfg *config.GovetSettings
var testpackageCfg *config.TestpackageSettings
+ var exhaustiveCfg *config.ExhaustiveSettings
if m.cfg != nil {
govetCfg = &m.cfg.LintersSettings.Govet
testpackageCfg = &m.cfg.LintersSettings.Testpackage
+ exhaustiveCfg = &m.cfg.LintersSettings.Exhaustive
}
const megacheckName = "megacheck"
lcs := []*linter.Config{
@@ -102,6 +104,10 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithLoadForGoAnalysis().
WithPresets(linter.PresetPerformance, linter.PresetBugs).
WithURL("https://github.com/timakin/bodyclose"),
+ linter.NewConfig(golinters.NewNoctx()).
+ WithLoadForGoAnalysis().
+ WithPresets(linter.PresetPerformance, linter.PresetBugs).
+ WithURL("https://github.com/sonatard/noctx"),
linter.NewConfig(golinters.NewErrcheck()).
WithLoadForGoAnalysis().
WithPresets(linter.PresetBugs).
@@ -189,10 +195,22 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithPresets(linter.PresetFormatting).
WithAutoFix().
WithURL("https://golang.org/cmd/gofmt/"),
+ linter.NewConfig(golinters.NewGofumpt()).
+ WithPresets(linter.PresetFormatting).
+ WithAutoFix().
+ WithURL("https://github.com/mvdan/gofumpt"),
linter.NewConfig(golinters.NewGoimports()).
WithPresets(linter.PresetFormatting).
WithAutoFix().
WithURL("https://godoc.org/golang.org/x/tools/cmd/goimports"),
+ linter.NewConfig(golinters.NewGoHeader()).
+ WithPresets(linter.PresetStyle).
+ WithLoadForGoAnalysis().
+ WithURL("https://github.com/denis-tingajkin/go-header"),
+ linter.NewConfig(golinters.NewGci()).
+ WithLoadForGoAnalysis().
+ WithAutoFix().
+ WithURL("https://github.com/daixiang0/gci"),
linter.NewConfig(golinters.NewMaligned()).
WithLoadForGoAnalysis().
WithPresets(linter.PresetPerformance).
@@ -263,6 +281,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
WithURL("https://github.com/ryancurrah/gomodguard"),
linter.NewConfig(golinters.NewGodot()).
WithPresets(linter.PresetStyle).
+ WithAutoFix().
WithURL("https://github.com/tetafro/godot"),
linter.NewConfig(golinters.NewTestpackage(testpackageCfg)).
WithPresets(linter.PresetStyle).
@@ -271,10 +290,25 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config {
linter.NewConfig(golinters.NewNestif()).
WithPresets(linter.PresetComplexity).
WithURL("https://github.com/nakabonne/nestif"),
+ linter.NewConfig(golinters.NewExportLoopRef()).
+ WithPresets(linter.PresetBugs).
+ WithURL("https://github.com/kyoh86/exportloopref"),
+ linter.NewConfig(golinters.NewExhaustive(exhaustiveCfg)).
+ WithPresets(linter.PresetBugs).
+ WithLoadForGoAnalysis().
+ WithURL("https://github.com/nishanths/exhaustive"),
+ linter.NewConfig(golinters.NewSQLCloseCheck()).
+ WithPresets(linter.PresetBugs).
+ WithLoadForGoAnalysis().
+ WithURL("https://github.com/ryanrolds/sqlclosecheck"),
+ linter.NewConfig(golinters.NewNLReturn()).
+ WithPresets(linter.PresetStyle).
+ WithLoadForGoAnalysis().
+ WithURL("https://github.com/ssgreg/nlreturn"),
// nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives
linter.NewConfig(golinters.NewNoLintLint()).
WithPresets(linter.PresetStyle).
- WithURL("https://github.com/golangci-lint/pkg/golinters/nolintlint"),
+ WithURL("https://github.com/golangci/golangci-lint/blob/master/pkg/golinters/nolintlint/README.md"),
}
isLocalRun := os.Getenv("GOLANGCI_COM_RUN") == ""
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go
index d7e3699c..dead5965 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go
@@ -22,7 +22,7 @@ func (v Validator) validateLintersNames(cfg *config.Linters) error {
allNames = append(allNames, cfg.Disable...)
for _, name := range allNames {
if v.m.GetLinterConfigs(name) == nil {
- return fmt.Errorf("no such linter %q", name)
+ return fmt.Errorf("no such linter %v, run 'golangci-lint linters' to see the list of supported linters", name)
}
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go
index e2928a57..3525396b 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go
@@ -199,7 +199,14 @@ func (cl *ContextLoader) loadPackages(ctx context.Context, loadMode packages.Loa
cl.debugf("Built loader args are %s", args)
pkgs, err := packages.Load(conf, args...)
if err != nil {
- return nil, errors.Wrap(err, "failed to load program with go/packages")
+ return nil, errors.Wrap(err, "failed to load with go/packages")
+ }
+
+ // Currently, go/packages doesn't guarantee that an error will be returned
+ // if the context was canceled. See
+ // https://github.com/golang/tools/commit/c5cec6710e927457c3c29d6c156415e8539a5111#r39261855
+ if ctx.Err() != nil {
+ return nil, errors.Wrap(ctx.Err(), "timed out to load packages")
}
if loadMode&packages.NeedSyntax == 0 {
@@ -280,7 +287,7 @@ func (cl *ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*l
loadMode := cl.findLoadMode(linters)
pkgs, err := cl.loadPackages(ctx, loadMode)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "failed to load packages")
}
deduplicatedPkgs := cl.filterDuplicatePackages(pkgs)
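
The new ctx.Err() check covers the case where go/packages returns without an error even though the context is already canceled. The same defensive pattern, sketched outside golangci-lint with a hypothetical slowLoad helper:

```go
package example

import (
	"context"
	"fmt"
)

// slowLoad stands in for a call (such as packages.Load) that may not
// surface context cancellation as an error.
func slowLoad(ctx context.Context) ([]string, error) {
	return []string{"pkg/a", "pkg/b"}, nil
}

func load(ctx context.Context) ([]string, error) {
	pkgs, err := slowLoad(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to load: %w", err)
	}

	// The callee may have returned partial results without an error,
	// so check the context explicitly before trusting them.
	if err := ctx.Err(); err != nil {
		return nil, fmt.Errorf("timed out loading packages: %w", err)
	}
	return pkgs, nil
}
```
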
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
index 05dc51ba..08491222 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
@@ -31,24 +31,6 @@ type Runner struct {
func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lintersdb.EnabledSet,
lineCache *fsutils.LineCache, dbManager *lintersdb.Manager, pkgs []*gopackages.Package) (*Runner, error) {
- icfg := cfg.Issues
- excludePatterns := icfg.ExcludePatterns
- if icfg.UseDefaultExcludes {
- excludePatterns = append(excludePatterns, config.GetExcludePatternsStrings(icfg.IncludeDefaultExcludes)...)
- }
-
- var excludeTotalPattern string
- if len(excludePatterns) != 0 {
- excludeTotalPattern = fmt.Sprintf("(%s)", strings.Join(excludePatterns, "|"))
- }
-
- var excludeProcessor processors.Processor
- if cfg.Issues.ExcludeCaseSensitive {
- excludeProcessor = processors.NewExcludeCaseSensitive(excludeTotalPattern)
- } else {
- excludeProcessor = processors.NewExclude(excludeTotalPattern)
- }
-
skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles)
if err != nil {
return nil, err
@@ -63,22 +45,6 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint
return nil, err
}
- var excludeRules []processors.ExcludeRule
- for _, r := range icfg.ExcludeRules {
- excludeRules = append(excludeRules, processors.ExcludeRule{
- Text: r.Text,
- Source: r.Source,
- Path: r.Path,
- Linters: r.Linters,
- })
- }
- var excludeRulesProcessor processors.Processor
- if cfg.Issues.ExcludeCaseSensitive {
- excludeRulesProcessor = processors.NewExcludeRulesCaseSensitive(excludeRules, lineCache, log.Child("exclude_rules"))
- } else {
- excludeRulesProcessor = processors.NewExcludeRules(excludeRules, lineCache, log.Child("exclude_rules"))
- }
-
enabledLinters, err := es.GetEnabledLintersMap()
if err != nil {
return nil, errors.Wrap(err, "failed to get enabled linters")
@@ -101,17 +67,20 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint
// Must be before exclude because users see already marked output and configure excluding by it.
processors.NewIdentifierMarker(),
- excludeProcessor,
- excludeRulesProcessor,
+ getExcludeProcessor(&cfg.Issues),
+ getExcludeRulesProcessor(&cfg.Issues, log, lineCache),
processors.NewNolint(log.Child("nolint"), dbManager, enabledLinters),
processors.NewUniqByLine(cfg),
- processors.NewDiff(icfg.Diff, icfg.DiffFromRevision, icfg.DiffPatchFilePath),
+ processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath),
processors.NewMaxPerFileFromLinter(cfg),
- processors.NewMaxSameIssues(icfg.MaxSameIssues, log.Child("max_same_issues"), cfg),
- processors.NewMaxFromLinter(icfg.MaxIssuesPerLinter, log.Child("max_from_linter"), cfg),
+ processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child("max_same_issues"), cfg),
+ processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child("max_from_linter"), cfg),
processors.NewSourceCode(lineCache, log.Child("source_code")),
processors.NewPathShortener(),
+ getSeverityRulesProcessor(&cfg.Severity, log, lineCache),
+ processors.NewPathPrefixer(cfg.Output.PathPrefix),
+ processors.NewSortResults(cfg),
},
Log: log,
}, nil
@@ -254,3 +223,89 @@ func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, s
return issues
}
+
+func getExcludeProcessor(cfg *config.Issues) processors.Processor {
+ excludePatterns := cfg.ExcludePatterns
+ if cfg.UseDefaultExcludes {
+ excludePatterns = append(excludePatterns, config.GetExcludePatternsStrings(cfg.IncludeDefaultExcludes)...)
+ }
+
+ var excludeTotalPattern string
+ if len(excludePatterns) != 0 {
+ excludeTotalPattern = fmt.Sprintf("(%s)", strings.Join(excludePatterns, "|"))
+ }
+
+ var excludeProcessor processors.Processor
+ if cfg.ExcludeCaseSensitive {
+ excludeProcessor = processors.NewExcludeCaseSensitive(excludeTotalPattern)
+ } else {
+ excludeProcessor = processors.NewExclude(excludeTotalPattern)
+ }
+
+ return excludeProcessor
+}
+
+func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor {
+ var excludeRules []processors.ExcludeRule
+ for _, r := range cfg.ExcludeRules {
+ excludeRules = append(excludeRules, processors.ExcludeRule{
+ BaseRule: processors.BaseRule{
+ Text: r.Text,
+ Source: r.Source,
+ Path: r.Path,
+ Linters: r.Linters,
+ },
+ })
+ }
+
+ var excludeRulesProcessor processors.Processor
+ if cfg.ExcludeCaseSensitive {
+ excludeRulesProcessor = processors.NewExcludeRulesCaseSensitive(
+ excludeRules,
+ lineCache,
+ log.Child("exclude_rules"),
+ )
+ } else {
+ excludeRulesProcessor = processors.NewExcludeRules(
+ excludeRules,
+ lineCache,
+ log.Child("exclude_rules"),
+ )
+ }
+
+ return excludeRulesProcessor
+}
+
+func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor {
+ var severityRules []processors.SeverityRule
+ for _, r := range cfg.Rules {
+ severityRules = append(severityRules, processors.SeverityRule{
+ Severity: r.Severity,
+ BaseRule: processors.BaseRule{
+ Text: r.Text,
+ Source: r.Source,
+ Path: r.Path,
+ Linters: r.Linters,
+ },
+ })
+ }
+
+ var severityRulesProcessor processors.Processor
+ if cfg.CaseSensitive {
+ severityRulesProcessor = processors.NewSeverityRulesCaseSensitive(
+ cfg.Default,
+ severityRules,
+ lineCache,
+ log.Child("severity_rules"),
+ )
+ } else {
+ severityRulesProcessor = processors.NewSeverityRules(
+ cfg.Default,
+ severityRules,
+ lineCache,
+ log.Child("severity_rules"),
+ )
+ }
+
+ return severityRulesProcessor
+}
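
With the shared matching fields now living in the embedded BaseRule, building exclude and severity rules programmatically looks roughly like this (the rule values are illustrative, not taken from the diff):

```go
package example

import "github.com/golangci/golangci-lint/pkg/result/processors"

func exampleRules() ([]processors.ExcludeRule, []processors.SeverityRule) {
	exclude := []processors.ExcludeRule{{
		BaseRule: processors.BaseRule{
			Text:    "should have comment",
			Path:    `_test\.go`,
			Linters: []string{"golint"},
		},
	}}

	severity := []processors.SeverityRule{{
		Severity: "info",
		BaseRule: processors.BaseRule{
			Linters: []string{"godot"},
		},
	}}

	return exclude, severity
}
```
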
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go
index f36bc108..c5b948a9 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go
@@ -5,6 +5,8 @@ import (
"encoding/xml"
"fmt"
+ "github.com/go-xmlfmt/xmlfmt"
+
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/golangci/golangci-lint/pkg/result"
)
@@ -28,7 +30,7 @@ type checkstyleError struct {
Source string `xml:"source,attr"`
}
-const defaultSeverity = "error"
+const defaultCheckstyleSeverity = "error"
type Checkstyle struct{}
@@ -54,12 +56,17 @@ func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error {
files[issue.FilePath()] = file
}
+ severity := defaultCheckstyleSeverity
+ if issue.Severity != "" {
+ severity = issue.Severity
+ }
+
newError := &checkstyleError{
Column: issue.Column(),
Line: issue.Line(),
Message: issue.Text,
Source: issue.FromLinter,
- Severity: defaultSeverity,
+ Severity: severity,
}
file.Errors = append(file.Errors, newError)
@@ -75,6 +82,6 @@ func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error {
return err
}
- fmt.Fprintf(logutils.StdOut, "%s%s\n", xml.Header, data)
+ fmt.Fprintf(logutils.StdOut, "%s%s\n", xml.Header, xmlfmt.FormatXML(string(data), "", " "))
return nil
}
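
The only new import here is go-xmlfmt, used to pretty-print the checkstyle report; FormatXML takes the raw XML, a per-line prefix, and an indent string, as in this small sketch:

```go
package main

import (
	"fmt"

	"github.com/go-xmlfmt/xmlfmt"
)

func main() {
	raw := `<checkstyle version="5.0"><file name="main.go"><error line="3" message="example"/></file></checkstyle>`
	fmt.Println(xmlfmt.FormatXML(raw, "", "  "))
}
```
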
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
index 5d45c4eb..35a22ce9 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
@@ -2,7 +2,6 @@ package printers
import (
"context"
- "crypto/md5" //nolint:gosec
"encoding/json"
"fmt"
@@ -14,6 +13,7 @@ import (
// It is just enough to support GitLab CI Code Quality - https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html
type CodeClimateIssue struct {
Description string `json:"description"`
+ Severity string `json:"severity,omitempty"`
Fingerprint string `json:"fingerprint"`
Location struct {
Path string `json:"path"`
@@ -31,28 +31,23 @@ func NewCodeClimate() *CodeClimate {
}
func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error {
- allIssues := []CodeClimateIssue{}
- for ind := range issues {
- i := &issues[ind]
- var issue CodeClimateIssue
- issue.Description = i.FromLinter + ": " + i.Text
- issue.Location.Path = i.Pos.Filename
- issue.Location.Lines.Begin = i.Pos.Line
+ codeClimateIssues := []CodeClimateIssue{}
+ for i := range issues {
+ issue := &issues[i]
+ codeClimateIssue := CodeClimateIssue{}
+ codeClimateIssue.Description = issue.Description()
+ codeClimateIssue.Location.Path = issue.Pos.Filename
+ codeClimateIssue.Location.Lines.Begin = issue.Pos.Line
+ codeClimateIssue.Fingerprint = issue.Fingerprint()
- // Need a checksum of the issue, so we use MD5 of the filename, text, and first line of source if there is any
- var firstLine string
- if len(i.SourceLines) > 0 {
- firstLine = i.SourceLines[0]
+ if issue.Severity != "" {
+ codeClimateIssue.Severity = issue.Severity
}
- hash := md5.New() //nolint:gosec
- _, _ = hash.Write([]byte(i.Pos.Filename + i.Text + firstLine))
- issue.Fingerprint = fmt.Sprintf("%X", hash.Sum(nil))
-
- allIssues = append(allIssues, issue)
+ codeClimateIssues = append(codeClimateIssues, codeClimateIssue)
}
- outputJSON, err := json.Marshal(allIssues)
+ outputJSON, err := json.Marshal(codeClimateIssues)
if err != nil {
return err
}
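
Because CodeClimateIssue is exported, the JSON shape consumed by GitLab Code Quality can be seen by marshaling one directly; the field values below are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/golangci/golangci-lint/pkg/printers"
)

func main() {
	issue := printers.CodeClimateIssue{
		Description: "unused: variable x is unused",
		Severity:    "major",
		Fingerprint: "ABC123", // normally an MD5 over filename, text and first source line
	}
	issue.Location.Path = "pkg/foo/foo.go"
	issue.Location.Lines.Begin = 42

	out, err := json.Marshal([]printers.CodeClimateIssue{issue})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```
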
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go
index fa11a283..b8d70140 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go
@@ -11,6 +11,8 @@ import (
type github struct {
}
+const defaultGithubSeverity = "error"
+
// Github output format outputs issues according to Github actions format:
// https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message
func NewGithub() Printer {
@@ -19,7 +21,12 @@ func NewGithub() Printer {
// print each line as: ::error file=app.js,line=10,col=15::Something went wrong
func formatIssueAsGithub(issue *result.Issue) string {
- ret := fmt.Sprintf("::error file=%s,line=%d", issue.FilePath(), issue.Line())
+ severity := defaultGithubSeverity
+ if issue.Severity != "" {
+ severity = issue.Severity
+ }
+
+ ret := fmt.Sprintf("::%s file=%s,line=%d", severity, issue.FilePath(), issue.Line())
if issue.Pos.Column != 0 {
ret += fmt.Sprintf(",col=%d", issue.Pos.Column)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
index 16d9a8a8..eafdbc4a 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go
@@ -1,6 +1,8 @@
package result
import (
+ "crypto/md5" //nolint:gosec
+ "fmt"
"go/token"
"golang.org/x/tools/go/packages"
@@ -26,6 +28,8 @@ type Issue struct {
FromLinter string
Text string
+ Severity string
+
// Source lines of a code with the issue to show
SourceLines []string
@@ -76,3 +80,19 @@ func (i *Issue) GetLineRange() Range {
return *i.LineRange
}
+
+func (i *Issue) Description() string {
+ return fmt.Sprintf("%s: %s", i.FromLinter, i.Text)
+}
+
+func (i *Issue) Fingerprint() string {
+ firstLine := ""
+ if len(i.SourceLines) > 0 {
+ firstLine = i.SourceLines[0]
+ }
+
+ hash := md5.New() //nolint:gosec
+ _, _ = hash.Write([]byte(fmt.Sprintf("%s%s%s", i.Pos.Filename, i.Text, firstLine)))
+
+ return fmt.Sprintf("%X", hash.Sum(nil))
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go
index 249ba9d4..11221795 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go
@@ -1,9 +1,9 @@
package processors
import (
- "bufio"
"fmt"
- "os"
+ "go/parser"
+ "go/token"
"path/filepath"
"strings"
@@ -113,37 +113,15 @@ func (p *AutogeneratedExclude) getOrCreateFileSummary(i *result.Issue) (*ageFile
}
func getDoc(filePath string) (string, error) {
- file, err := os.Open(filePath)
+ fset := token.NewFileSet()
+ syntax, err := parser.ParseFile(fset, filePath, nil, parser.PackageClauseOnly|parser.ParseComments)
if err != nil {
- return "", errors.Wrap(err, "failed to open file")
+ return "", errors.Wrap(err, "failed to parse file")
}
- defer file.Close()
-
- scanner := bufio.NewScanner(file)
-
- // Issue 954: Some lines can be very long, e.g. auto-generated
- // embedded resources. Reported on file of 86.2KB.
- const (
- maxSize = 10 * 1024 * 1024 // 10MB should be enough
- initialSize = 4096 // same as startBufSize in bufio
- )
- scanner.Buffer(make([]byte, initialSize), maxSize)
var docLines []string
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if strings.HasPrefix(line, "//") {
- text := strings.TrimSpace(strings.TrimPrefix(line, "//"))
- docLines = append(docLines, text)
- } else if line == "" || strings.HasPrefix(line, "package") {
- // go to next line
- } else {
- break
- }
- }
-
- if err := scanner.Err(); err != nil {
- return "", errors.Wrap(err, "failed to scan file")
+ for _, c := range syntax.Comments {
+ docLines = append(docLines, strings.TrimSpace(c.Text()))
}
return strings.Join(docLines, "\n"), nil
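
The rewritten getDoc leans on go/parser's PackageClauseOnly mode, which stops parsing after the package clause while ParseComments still collects the file's leading comments; the same idea as a standalone helper:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"
	"strings"
)

// fileDoc returns the comments that precede the package clause of a file.
func fileDoc(path string) (string, error) {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, path, nil, parser.PackageClauseOnly|parser.ParseComments)
	if err != nil {
		return "", err
	}

	var docLines []string
	for _, c := range f.Comments {
		docLines = append(docLines, strings.TrimSpace(c.Text()))
	}
	return strings.Join(docLines, "\n"), nil
}

func main() {
	doc, err := fileDoc("main.go") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc)
}
```
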
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go
new file mode 100644
index 00000000..b6ce4f21
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go
@@ -0,0 +1,69 @@
+package processors
+
+import (
+ "regexp"
+
+ "github.com/golangci/golangci-lint/pkg/fsutils"
+ "github.com/golangci/golangci-lint/pkg/logutils"
+ "github.com/golangci/golangci-lint/pkg/result"
+)
+
+type BaseRule struct {
+ Text string
+ Source string
+ Path string
+ Linters []string
+}
+
+type baseRule struct {
+ text *regexp.Regexp
+ source *regexp.Regexp
+ path *regexp.Regexp
+ linters []string
+}
+
+func (r *baseRule) isEmpty() bool {
+ return r.text == nil && r.source == nil && r.path == nil && len(r.linters) == 0
+}
+
+func (r *baseRule) match(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool {
+ if r.isEmpty() {
+ return false
+ }
+ if r.text != nil && !r.text.MatchString(issue.Text) {
+ return false
+ }
+ if r.path != nil && !r.path.MatchString(issue.FilePath()) {
+ return false
+ }
+ if len(r.linters) != 0 && !r.matchLinter(issue) {
+ return false
+ }
+
+ // the most heavyweight checking last
+ if r.source != nil && !r.matchSource(issue, lineCache, log) {
+ return false
+ }
+
+ return true
+}
+
+func (r *baseRule) matchLinter(issue *result.Issue) bool {
+ for _, linter := range r.linters {
+ if linter == issue.FromLinter {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { // nolint:interfacer
+ sourceLine, errSourceLine := lineCache.GetLine(issue.FilePath(), issue.Line())
+ if errSourceLine != nil {
+ log.Warnf("Failed to get line %s:%d from line cache: %s", issue.FilePath(), issue.Line(), errSourceLine)
+ return false // can't properly match
+ }
+
+ return r.source.MatchString(sourceLine)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go
index b926af5b..d4d6569f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go
@@ -9,21 +9,11 @@ import (
)
type excludeRule struct {
- text *regexp.Regexp
- source *regexp.Regexp
- path *regexp.Regexp
- linters []string
-}
-
-func (r *excludeRule) isEmpty() bool {
- return r.text == nil && r.path == nil && len(r.linters) == 0
+ baseRule
}
type ExcludeRule struct {
- Text string
- Source string
- Path string
- Linters []string
+ BaseRule
}
type ExcludeRules struct {
@@ -45,9 +35,8 @@ func NewExcludeRules(rules []ExcludeRule, lineCache *fsutils.LineCache, log logu
func createRules(rules []ExcludeRule, prefix string) []excludeRule {
parsedRules := make([]excludeRule, 0, len(rules))
for _, rule := range rules {
- parsedRule := excludeRule{
- linters: rule.Linters,
- }
+ parsedRule := excludeRule{}
+ parsedRule.linters = rule.Linters
if rule.Text != "" {
parsedRule.text = regexp.MustCompile(prefix + rule.Text)
}
@@ -69,7 +58,7 @@ func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) {
return filterIssues(issues, func(i *result.Issue) bool {
for _, rule := range p.rules {
rule := rule
- if p.match(i, &rule) {
+ if rule.match(i, p.lineCache, p.log) {
return false
}
}
@@ -77,48 +66,6 @@ func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) {
}), nil
}
-func (p ExcludeRules) matchLinter(i *result.Issue, r *excludeRule) bool {
- for _, linter := range r.linters {
- if linter == i.FromLinter {
- return true
- }
- }
-
- return false
-}
-
-func (p ExcludeRules) matchSource(i *result.Issue, r *excludeRule) bool { //nolint:interfacer
- sourceLine, err := p.lineCache.GetLine(i.FilePath(), i.Line())
- if err != nil {
- p.log.Warnf("Failed to get line %s:%d from line cache: %s", i.FilePath(), i.Line(), err)
- return false // can't properly match
- }
-
- return r.source.MatchString(sourceLine)
-}
-
-func (p ExcludeRules) match(i *result.Issue, r *excludeRule) bool {
- if r.isEmpty() {
- return false
- }
- if r.text != nil && !r.text.MatchString(i.Text) {
- return false
- }
- if r.path != nil && !r.path.MatchString(i.FilePath()) {
- return false
- }
- if len(r.linters) != 0 && !p.matchLinter(i, r) {
- return false
- }
-
- // the most heavyweight checking last
- if r.source != nil && !p.matchSource(i, r) {
- return false
- }
-
- return true
-}
-
func (ExcludeRules) Name() string { return "exclude-rules" }
func (ExcludeRules) Finish() {}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go
index 5e692ceb..96540245 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go
@@ -16,6 +16,11 @@ import (
type posMapper func(pos token.Position) token.Position
+type adjustMap struct {
+ sync.Mutex
+ m map[string]posMapper
+}
+
// FilenameUnadjuster is needed because a lot of linters use fset.Position(f.Pos())
// to get filename. And they return adjusted filename (e.g. *.qtpl) for an issue. We need
// restore real .go filename to properly output it, parse it, etc.
@@ -27,7 +32,7 @@ type FilenameUnadjuster struct {
var _ Processor = &FilenameUnadjuster{}
-func processUnadjusterPkg(m map[string]posMapper, pkg *packages.Package, log logutils.Log) {
+func processUnadjusterPkg(m *adjustMap, pkg *packages.Package, log logutils.Log) {
fset := token.NewFileSet() // it's more memory efficient to not store all in one fset
for _, filename := range pkg.CompiledGoFiles {
@@ -36,7 +41,7 @@ func processUnadjusterPkg(m map[string]posMapper, pkg *packages.Package, log log
}
}
-func processUnadjusterFile(filename string, m map[string]posMapper, log logutils.Log, fset *token.FileSet) {
+func processUnadjusterFile(filename string, m *adjustMap, log logutils.Log, fset *token.FileSet) {
syntax, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
if err != nil {
// Error will be reported by typecheck
@@ -57,7 +62,9 @@ func processUnadjusterFile(filename string, m map[string]posMapper, log logutils
return // file.go -> /caches/cgo-xxx
}
- m[adjustedFilename] = func(adjustedPos token.Position) token.Position {
+ m.Lock()
+ defer m.Unlock()
+ m.m[adjustedFilename] = func(adjustedPos token.Position) token.Position {
tokenFile := fset.File(syntax.Pos())
if tokenFile == nil {
log.Warnf("Failed to get token file for %s", adjustedFilename)
@@ -68,22 +75,23 @@ func processUnadjusterFile(filename string, m map[string]posMapper, log logutils
}
func NewFilenameUnadjuster(pkgs []*packages.Package, log logutils.Log) *FilenameUnadjuster {
- m := map[string]posMapper{}
+ m := adjustMap{m: map[string]posMapper{}}
+
startedAt := time.Now()
var wg sync.WaitGroup
wg.Add(len(pkgs))
for _, pkg := range pkgs {
go func(pkg *packages.Package) {
// It's important to call func here to run GC
- processUnadjusterPkg(m, pkg, log)
+ processUnadjusterPkg(&m, pkg, log)
wg.Done()
}(pkg)
}
wg.Wait()
- log.Infof("Pre-built %d adjustments in %s", len(m), time.Since(startedAt))
+ log.Infof("Pre-built %d adjustments in %s", len(m.m), time.Since(startedAt))
return &FilenameUnadjuster{
- m: m,
+ m: m.m,
log: log,
loggedUnadjustments: map[string]bool{},
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go
new file mode 100644
index 00000000..5ce940b3
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go
@@ -0,0 +1,37 @@
+package processors
+
+import (
+ "path"
+
+ "github.com/golangci/golangci-lint/pkg/result"
+)
+
+// PathPrefixer adds a customizable prefix to every output path
+type PathPrefixer struct {
+ prefix string
+}
+
+var _ Processor = new(PathPrefixer)
+
+// NewPathPrefixer returns a new path prefixer for the provided string
+func NewPathPrefixer(prefix string) *PathPrefixer {
+ return &PathPrefixer{prefix: prefix}
+}
+
+// Name returns the name of this processor
+func (*PathPrefixer) Name() string {
+ return "path_prefixer"
+}
+
+// Process adds the prefix to each path
+func (p *PathPrefixer) Process(issues []result.Issue) ([]result.Issue, error) {
+ if p.prefix != "" {
+ for i := range issues {
+ issues[i].Pos.Filename = path.Join(p.prefix, issues[i].Pos.Filename)
+ }
+ }
+ return issues, nil
+}
+
+// Finish is implemented to satisfy the Processor interface
+func (*PathPrefixer) Finish() {}
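
Standalone, the new processor just joins the configured prefix onto every issue path; a short usage sketch with an illustrative prefix:

```go
package main

import (
	"fmt"
	"go/token"

	"github.com/golangci/golangci-lint/pkg/result"
	"github.com/golangci/golangci-lint/pkg/result/processors"
)

func main() {
	p := processors.NewPathPrefixer("backend") // e.g. output.path-prefix: backend

	issues, err := p.Process([]result.Issue{
		{Pos: token.Position{Filename: "pkg/foo/foo.go", Line: 1}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(issues[0].Pos.Filename) // backend/pkg/foo/foo.go
}
```
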
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go
new file mode 100644
index 00000000..5f11b541
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go
@@ -0,0 +1,103 @@
+package processors
+
+import (
+ "regexp"
+
+ "github.com/golangci/golangci-lint/pkg/fsutils"
+ "github.com/golangci/golangci-lint/pkg/logutils"
+ "github.com/golangci/golangci-lint/pkg/result"
+)
+
+type severityRule struct {
+ baseRule
+ severity string
+}
+
+type SeverityRule struct {
+ BaseRule
+ Severity string
+}
+
+type SeverityRules struct {
+ defaultSeverity string
+ rules []severityRule
+ lineCache *fsutils.LineCache
+ log logutils.Log
+}
+
+func NewSeverityRules(defaultSeverity string, rules []SeverityRule, lineCache *fsutils.LineCache, log logutils.Log) *SeverityRules {
+ r := &SeverityRules{
+ lineCache: lineCache,
+ log: log,
+ defaultSeverity: defaultSeverity,
+ }
+ r.rules = createSeverityRules(rules, "(?i)")
+
+ return r
+}
+
+func createSeverityRules(rules []SeverityRule, prefix string) []severityRule {
+ parsedRules := make([]severityRule, 0, len(rules))
+ for _, rule := range rules {
+ parsedRule := severityRule{}
+ parsedRule.linters = rule.Linters
+ parsedRule.severity = rule.Severity
+ if rule.Text != "" {
+ parsedRule.text = regexp.MustCompile(prefix + rule.Text)
+ }
+ if rule.Source != "" {
+ parsedRule.source = regexp.MustCompile(prefix + rule.Source)
+ }
+ if rule.Path != "" {
+ parsedRule.path = regexp.MustCompile(rule.Path)
+ }
+ parsedRules = append(parsedRules, parsedRule)
+ }
+ return parsedRules
+}
+
+func (p SeverityRules) Process(issues []result.Issue) ([]result.Issue, error) {
+ if len(p.rules) == 0 {
+ return issues, nil
+ }
+ return transformIssues(issues, func(i *result.Issue) *result.Issue {
+ for _, rule := range p.rules {
+ rule := rule
+
+ ruleSeverity := p.defaultSeverity
+ if rule.severity != "" {
+ ruleSeverity = rule.severity
+ }
+
+ if rule.match(i, p.lineCache, p.log) {
+ i.Severity = ruleSeverity
+ return i
+ }
+ }
+ i.Severity = p.defaultSeverity
+ return i
+ }), nil
+}
+
+func (SeverityRules) Name() string { return "severity-rules" }
+func (SeverityRules) Finish() {}
+
+var _ Processor = SeverityRules{}
+
+type SeverityRulesCaseSensitive struct {
+ *SeverityRules
+}
+
+func NewSeverityRulesCaseSensitive(defaultSeverity string, rules []SeverityRule,
+ lineCache *fsutils.LineCache, log logutils.Log) *SeverityRulesCaseSensitive {
+ r := &SeverityRules{
+ lineCache: lineCache,
+ log: log,
+ defaultSeverity: defaultSeverity,
+ }
+ r.rules = createSeverityRules(rules, "")
+
+ return &SeverityRulesCaseSensitive{r}
+}
+
+func (SeverityRulesCaseSensitive) Name() string { return "severity-rules-case-sensitive" }
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go
new file mode 100644
index 00000000..e726c3ad
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go
@@ -0,0 +1,173 @@
+package processors
+
+import (
+ "sort"
+ "strings"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/result"
+)
+
+// The base purpose of this functionality is to sort the results (issues)
+// produced by the various linters. We achieve this by sorting result.Issues
+// in a processor step, using chain-based rules that can compare different
+// properties of the Issue struct.
+
+var _ Processor = (*SortResults)(nil)
+
+type SortResults struct {
+ cmp comparator
+ cfg *config.Config
+}
+
+func NewSortResults(cfg *config.Config) *SortResults {
+ // For sorting we compare, in order: file names, line numbers, and
+ // columns; if all of those are equal, we give up and treat the issues as equal.
+ return &SortResults{
+ cmp: ByName{
+ next: ByLine{
+ next: ByColumn{},
+ },
+ },
+ cfg: cfg,
+ }
+}
+
+// Process sorts the result issues.
+func (sr SortResults) Process(issues []result.Issue) ([]result.Issue, error) {
+ if !sr.cfg.Output.SortResults {
+ return issues, nil
+ }
+
+ sort.Slice(issues, func(i, j int) bool {
+ return sr.cmp.Compare(&issues[i], &issues[j]) == Less
+ })
+
+ return issues, nil
+}
+
+func (sr SortResults) Name() string { return "sort_results" }
+func (sr SortResults) Finish() {}
+
+type compareResult int
+
+const (
+ Less compareResult = iota - 1
+ Equal
+ Greater
+ None
+)
+
+func (c compareResult) isNeutral() bool {
+ // return true if compare result is incomparable or equal.
+ return c == None || c == Equal
+}
+
+//nolint:exhaustive
+func (c compareResult) String() string {
+ switch c {
+ case Less:
+ return "Less"
+ case Equal:
+ return "Equal"
+ case Greater:
+ return "Greater"
+ }
+
+ return "None"
+}
+
+// comparator describes how to compare two issues lexicographically
+type comparator interface {
+ Compare(a, b *result.Issue) compareResult
+ Next() comparator
+}
+
+var (
+ _ comparator = (*ByName)(nil)
+ _ comparator = (*ByLine)(nil)
+ _ comparator = (*ByColumn)(nil)
+)
+
+type ByName struct{ next comparator }
+
+//nolint:golint
+func (cmp ByName) Next() comparator { return cmp.next }
+
+//nolint:golint
+func (cmp ByName) Compare(a, b *result.Issue) compareResult {
+ var res compareResult
+
+ if res = compareResult(strings.Compare(a.FilePath(), b.FilePath())); !res.isNeutral() {
+ return res
+ }
+
+ if next := cmp.Next(); next != nil {
+ return next.Compare(a, b)
+ }
+
+ return res
+}
+
+type ByLine struct{ next comparator }
+
+//nolint:golint
+func (cmp ByLine) Next() comparator { return cmp.next }
+
+//nolint:golint
+func (cmp ByLine) Compare(a, b *result.Issue) compareResult {
+ var res compareResult
+
+ if res = numericCompare(a.Line(), b.Line()); !res.isNeutral() {
+ return res
+ }
+
+ if next := cmp.Next(); next != nil {
+ return next.Compare(a, b)
+ }
+
+ return res
+}
+
+type ByColumn struct{ next comparator }
+
+//nolint:golint
+func (cmp ByColumn) Next() comparator { return cmp.next }
+
+//nolint:golint
+func (cmp ByColumn) Compare(a, b *result.Issue) compareResult {
+ var res compareResult
+
+ if res = numericCompare(a.Column(), b.Column()); !res.isNeutral() {
+ return res
+ }
+
+ if next := cmp.Next(); next != nil {
+ return next.Compare(a, b)
+ }
+
+ return res
+}
+
+func numericCompare(a, b int) compareResult {
+ var (
+ isValuesInvalid = a < 0 || b < 0
+ isZeroValuesBoth = a == 0 && b == 0
+ isEqual = a == b
+ isZeroValueInA = b > 0 && a == 0
+ isZeroValueInB = a > 0 && b == 0
+ )
+
+ switch {
+ case isZeroValuesBoth || isEqual:
+ return Equal
+ case isValuesInvalid || isZeroValueInA || isZeroValueInB:
+ return None
+ case a > b:
+ return Greater
+ case a < b:
+ return Less
+ }
+
+ return Equal
+}
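
The comparators form a small chain: each orders by one property and defers to Next() on ties (file name, then line, then column). A usage sketch through the public processor, assuming Output.SortResults is assignable exactly as the Process method above reads it:

```go
package main

import (
	"fmt"
	"go/token"

	"github.com/golangci/golangci-lint/pkg/config"
	"github.com/golangci/golangci-lint/pkg/result"
	"github.com/golangci/golangci-lint/pkg/result/processors"
)

func main() {
	cfg := &config.Config{}
	cfg.Output.SortResults = true // assumed assignable; this is the field Process reads

	issues := []result.Issue{
		{FromLinter: "golint", Pos: token.Position{Filename: "b.go", Line: 3}},
		{FromLinter: "govet", Pos: token.Position{Filename: "a.go", Line: 10, Column: 2}},
		{FromLinter: "govet", Pos: token.Position{Filename: "a.go", Line: 10, Column: 1}},
	}

	sorted, err := processors.NewSortResults(cfg).Process(issues)
	if err != nil {
		panic(err)
	}
	for _, i := range sorted {
		fmt.Printf("%s:%d:%d %s\n", i.Pos.Filename, i.Pos.Line, i.Pos.Column, i.FromLinter)
	}
}
```
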
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go b/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go
new file mode 100644
index 00000000..cb89e34e
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go
@@ -0,0 +1,17 @@
+package sliceutil
+
+// IndexOf returns the index of the given value in the given string slice,
+// or -1 if not found.
+func IndexOf(slice []string, value string) int {
+ for i, v := range slice {
+ if v == value {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether a string slice contains a value.
+func Contains(slice []string, value string) bool {
+ return IndexOf(slice, value) != -1
+}
diff --git a/vendor/github.com/golangci/revgrep/revgrep.go b/vendor/github.com/golangci/revgrep/revgrep.go
index 1a56dcf4..3650d646 100644
--- a/vendor/github.com/golangci/revgrep/revgrep.go
+++ b/vendor/github.com/golangci/revgrep/revgrep.go
@@ -80,6 +80,7 @@ func (c *Checker) preparePatch() error {
return nil
}
+// InputIssue represents an issue found by some linter
type InputIssue interface {
FilePath() string
Line() int
@@ -98,12 +99,14 @@ func (i simpleInputIssue) Line() int {
return i.lineNumber
}
+// Prepare extracts a patch and changed lines
func (c *Checker) Prepare() error {
returnErr := c.preparePatch()
c.changes = c.linesChanged()
return returnErr
}
+// IsNewIssue checks whether an issue found by a linter is new, that is, whether it appears in changed lines
func (c Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) {
fchanges, ok := c.changes[i.FilePath()]
if !ok { // file wasn't changed
@@ -116,7 +119,7 @@ func (c Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) {
)
// found file, see if lines matched
for _, pos := range fchanges {
- if pos.lineNo == int(i.Line()) {
+ if pos.lineNo == i.Line() {
fpos = pos
changed = true
break
@@ -338,7 +341,7 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) {
// make a patch for untracked files
var newFiles []string
- ls, err := exec.Command("git", "ls-files", "-o").CombinedOutput()
+ ls, err := exec.Command("git", "ls-files", "--others", "--exclude-standard").CombinedOutput()
if err != nil {
return nil, nil, fmt.Errorf("error executing git ls-files: %s", err)
}
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
index 580ae209..66561868 100644
--- a/vendor/github.com/google/go-cmp/cmp/compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -95,12 +95,12 @@ func Equal(x, y interface{}, opts ...Option) bool {
return s.result.Equal()
}
-// Diff returns a human-readable report of the differences between two values.
-// It returns an empty string if and only if Equal returns true for the same
-// input values and options.
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
//
// The output is displayed as a literal in pseudo-Go syntax.
-// At the start of each line, a "-" prefix indicates an element removed from x,
+// At the start of each line, a "-" prefix indicates an element removed from y,
// a "+" prefix to indicates an element added to y, and the lack of a prefix
// indicates an element common to both x and y. If possible, the output
// uses fmt.Stringer.String or error.Error methods to produce more humanly
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index abbd2a63..4b0407a7 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -225,11 +225,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
// Unable to Interface implies unexported field without visibility access.
if !vx.CanInterface() || !vy.CanInterface() {
- const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+ help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
var name string
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+ if _, ok := reflect.New(t).Interface().(error); ok {
+ help = "consider using cmpopts.EquateErrors to compare error values"
+ }
} else {
// Unnamed type with unexported fields. Derive PkgPath from field.
var pkgPath string
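
The new hint points at cmpopts.EquateErrors, which treats two errors as equal when errors.Is matches in either direction instead of diffing their unexported fields; a small example of the suggested usage:

```go
package main

import (
	"fmt"
	"os"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	var want error = os.ErrNotExist
	got := fmt.Errorf("open config: %w", os.ErrNotExist)

	// Without EquateErrors, cmp would try to look inside the error values.
	if diff := cmp.Diff(want, got, cmpopts.EquateErrors()); diff != "" {
		fmt.Println("unexpected error (-want +got):\n" + diff)
	} else {
		fmt.Println("errors are equivalent")
	}
}
```
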
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
index 2d722ea5..786f6712 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -5,6 +5,7 @@
package cmp
import (
+ "bytes"
"fmt"
"reflect"
"strconv"
@@ -125,21 +126,20 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
// implementations crash when doing so.
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
var prefix, strVal string
- switch v := v.Interface().(type) {
- case error:
- prefix, strVal = "e", v.Error()
- case fmt.Stringer:
- prefix, strVal = "s", v.String()
- }
+ func() {
+ // Swallow and ignore any panics from String or Error.
+ defer func() { recover() }()
+ switch v := v.Interface().(type) {
+ case error:
+ strVal = v.Error()
+ prefix = "e"
+ case fmt.Stringer:
+ strVal = v.String()
+ prefix = "s"
+ }
+ }()
if prefix != "" {
- maxLen := len(strVal)
- if opts.LimitVerbosity {
- maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
- }
- if len(strVal) > maxLen+len(textEllipsis) {
- return textLine(prefix + formatString(strVal[:maxLen]) + string(textEllipsis))
- }
- return textLine(prefix + formatString(strVal))
+ return opts.formatString(prefix, strVal)
}
}
}
@@ -171,14 +171,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
case reflect.Complex64, reflect.Complex128:
return textLine(fmt.Sprint(v.Complex()))
case reflect.String:
- maxLen := v.Len()
- if opts.LimitVerbosity {
- maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
- }
- if v.Len() > maxLen+len(textEllipsis) {
- return textLine(formatString(v.String()[:maxLen]) + string(textEllipsis))
- }
- return textLine(formatString(v.String()))
+ return opts.formatString("", v.String())
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
return textLine(formatPointer(value.PointerOf(v), true))
case reflect.Struct:
@@ -210,6 +203,17 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
if v.IsNil() {
return textNil
}
+
+ // Check whether this is a []byte of text data.
+ if t.Elem() == reflect.TypeOf(byte(0)) {
+ b := v.Bytes()
+ isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) }
+ if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+ out = opts.formatString("", string(b))
+ return opts.WithTypeMode(emitType).FormatType(t, out)
+ }
+ }
+
fallthrough
case reflect.Array:
maxLen := v.Len()
@@ -295,6 +299,49 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
}
}
+func (opts formatOptions) formatString(prefix, s string) textNode {
+ maxLen := len(s)
+ maxLines := strings.Count(s, "\n") + 1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+ maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ }
+
+ // For multiline strings, use the triple-quote syntax,
+ // but only use it when printing removed or inserted nodes since
+ // we only want the extra verbosity for those cases.
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
// formatMapKey formats v as if it were a map key.
// The result is guaranteed to be a single line.
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/call.go b/vendor/github.com/gostaticanalysis/analysisutil/call.go
new file mode 100644
index 00000000..e3d98d1d
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/analysisutil/call.go
@@ -0,0 +1,405 @@
+package analysisutil
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/go/ssa"
+)
+
+// CalledChecker checks whether a function is called.
+// See From and Func.
+type CalledChecker struct {
+ Ignore func(instr ssa.Instruction) bool
+}
+
+// NotIn checks whether the receiver's methods are called in a function.
+// If none of the methods is called on any path from an instruction
+// whose type is the receiver to every return instruction, NotIn returns those instructions.
+func (c *CalledChecker) NotIn(f *ssa.Function, receiver types.Type, methods ...*types.Func) []ssa.Instruction {
+ done := map[ssa.Value]bool{}
+ var instrs []ssa.Instruction
+ for _, b := range f.Blocks {
+ for i, instr := range b.Instrs {
+ v, _ := instr.(ssa.Value)
+ if v == nil || done[v] {
+ continue
+ }
+
+ if v, _ := v.(*ssa.UnOp); v != nil && done[v.X] {
+ continue
+ }
+
+ called, ok := c.From(b, i, receiver, methods...)
+ if ok && !called {
+ instrs = append(instrs, instr)
+ done[v] = true
+ if v, _ := v.(*ssa.UnOp); v != nil {
+ done[v.X] = true
+ }
+ }
+ }
+ }
+ return instrs
+}
+
+// Func returns true when f is called in the instr.
+// If recv is not nil, Func also checks the receiver.
+func (c *CalledChecker) Func(instr ssa.Instruction, recv ssa.Value, f *types.Func) bool {
+
+ if c.Ignore != nil && c.Ignore(instr) {
+ return false
+ }
+
+ call, ok := instr.(ssa.CallInstruction)
+ if !ok {
+ return false
+ }
+
+ common := call.Common()
+ if common == nil {
+ return false
+ }
+
+ callee := common.StaticCallee()
+ if callee == nil {
+ return false
+ }
+
+ fn, ok := callee.Object().(*types.Func)
+ if !ok {
+ return false
+ }
+
+ if recv != nil &&
+ common.Signature().Recv() != nil &&
+ (len(common.Args) == 0 && recv != nil || common.Args[0] != recv &&
+ !referrer(recv, common.Args[0])) {
+ return false
+ }
+
+ return fn == f
+}
+
+func referrer(a, b ssa.Value) bool {
+ return isReferrerOf(a, b) || isReferrerOf(b, a)
+}
+
+func isReferrerOf(a, b ssa.Value) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if b.Referrers() != nil {
+ brs := *b.Referrers()
+
+ for _, br := range brs {
+ brv, ok := br.(ssa.Value)
+ if !ok {
+ continue
+ }
+ if brv == a {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// From checks whether the receiver's methods are called in an instruction
+// that comes after the i-th instruction, or in successor blocks of b.
+// The first result is that value.
+// The second result is whether the type of the i-th instruction does not match the receiver
+// or matches one of the ignore cases.
+func (c *CalledChecker) From(b *ssa.BasicBlock, i int, receiver types.Type, methods ...*types.Func) (called, ok bool) {
+ if b == nil || i < 0 || i >= len(b.Instrs) ||
+ receiver == nil || len(methods) == 0 {
+ return false, false
+ }
+
+ v, ok := b.Instrs[i].(ssa.Value)
+ if !ok {
+ return false, false
+ }
+
+ from := &calledFrom{recv: v, fs: methods, ignore: c.Ignore}
+
+ if !from.isRecv(receiver, v.Type()) {
+ return false, false
+ }
+
+ if from.ignored() {
+ return false, false
+ }
+
+ if from.instrs(b.Instrs[i+1:]) ||
+ from.succs(b) {
+ return true, true
+ }
+
+ from.done = nil
+ if from.storedInInstrs(b.Instrs[i+1:]) ||
+ from.storedInSuccs(b) {
+ return false, false
+ }
+
+ return false, true
+}
+
+type calledFrom struct {
+ recv ssa.Value
+ fs []*types.Func
+ done map[*ssa.BasicBlock]bool
+ ignore func(ssa.Instruction) bool
+}
+
+func (c *calledFrom) ignored() bool {
+
+ switch v := c.recv.(type) {
+ case *ssa.UnOp:
+ switch v.X.(type) {
+ case *ssa.FreeVar, *ssa.Global:
+ return true
+ }
+ }
+
+ refs := c.recv.Referrers()
+ if refs == nil {
+ return false
+ }
+
+ for _, ref := range *refs {
+ done := map[ssa.Instruction]bool{}
+ if !c.isOwn(ref) &&
+ ((c.ignore != nil && c.ignore(ref)) ||
+ c.isRet(ref, done) || c.isArg(ref)) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *calledFrom) isOwn(instr ssa.Instruction) bool {
+ v, ok := instr.(ssa.Value)
+ if !ok {
+ return false
+ }
+ return v == c.recv
+}
+
+func (c *calledFrom) isRet(instr ssa.Instruction, done map[ssa.Instruction]bool) bool {
+ if done[instr] {
+ return false
+ }
+ done[instr] = true
+
+ switch instr := instr.(type) {
+ case *ssa.Return:
+ return true
+ case *ssa.MapUpdate:
+ return c.isRetInRefs(instr.Map, done)
+ case *ssa.Store:
+ if instr, _ := instr.Addr.(ssa.Instruction); instr != nil {
+ return c.isRet(instr, done)
+ }
+ return c.isRetInRefs(instr.Addr, done)
+ case *ssa.FieldAddr:
+ return c.isRetInRefs(instr.X, done)
+ case ssa.Value:
+ return c.isRetInRefs(instr, done)
+ default:
+ return false
+ }
+}
+
+func (c *calledFrom) isRetInRefs(v ssa.Value, done map[ssa.Instruction]bool) bool {
+ refs := v.Referrers()
+ if refs == nil {
+ return false
+ }
+ for _, ref := range *refs {
+ if c.isRet(ref, done) {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *calledFrom) isArg(instr ssa.Instruction) bool {
+
+ call, ok := instr.(ssa.CallInstruction)
+ if !ok {
+ return false
+ }
+
+ common := call.Common()
+ if common == nil {
+ return false
+ }
+
+ args := common.Args
+ if common.Signature().Recv() != nil {
+ args = args[1:]
+ }
+
+ for i := range args {
+ if args[i] == c.recv {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *calledFrom) instrs(instrs []ssa.Instruction) bool {
+ for _, instr := range instrs {
+ for _, f := range c.fs {
+ if Called(instr, c.recv, f) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (c *calledFrom) succs(b *ssa.BasicBlock) bool {
+ if c.done == nil {
+ c.done = map[*ssa.BasicBlock]bool{}
+ }
+
+ if c.done[b] {
+ return true
+ }
+ c.done[b] = true
+
+ if len(b.Succs) == 0 {
+ return false
+ }
+
+ for _, s := range b.Succs {
+ if !c.instrs(s.Instrs) && !c.succs(s) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *calledFrom) storedInInstrs(instrs []ssa.Instruction) bool {
+ for _, instr := range instrs {
+ switch instr := instr.(type) {
+ case *ssa.Store:
+ if instr.Val == c.recv {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (c *calledFrom) storedInSuccs(b *ssa.BasicBlock) bool {
+ if c.done == nil {
+ c.done = map[*ssa.BasicBlock]bool{}
+ }
+
+ if c.done[b] {
+ return true
+ }
+ c.done[b] = true
+
+ if len(b.Succs) == 0 {
+ return false
+ }
+
+ for _, s := range b.Succs {
+ if !c.storedInInstrs(s.Instrs) && !c.succs(s) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (c *calledFrom) isRecv(recv, typ types.Type) bool {
+ return recv == typ || identical(recv, typ) ||
+ c.isRecvInTuple(recv, typ) || c.isRecvInEmbedded(recv, typ)
+}
+
+func (c *calledFrom) isRecvInTuple(recv, typ types.Type) bool {
+ tuple, _ := typ.(*types.Tuple)
+ if tuple == nil {
+ return false
+ }
+
+ for i := 0; i < tuple.Len(); i++ {
+ if c.isRecv(recv, tuple.At(i).Type()) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *calledFrom) isRecvInEmbedded(recv, typ types.Type) bool {
+
+ var st *types.Struct
+ switch typ := typ.(type) {
+ case *types.Struct:
+ st = typ
+ case *types.Pointer:
+ return c.isRecvInEmbedded(recv, typ.Elem())
+ case *types.Named:
+ return c.isRecvInEmbedded(recv, typ.Underlying())
+ default:
+ return false
+ }
+
+ for i := 0; i < st.NumFields(); i++ {
+ field := st.Field(i)
+ if !field.Embedded() {
+ continue
+ }
+
+ ft := field.Type()
+ if c.isRecv(recv, ft) {
+ return true
+ }
+
+ var ptrOrUnptr types.Type
+ switch ft := ft.(type) {
+ case *types.Pointer:
+ // struct { *T } -> T
+ ptrOrUnptr = ft.Elem()
+ default:
+ // struct { T } -> *T
+ ptrOrUnptr = types.NewPointer(ft)
+ }
+
+ if c.isRecv(recv, ptrOrUnptr) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// NotCalledIn checks whether the receiver's methods are called in the function.
+// If none of the methods is called on any path from an instruction whose type is
+// the receiver to a return instruction, NotCalledIn returns those instructions.
+func NotCalledIn(f *ssa.Function, receiver types.Type, methods ...*types.Func) []ssa.Instruction {
+ return new(CalledChecker).NotIn(f, receiver, methods...)
+}
+
+// CalledFrom checks whether the receiver's methods are called in an instruction
+// that comes after the i-th instruction of b, or in a successor block of b.
+// The first result reports whether such a call was found.
+// The second result reports whether the check could be performed; it is false
+// when the type of the i-th instruction does not match the receiver or an ignore case applies.
+func CalledFrom(b *ssa.BasicBlock, i int, receiver types.Type, methods ...*types.Func) (called, ok bool) {
+ return new(CalledChecker).From(b, i, receiver, methods...)
+}
+
+// Called returns true when f is called in the instr.
+// If recv is not nil, Called also checks the receiver.
+func Called(instr ssa.Instruction, recv ssa.Value, f *types.Func) bool {
+ return new(CalledChecker).Func(instr, recv, f)
+}
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go b/vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go
new file mode 100644
index 00000000..a911db6f
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/analysisutil/diagnostic.go
@@ -0,0 +1,45 @@
+package analysisutil
+
+import (
+ "go/token"
+
+ "github.com/gostaticanalysis/comment"
+ "github.com/gostaticanalysis/comment/passes/commentmap"
+ "golang.org/x/tools/go/analysis"
+)
+
+// ReportWithoutIgnore returns a report function which can be set as (analysis.Pass).Report.
+// The report function skips diagnostics that are annotated with an ignore comment of the form
+// //lint:ignore Check1[,Check2,...,CheckN] reason
+// names is a list of checker names.
+// If names is omitted, the report function falls back to pass.Analyzer.Name.
+func ReportWithoutIgnore(pass *analysis.Pass, names ...string) func(analysis.Diagnostic) {
+ cmaps, _ := pass.ResultOf[commentmap.Analyzer].(comment.Maps)
+ if cmaps == nil {
+ cmaps = comment.New(pass.Fset, pass.Files)
+ }
+
+ if len(names) == 0 {
+ names = []string{pass.Analyzer.Name}
+ }
+
+ report := pass.Report // original report func
+
+ return func(d analysis.Diagnostic) {
+ start := pass.Fset.File(d.Pos).Line(d.Pos)
+ end := start
+ if d.End != token.NoPos {
+ end = pass.Fset.File(d.End).Line(d.End)
+ }
+
+ for l := start; l <= end; l++ {
+ for _, n := range names {
+ if cmaps.IgnoreLine(pass.Fset, l, n) {
+ return
+ }
+ }
+ }
+
+ report(d)
+ }
+}
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/go.mod b/vendor/github.com/gostaticanalysis/analysisutil/go.mod
index 7e3e55be..5ca7c62b 100644
--- a/vendor/github.com/gostaticanalysis/analysisutil/go.mod
+++ b/vendor/github.com/gostaticanalysis/analysisutil/go.mod
@@ -2,4 +2,7 @@ module github.com/gostaticanalysis/analysisutil
go 1.12
-require golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5
+require (
+ github.com/gostaticanalysis/comment v1.4.1
+ golang.org/x/tools v0.0.0-20200820010801-b793a1359eac
+)
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/go.sum b/vendor/github.com/gostaticanalysis/analysisutil/go.sum
index 16c8787c..134e67db 100644
--- a/vendor/github.com/gostaticanalysis/analysisutil/go.sum
+++ b/vendor/github.com/gostaticanalysis/analysisutil/go.sum
@@ -1,6 +1,37 @@
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/gostaticanalysis/comment v1.3.0 h1:wTVgynbFu8/nz6SGgywA0TcyIoAVsYc7ai/Zp5xNGlw=
+github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
+github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc=
+github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5 h1:ZcPpqKMdoZeNQ/4GHlyY4COf8n8SmpPv6mcqF1+VPSM=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3 h1:2oZsfYnKfYzL4I57uYiRFsUf0bqlLkiuw8nnj3+voUA=
+golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff h1:foic6oVZ4MKltJC6MXzuFZFswE7NCjjtc0Hxbyblawc=
+golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac h1:DugppSxw0LSF8lcjaODPJZoDzq0ElTGskTst3ZaBkHI=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/pkg.go b/vendor/github.com/gostaticanalysis/analysisutil/pkg.go
index c98710d1..b64150d8 100644
--- a/vendor/github.com/gostaticanalysis/analysisutil/pkg.go
+++ b/vendor/github.com/gostaticanalysis/analysisutil/pkg.go
@@ -2,14 +2,17 @@ package analysisutil
import (
"go/types"
+ "strconv"
"strings"
+
+ "golang.org/x/tools/go/analysis"
)
-// RemoVendor removes vendoring infomation from import path.
+// RemoveVendor removes vendoring information from an import path.
func RemoveVendor(path string) string {
- i := strings.Index(path, "vendor")
+ i := strings.Index(path, "vendor/")
if i >= 0 {
- return path[i+len("vendor")+1:]
+ return path[i+len("vendor/"):]
}
return path
}
@@ -24,3 +27,23 @@ func LookupFromImports(imports []*types.Package, path, name string) types.Object
}
return nil
}
+
+// Imported returns true when the given pass imports the pkg.
+func Imported(pkgPath string, pass *analysis.Pass) bool {
+ fs := pass.Files
+ if len(fs) == 0 {
+ return false
+ }
+ for _, f := range fs {
+ for _, i := range f.Imports {
+ path, err := strconv.Unquote(i.Path.Value)
+ if err != nil {
+ continue
+ }
+ if RemoveVendor(path) == pkgPath {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/ssa.go b/vendor/github.com/gostaticanalysis/analysisutil/ssa.go
index 37915866..ce3af580 100644
--- a/vendor/github.com/gostaticanalysis/analysisutil/ssa.go
+++ b/vendor/github.com/gostaticanalysis/analysisutil/ssa.go
@@ -1,6 +1,8 @@
package analysisutil
-import "golang.org/x/tools/go/ssa"
+import (
+ "golang.org/x/tools/go/ssa"
+)
// IfInstr returns *ssa.If which is contained in the block b.
// If the block b has not any if instruction, IfInstr returns nil.
@@ -29,3 +31,53 @@ func Phi(b *ssa.BasicBlock) (phis []*ssa.Phi) {
}
return
}
+
+// Returns returns a slice of *ssa.Return in the function.
+func Returns(v ssa.Value) []*ssa.Return {
+ var fn *ssa.Function
+ switch v := v.(type) {
+ case *ssa.Function:
+ fn = v
+ case *ssa.MakeClosure:
+ return Returns(v.Fn)
+ default:
+ return nil
+ }
+
+ var rets []*ssa.Return
+ done := map[*ssa.BasicBlock]bool{}
+ for _, b := range fn.Blocks {
+ rets = append(rets, returnsInBlock(b, done)...)
+ }
+ return rets
+}
+
+func returnsInBlock(b *ssa.BasicBlock, done map[*ssa.BasicBlock]bool) (rets []*ssa.Return) {
+ if done[b] {
+ return
+ }
+ done[b] = true
+
+ if len(b.Instrs) != 0 {
+ switch instr := b.Instrs[len(b.Instrs)-1].(type) {
+ case *ssa.Return:
+ rets = append(rets, instr)
+ }
+ }
+
+ for _, s := range b.Succs {
+ rets = append(rets, returnsInBlock(s, done)...)
+ }
+ return
+}
+
+// BinOp returns binary operator values which are contained in the block b.
+func BinOp(b *ssa.BasicBlock) []*ssa.BinOp {
+ var binops []*ssa.BinOp
+ for _, instr := range b.Instrs {
+ if binop, ok := instr.(*ssa.BinOp); ok {
+ binops = append(binops, binop)
+ }
+ }
+ return binops
+}
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go b/vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go
new file mode 100644
index 00000000..2f8a1657
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/analysisutil/ssainspect.go
@@ -0,0 +1,37 @@
+package analysisutil
+
+import "golang.org/x/tools/go/ssa"
+
+// InspectInstr inspects from the i-th instruction of the start block through its successor blocks.
+func InspectInstr(start *ssa.BasicBlock, i int, f func(i int, instr ssa.Instruction) bool) {
+ new(instrInspector).block(start, i, f)
+}
+
+type instrInspector struct {
+ done map[*ssa.BasicBlock]bool
+}
+
+func (ins *instrInspector) block(b *ssa.BasicBlock, i int, f func(i int, instr ssa.Instruction) bool) {
+ if ins.done == nil {
+ ins.done = map[*ssa.BasicBlock]bool{}
+ }
+
+ if b == nil || ins.done[b] || len(b.Instrs) <= i {
+ return
+ }
+
+ ins.done[b] = true
+ ins.instrs(i, b.Instrs[i:], f)
+ for _, s := range b.Succs {
+ ins.block(s, 0, f)
+ }
+
+}
+
+func (ins *instrInspector) instrs(offset int, instrs []ssa.Instruction, f func(i int, instr ssa.Instruction) bool) {
+ for i, instr := range instrs {
+ if !f(offset+i, instr) {
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/gostaticanalysis/analysisutil/types.go b/vendor/github.com/gostaticanalysis/analysisutil/types.go
new file mode 100644
index 00000000..4773ac43
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/analysisutil/types.go
@@ -0,0 +1,198 @@
+package analysisutil
+
+import (
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+var errType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
+
+// ImplementsError reports whether t implements the error interface.
+func ImplementsError(t types.Type) bool {
+ return types.Implements(t, errType)
+}
+
+// ObjectOf returns the types.Object with the given name in the package.
+func ObjectOf(pass *analysis.Pass, pkg, name string) types.Object {
+ obj := LookupFromImports(pass.Pkg.Imports(), pkg, name)
+ if obj != nil {
+ return obj
+ }
+ if RemoveVendor(pass.Pkg.Name()) != RemoveVendor(pkg) {
+ return nil
+ }
+ return pass.Pkg.Scope().Lookup(name)
+}
+
+// TypeOf returns the types.Type with the given name in the package.
+// TypeOf accepts pointer types such as *T.
+func TypeOf(pass *analysis.Pass, pkg, name string) types.Type {
+ if name == "" {
+ return nil
+ }
+
+ if name[0] == '*' {
+ obj := TypeOf(pass, pkg, name[1:])
+ if obj == nil {
+ return nil
+ }
+ return types.NewPointer(obj)
+ }
+
+ obj := ObjectOf(pass, pkg, name)
+ if obj == nil {
+ return nil
+ }
+
+ return obj.Type()
+}
+
+// MethodOf returns the method with the given name in the type.
+func MethodOf(typ types.Type, name string) *types.Func {
+ switch typ := typ.(type) {
+ case *types.Named:
+ for i := 0; i < typ.NumMethods(); i++ {
+ if f := typ.Method(i); f.Name() == name {
+ return f
+ }
+ }
+ case *types.Pointer:
+ return MethodOf(typ.Elem(), name)
+ }
+ return nil
+}
+
+// see: https://github.com/golang/go/issues/19670
+func identical(x, y types.Type) (ret bool) {
+ defer func() {
+ r := recover()
+ switch r := r.(type) {
+ case string:
+ if r == "unreachable" {
+ ret = false
+ return
+ }
+ case nil:
+ return
+ }
+ panic(r)
+ }()
+ return types.Identical(x, y)
+}
+
+// Interfaces returns a map of interfaces which are declared in the package.
+func Interfaces(pkg *types.Package) map[string]*types.Interface {
+ ifs := map[string]*types.Interface{}
+
+ for _, n := range pkg.Scope().Names() {
+ o := pkg.Scope().Lookup(n)
+ if o != nil {
+ i, ok := o.Type().Underlying().(*types.Interface)
+ if ok {
+ ifs[n] = i
+ }
+ }
+ }
+
+ return ifs
+}
+
+// Structs returns a map of structs which are declared in the package.
+func Structs(pkg *types.Package) map[string]*types.Struct {
+ structs := map[string]*types.Struct{}
+
+ for _, n := range pkg.Scope().Names() {
+ o := pkg.Scope().Lookup(n)
+ if o != nil {
+ s, ok := o.Type().Underlying().(*types.Struct)
+ if ok {
+ structs[n] = s
+ }
+ }
+ }
+
+ return structs
+}
+
+// HasField returns whether the struct has the field.
+func HasField(s *types.Struct, f *types.Var) bool {
+ if s == nil || f == nil {
+ return false
+ }
+
+ for i := 0; i < s.NumFields(); i++ {
+ if s.Field(i) == f {
+ return true
+ }
+ }
+
+ return false
+}
+
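+// TypesInfo merges the given types.Info values into a single types.Info.
+// It returns nil when no Info is given.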
+func TypesInfo(info ...*types.Info) *types.Info {
+ if len(info) == 0 {
+ return nil
+ }
+
+ var merged types.Info
+ for i := range info {
+ mergeTypesInfo(&merged, info[i])
+ }
+
+ return &merged
+}
+
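+// mergeTypesInfo copies the entries of i2 into i1, allocating the destination
+// maps as needed.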
+func mergeTypesInfo(i1, i2 *types.Info) {
+ // Types
+ if i1.Types == nil && i2.Types != nil {
+ i1.Types = map[ast.Expr]types.TypeAndValue{}
+ }
+ for expr, tv := range i2.Types {
+ i1.Types[expr] = tv
+ }
+
+ // Defs
+ if i1.Defs == nil && i2.Defs != nil {
+ i1.Defs = map[*ast.Ident]types.Object{}
+ }
+ for ident, obj := range i2.Defs {
+ i1.Defs[ident] = obj
+ }
+
+ // Uses
+ if i1.Uses == nil && i2.Uses != nil {
+ i1.Uses = map[*ast.Ident]types.Object{}
+ }
+ for ident, obj := range i2.Uses {
+ i1.Uses[ident] = obj
+ }
+
+ // Implicits
+ if i1.Implicits == nil && i2.Implicits != nil {
+ i1.Implicits = map[ast.Node]types.Object{}
+ }
+ for n, obj := range i2.Implicits {
+ i1.Implicits[n] = obj
+ }
+
+ // Selections
+ if i1.Selections == nil && i2.Selections != nil {
+ i1.Selections = map[*ast.SelectorExpr]*types.Selection{}
+ }
+ for expr, sel := range i2.Selections {
+ i1.Selections[expr] = sel
+ }
+
+ // Scopes
+ if i1.Scopes == nil && i2.Scopes != nil {
+ i1.Scopes = map[ast.Node]*types.Scope{}
+ }
+ for n, s := range i2.Scopes {
+ i1.Scopes[n] = s
+ }
+
+ // InitOrder
+ i1.InitOrder = append(i1.InitOrder, i2.InitOrder...)
+}
diff --git a/vendor/github.com/gostaticanalysis/comment/LICENSE b/vendor/github.com/gostaticanalysis/comment/LICENSE
new file mode 100644
index 00000000..4f7eeff5
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/comment/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Takuya Ueda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/gostaticanalysis/comment/README.md b/vendor/github.com/gostaticanalysis/comment/README.md
new file mode 100644
index 00000000..53355531
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/comment/README.md
@@ -0,0 +1,10 @@
+# gostaticanalysis/comment
+
+[![godoc.org][godoc-badge]][godoc]
+
+`comment` provides utilities for [ast.CommentMap](https://golang.org/pkg/go/ast/#CommentMap).
+
+
+[godoc]: https://godoc.org/github.com/gostaticanalysis/comment
+[godoc-badge]: https://img.shields.io/badge/godoc-reference-4F73B3.svg?style=flat-square&label=%20godoc.org
+
diff --git a/vendor/github.com/gostaticanalysis/comment/comment.go b/vendor/github.com/gostaticanalysis/comment/comment.go
new file mode 100644
index 00000000..2fe67fa9
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/comment/comment.go
@@ -0,0 +1,147 @@
+package comment
+
+import (
+ "go/ast"
+ "go/token"
+ "strings"
+)
+
+// Maps is a slice of ast.CommentMap.
+type Maps []ast.CommentMap
+
+// New creates a new CommentMap slice from the specified files.
+func New(fset *token.FileSet, files []*ast.File) Maps {
+ maps := make(Maps, len(files))
+ for i := range files {
+ maps[i] = ast.NewCommentMap(fset, files[i], files[i].Comments)
+ }
+ return maps
+}
+
+// Comments returns the CommentGroup slice that corresponds to the specified AST node.
+func (maps Maps) Comments(n ast.Node) []*ast.CommentGroup {
+ for i := range maps {
+ if maps[i][n] != nil {
+ return maps[i][n]
+ }
+ }
+ return nil
+}
+
+// CommentsByPos returns the CommentGroup slice that corresponds to the specified pos.
+func (maps Maps) CommentsByPos(pos token.Pos) []*ast.CommentGroup {
+ for i := range maps {
+ for n, cgs := range maps[i] {
+ if n.Pos() == pos {
+ return cgs
+ }
+ }
+ }
+ return nil
+}
+
+// Annotated checks whether the specified AST node is annotated.
+func (maps Maps) Annotated(n ast.Node, annotation string) bool {
+ for _, cg := range maps.Comments(n) {
+ if strings.HasPrefix(strings.TrimSpace(cg.Text()), annotation) {
+ return true
+ }
+ }
+ return false
+}
+
+// Ignore checks whether the specified AST node is ignored by the check.
+// It follows the staticcheck style shown below.
+// //lint:ignore Check1[,Check2,...,CheckN] reason
+func (maps Maps) Ignore(n ast.Node, check string) bool {
+ for _, cg := range maps.Comments(n) {
+ if hasIgnoreCheck(cg, check) {
+ return true
+ }
+ }
+ return false
+}
+
+// IgnorePos checks whether the specified position of an AST node is ignored by the check.
+// It follows the staticcheck style shown below.
+// //lint:ignore Check1[,Check2,...,CheckN] reason
+func (maps Maps) IgnorePos(pos token.Pos, check string) bool {
+ for _, cg := range maps.CommentsByPos(pos) {
+ if hasIgnoreCheck(cg, check) {
+ return true
+ }
+ }
+ return false
+}
+
+// Deprecated: This function does not work with multiple files.
+// CommentsByPosLine can be used instead of CommentsByLine.
+//
+// CommentsByLine returns the CommentGroup slice that corresponds to the specified line.
+func (maps Maps) CommentsByLine(fset *token.FileSet, line int) []*ast.CommentGroup {
+ for i := range maps {
+ for n, cgs := range maps[i] {
+ l := fset.File(n.Pos()).Line(n.Pos())
+ if l == line {
+ return cgs
+ }
+ }
+ }
+ return nil
+}
+
+// CommentsByPosLine returns the CommentGroup slice that corresponds to the line of the specified pos.
+func (maps Maps) CommentsByPosLine(fset *token.FileSet, pos token.Pos) []*ast.CommentGroup {
+ f1 := fset.File(pos)
+ for i := range maps {
+ for n, cgs := range maps[i] {
+ f2 := fset.File(n.Pos())
+ if f1 != f2 {
+ // different file
+ continue
+ }
+
+ if f1.Line(pos) == f2.Line(n.Pos()) {
+ return cgs
+ }
+ }
+ }
+ return nil
+}
+
+// IgnoreLine checks whether the specified line of an AST node is ignored by the check.
+// It follows the staticcheck style shown below.
+// //lint:ignore Check1[,Check2,...,CheckN] reason
+func (maps Maps) IgnoreLine(fset *token.FileSet, line int, check string) bool {
+ for _, cg := range maps.CommentsByLine(fset, line) {
+ if hasIgnoreCheck(cg, check) {
+ return true
+ }
+ }
+ return false
+}
+
+// hasIgnoreCheck returns true if the provided CommentGroup starts with a comment
+// of the form "//lint:ignore Check1[,Check2,...,CheckN] reason" and one of the
+// checks matches the provided check. The *ast.CommentGroup is checked directly
+// rather than using "cg.Text()" because, starting in Go 1.15, the "cg.Text()" call
+// no longer returns directive-style comments (see https://github.com/golang/go/issues/37974).
+func hasIgnoreCheck(cg *ast.CommentGroup, check string) bool {
+ if !strings.HasPrefix(cg.List[0].Text, "//") {
+ return false
+ }
+
+ s := strings.TrimSpace(cg.List[0].Text[2:])
+ txt := strings.Split(s, " ")
+ if len(txt) < 3 || txt[0] != "lint:ignore" {
+ return false
+ }
+
+ checks := strings.Split(txt[1], ",")
+ for i := range checks {
+ if check == checks[i] {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/gostaticanalysis/comment/go.mod b/vendor/github.com/gostaticanalysis/comment/go.mod
new file mode 100644
index 00000000..27556811
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/comment/go.mod
@@ -0,0 +1,8 @@
+module github.com/gostaticanalysis/comment
+
+go 1.12
+
+require (
+ github.com/google/go-cmp v0.5.1
+ golang.org/x/tools v0.0.0-20200820010801-b793a1359eac
+)
diff --git a/vendor/github.com/gostaticanalysis/comment/go.sum b/vendor/github.com/gostaticanalysis/comment/go.sum
new file mode 100644
index 00000000..425807ce
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/comment/go.sum
@@ -0,0 +1,24 @@
+github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac h1:DugppSxw0LSF8lcjaODPJZoDzq0ElTGskTst3ZaBkHI=
+golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go b/vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go
new file mode 100644
index 00000000..9266d989
--- /dev/null
+++ b/vendor/github.com/gostaticanalysis/comment/passes/commentmap/commentmap.go
@@ -0,0 +1,20 @@
+package commentmap
+
+import (
+ "reflect"
+
+ "github.com/gostaticanalysis/comment"
+ "golang.org/x/tools/go/analysis"
+)
+
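+// Analyzer builds a comment.Maps for the files of a pass and provides it as the
+// analyzer's result.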
+var Analyzer = &analysis.Analyzer{
+ Name: "commentmap",
+ Doc: "create comment map",
+ Run: run,
+ RunDespiteErrors: true,
+ ResultType: reflect.TypeOf(comment.Maps{}),
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ return comment.New(pass.Fset, pass.Files), nil
+}
diff --git a/vendor/github.com/jirfag/go-printf-func-name/LICENSE b/vendor/github.com/jirfag/go-printf-func-name/LICENSE
new file mode 100644
index 00000000..d06a809c
--- /dev/null
+++ b/vendor/github.com/jirfag/go-printf-func-name/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Isaev Denis
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
index 949b77e3..09a4a35c 100644
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -26,6 +26,8 @@ The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
We thank all the authors who provided code to this library:
* Felix Kollmann
+* Nicolas Perraut
+* @dirty49374
## License
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
index ef18d8f9..57f530ae 100644
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -4,7 +4,6 @@ package sequences
import (
"syscall"
- "unsafe"
)
var (
@@ -27,7 +26,7 @@ func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
}
- ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+ ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
if ret == 0 {
return err
}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
new file mode 100644
index 00000000..df61a6f2
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
@@ -0,0 +1,11 @@
+// +build linux darwin
+
+package sequences
+
+import (
+ "fmt"
+)
+
+func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
+ return fmt.Errorf("windows only package")
+}
diff --git a/vendor/github.com/kyoh86/exportloopref/.golangci.yml b/vendor/github.com/kyoh86/exportloopref/.golangci.yml
new file mode 100644
index 00000000..58667b21
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/.golangci.yml
@@ -0,0 +1,4 @@
+linters:
+ enable:
+ - unparam
+ - scopelint
diff --git a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml
new file mode 100644
index 00000000..22ff4404
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml
@@ -0,0 +1,43 @@
+project_name: exportloopref
+release:
+ github:
+ owner: kyoh86
+ name: exportloopref
+brews:
+- install: |
+ bin.install "exportloopref"
+ github:
+ owner: kyoh86
+ name: homebrew-tap
+ folder: Formula
+ homepage: https://github.com/kyoh86/exportloopref
+ description: An analyzer that finds exporting pointers for loop variables.
+builds:
+- goos:
+ - linux
+ - darwin
+ - windows
+ goarch:
+ - amd64
+ - "386"
+ main: ./cmd/exportloopref
+ ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}}
+ binary: exportloopref
+archives:
+- id: gzip
+ format: tar.gz
+ format_overrides:
+ - goos: windows
+ format: zip
+ name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
+ files:
+ - licence*
+ - LICENCE*
+ - license*
+ - LICENSE*
+ - readme*
+ - README*
+ - changelog*
+ - CHANGELOG*
+snapshot:
+ name_template: SNAPSHOT-{{ .Commit }}
diff --git a/vendor/github.com/kyoh86/exportloopref/LICENSE b/vendor/github.com/kyoh86/exportloopref/LICENSE
new file mode 100644
index 00000000..7ac9dba4
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 kyoh86
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/kyoh86/exportloopref/Makefile b/vendor/github.com/kyoh86/exportloopref/Makefile
new file mode 100644
index 00000000..4d3ef22f
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/Makefile
@@ -0,0 +1,16 @@
+.PHONY: gen lint test install man
+
+VERSION := `git vertag get`
+COMMIT := `git rev-parse HEAD`
+
+gen:
+ go generate ./...
+
+lint: gen
+ golangci-lint run
+
+test: lint
+ go test -v --race ./...
+
+install: test
+ go install -a -ldflags "-X=main.version=$(VERSION) -X=main.commit=$(COMMIT)" ./...
diff --git a/vendor/github.com/kyoh86/exportloopref/README.md b/vendor/github.com/kyoh86/exportloopref/README.md
new file mode 100644
index 00000000..5b154915
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/README.md
@@ -0,0 +1,178 @@
+# exportloopref
+
+An analyzer that finds exporting pointers for loop variables.
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/kyoh86/exportloopref)](https://goreportcard.com/report/github.com/kyoh86/exportloopref)
+[![Coverage Status](https://img.shields.io/codecov/c/github/kyoh86/exportloopref.svg)](https://codecov.io/gh/kyoh86/exportloopref)
+[![Release](https://github.com/kyoh86/exportloopref/workflows/Release/badge.svg)](https://github.com/kyoh86/exportloopref/releases)
+
+## What's this?
+
+Sample problem code from: https://github.com/kyoh86/exportloopref/blob/master/testdata/simple/simple.go
+
+```go
+package main
+
+func main() {
+ var intArray [4]*int
+ var intSlice []*int
+ var intRef *int
+ var intStr struct{ x *int }
+
+ println("loop expecting 10, 11, 12, 13")
+ for i, p := range []int{10, 11, 12, 13} {
+ printp(&p) // not a diagnostic
+ intSlice = append(intSlice, &p) // want "exporting a pointer for the loop variable p"
+ intArray[i] = &p // want "exporting a pointer for the loop variable p"
+ if i%2 == 0 {
+ intRef = &p // want "exporting a pointer for the loop variable p"
+ intStr.x = &p // want "exporting a pointer for the loop variable p"
+ }
+ var vStr struct{ x *int }
+ var vArray [4]*int
+ var v *int
+ if i%2 == 0 {
+ v = &p // not a diagnostic (v is an inner variable)
+ vArray[1] = &p // not a diagnostic (vArray is an inner variable)
+ vStr.x = &p
+ }
+ _ = v
+ }
+
+ println(`slice expecting "10, 11, 12, 13" but "13, 13, 13, 13"`)
+ for _, p := range intSlice {
+ printp(p)
+ }
+ println(`array expecting "10, 11, 12, 13" but "13, 13, 13, 13"`)
+ for _, p := range intArray {
+ printp(p)
+ }
+ println(`captured value expecting "12" but "13"`)
+ printp(intRef)
+}
+
+func printp(p *int) {
+ println(*p)
+}
+```
+
+In Go, the `p` variable in the loops above is actually a single variable that is reused on each iteration.
+So in many cases (like the above), taking its address leads to annoying bugs.
+
+You can find these bugs with `exportloopref`, and fix them.
+
+```go
+package main
+
+func main() {
+ var intArray [4]*int
+ var intSlice []*int
+ var intRef *int
+ var intStr struct{ x *int }
+
+ println("loop expecting 10, 11, 12, 13")
+ for i, p := range []int{10, 11, 12, 13} {
+ p := p // FIX variable into the inner variable
+ printp(&p)
+ intSlice = append(intSlice, &p)
+ intArray[i] = &p
+ if i%2 == 0 {
+ intRef = &p
+ intStr.x = &p
+ }
+ var vStr struct{ x *int }
+ var vArray [4]*int
+ var v *int
+ if i%2 == 0 {
+ v = &p
+ vArray[1] = &p
+ vStr.x = &p
+ }
+ _ = v
+ }
+
+ println(`slice expecting "10, 11, 12, 13"`)
+ for _, p := range intSlice {
+ printp(p)
+ }
+ println(`array expecting "10, 11, 12, 13"`)
+ for _, p := range intArray {
+ printp(p)
+ }
+ println(`captured value expecting "12"`)
+ printp(intRef)
+}
+
+func printp(p *int) {
+ println(*p)
+}
+```
+
+ref: https://github.com/kyoh86/exportloopref/blob/master/testdata/fixed/fixed.go
+
+## Sensing policy
+
+I want exportloopref to be as accurate as possible,
+so some cases that would produce warnings are intentionally ignored.
+
+e.g.
+
+```go
+var s Foo
+for _, p := range []int{10, 11, 12, 13} {
+ s.Bar(&p) // If s stores the pointer, it will be a bug.
+}
+```
+
+If you want to report all of these lints (accepting some false positives),
+you should use [looppointer](https://github.com/kyoh86/looppointer).
+
+## Install
+
+go:
+
+```console
+$ go get github.com/kyoh86/exportloopref/cmd/exportloopref
+```
+
+[homebrew](https://brew.sh/):
+
+```console
+$ brew install kyoh86/tap/exportloopref
+```
+
+[gordon](https://github.com/kyoh86/gordon):
+
+```console
+$ gordon install kyoh86/exportloopref
+```
+
+## Usage
+
+```
+exportloopref [-flag] [package]
+```
+
+### Flags
+
+| Flag | Description |
+| --- | --- |
+| -V | print version and exit |
+| -all | no effect (deprecated) |
+| -c int | display offending line with this many lines of context (default -1) |
+| -cpuprofile string | write CPU profile to this file |
+| -debug string | debug flags, any subset of "fpstv" |
+| -fix | apply all suggested fixes |
+| -flags | print analyzer flags in JSON |
+| -json | emit JSON output |
+| -memprofile string | write memory profile to this file |
+| -source | no effect (deprecated) |
+| -tags string | no effect (deprecated) |
+| -trace string | write trace log to this file |
+| -v | no effect (deprecated) |
+
+# LICENSE
+
+[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg)](http://www.opensource.org/licenses/MIT)
+
+This is distributed under the [MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/kyoh86/exportloopref/exportloopref.go b/vendor/github.com/kyoh86/exportloopref/exportloopref.go
new file mode 100644
index 00000000..0b310b3c
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/exportloopref.go
@@ -0,0 +1,276 @@
+package exportloopref
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+var Analyzer = &analysis.Analyzer{
+ Name: "exportloopref",
+ Doc: "checks for pointers to enclosing loop variables",
+ Run: run,
+ RunDespiteErrors: true,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ // ResultType reflect.Type
+ // FactTypes []Fact
+}
+
+func init() {
+ // Analyzer.Flags.StringVar(&v, "name", "default", "description")
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+ search := &Searcher{
+ Stats: map[token.Pos]struct{}{},
+ Vars: map[token.Pos]map[token.Pos]struct{}{},
+ Types: pass.TypesInfo.Types,
+ }
+
+ nodeFilter := []ast.Node{
+ (*ast.RangeStmt)(nil),
+ (*ast.ForStmt)(nil),
+ (*ast.DeclStmt)(nil),
+ (*ast.AssignStmt)(nil),
+ (*ast.UnaryExpr)(nil),
+ }
+
+ inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) bool {
+ id, digg := search.Check(n, stack)
+ if id != nil {
+ pass.ReportRangef(id, "exporting a pointer for the loop variable %s", id.Name)
+ }
+ return digg
+ })
+
+ return nil, nil
+}
+
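+// Searcher tracks loop statement variables and variables declared inside loops
+// while walking the AST.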
+type Searcher struct {
+ // Statement variables: a map that collects the positions at which
+ // variables are declared, as below.
+ //   - for <key>, <value> := range ...
+ //   - var <x> int
+ //   - D := ...
+ Stats map[token.Pos]struct{}
+ // Internal variables: maps a loop position to declaration locations, used to
+ // ignore safe pointers to variables which are declared inside the loop.
+ Vars map[token.Pos]map[token.Pos]struct{}
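+ // Types is the type information of the inspected package (pass.TypesInfo.Types).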
+ Types map[ast.Expr]types.TypeAndValue
+}
+
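+// Check inspects a single node. Loop, declaration, and assignment statements are
+// recorded, and for a unary & expression it returns the identifier of a loop
+// variable whose pointer is exported, if any. The second result reports whether
+// the inspector should descend into the node's children.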
+func (s *Searcher) Check(n ast.Node, stack []ast.Node) (*ast.Ident, bool) {
+ switch typed := n.(type) {
+ case *ast.RangeStmt:
+ s.parseRangeStmt(typed)
+ case *ast.ForStmt:
+ s.parseForStmt(typed)
+ case *ast.DeclStmt:
+ s.parseDeclStmt(typed, stack)
+ case *ast.AssignStmt:
+ s.parseAssignStmt(typed, stack)
+
+ case *ast.UnaryExpr:
+ return s.checkUnaryExpr(typed, stack)
+ }
+ return nil, true
+}
+
+func (s *Searcher) parseRangeStmt(n *ast.RangeStmt) {
+ s.addStat(n.Key)
+ s.addStat(n.Value)
+}
+
+func (s *Searcher) parseForStmt(n *ast.ForStmt) {
+ switch post := n.Post.(type) {
+ case *ast.AssignStmt:
+ // e.g. for p = head; p != nil; p = p.next
+ for _, lhs := range post.Lhs {
+ s.addStat(lhs)
+ }
+ case *ast.IncDecStmt:
+ // e.g. for i := 0; i < n; i++
+ s.addStat(post.X)
+ }
+}
+
+func (s *Searcher) addStat(expr ast.Expr) {
+ if id, ok := expr.(*ast.Ident); ok {
+ s.Stats[id.Pos()] = struct{}{}
+ }
+}
+
+func (s *Searcher) parseDeclStmt(n *ast.DeclStmt, stack []ast.Node) {
+ loop := s.innermostLoop(stack)
+ if loop == nil {
+ return
+ }
+
+ // Register declaring variables
+ if genDecl, ok := n.Decl.(*ast.GenDecl); ok && genDecl.Tok == token.VAR {
+ for _, spec := range genDecl.Specs {
+ for _, name := range spec.(*ast.ValueSpec).Names {
+ s.addVar(loop, name)
+ }
+ }
+ }
+}
+
+func (s *Searcher) parseAssignStmt(n *ast.AssignStmt, stack []ast.Node) {
+ loop := s.innermostLoop(stack)
+ if loop == nil {
+ return
+ }
+
+ // Find statements declaring internal variable
+ if n.Tok == token.DEFINE {
+ for _, h := range n.Lhs {
+ s.addVar(loop, h)
+ }
+ }
+}
+
+func (s *Searcher) addVar(loop ast.Node, expr ast.Expr) {
+ loopPos := loop.Pos()
+ id, ok := expr.(*ast.Ident)
+ if !ok {
+ return
+ }
+ vars, ok := s.Vars[loopPos]
+ if !ok {
+ vars = map[token.Pos]struct{}{}
+ }
+ vars[id.Obj.Pos()] = struct{}{}
+ s.Vars[loopPos] = vars
+}
+
+func (s *Searcher) innermostLoop(stack []ast.Node) ast.Node {
+ for i := len(stack) - 1; i >= 0; i-- {
+ switch stack[i].(type) {
+ case *ast.RangeStmt, *ast.ForStmt:
+ return stack[i]
+ }
+ }
+ return nil
+}
+
+func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Ident, bool) {
+ loop := s.innermostLoop(stack)
+ if loop == nil {
+ return nil, true
+ }
+
+ if n.Op != token.AND {
+ return nil, true
+ }
+
+ // Get identity of the referred item
+ id := s.getIdentity(n.X)
+ if id == nil {
+ return nil, true
+ }
+
+ // If the identity is not the loop statement variable,
+ // it will not be reported.
+ if _, isStat := s.Stats[id.Obj.Pos()]; !isStat {
+ return nil, true
+ }
+
+ // check the stack for append(), []X{}, map[Type]X{}, Struct{}, &Struct{}, X.(Type), (X)
+ // on the right-hand side of an assignment
+ var mayRHPos token.Pos
+ for i := len(stack) - 2; i >= 0; i-- {
+ switch typed := stack[i].(type) {
+ case (*ast.UnaryExpr):
+ // noop
+ case (*ast.CompositeLit):
+ // noop
+ case (*ast.KeyValueExpr):
+ // noop
+ case (*ast.CallExpr):
+ fun, ok := typed.Fun.(*ast.Ident)
+ if !ok {
+ return nil, false // it's calling a function other than `append`; it cannot be checked
+ }
+
+ if fun.Name != "append" {
+ return nil, false // it's calling a function other than `append`; it cannot be checked
+ }
+
+ case (*ast.AssignStmt):
+ if len(typed.Rhs) != len(typed.Lhs) {
+ return nil, false // dead logic
+ }
+
+ // search x where Rhs[x].Pos() == mayRHPos
+ var index int
+ for ri, rh := range typed.Rhs {
+ if rh.Pos() == mayRHPos {
+ index = ri
+ break
+ }
+ }
+
+ // check Lhs[x] is not inner variable
+ lh := typed.Lhs[index]
+ isVar := s.isVar(loop, lh)
+ if !isVar {
+ return id, false
+ }
+
+ return nil, true
+ default:
+ // Other statements cannot be checked.
+ return nil, false
+ }
+
+ // remember an expr that may be the right-hand side of the AssignStmt
+ mayRHPos = stack[i].Pos()
+ }
+ return nil, true
+}
+
+func (s *Searcher) isVar(loop ast.Node, expr ast.Expr) bool {
+ vars := s.Vars[loop.Pos()] // map[token.Pos]struct{}
+ if vars == nil {
+ return false
+ }
+ switch typed := expr.(type) {
+ case (*ast.Ident):
+ _, isVar := vars[typed.Obj.Pos()]
+ return isVar
+ case (*ast.IndexExpr): // like X[Y], check X
+ return s.isVar(loop, typed.X)
+ case (*ast.SelectorExpr): // like X.Y, check X
+ return s.isVar(loop, typed.X)
+ }
+ return false
+}
+
+// Get variable identity
+func (s *Searcher) getIdentity(expr ast.Expr) *ast.Ident {
+ switch typed := expr.(type) {
+ case *ast.SelectorExpr:
+ // Ignore if the parent is pointer ref (fix for #2)
+ if _, ok := s.Types[typed.X].Type.(*types.Pointer); ok {
+ return nil
+ }
+
+ // Get parent identity; i.e. `a.b` of the `a.b.c`.
+ return s.getIdentity(typed.X)
+
+ case *ast.Ident:
+ // Get simple identity; i.e. `a` of the `a`.
+ if typed.Obj == nil {
+ return nil
+ }
+ return typed
+ }
+ return nil
+}
diff --git a/vendor/github.com/kyoh86/exportloopref/go.mod b/vendor/github.com/kyoh86/exportloopref/go.mod
new file mode 100644
index 00000000..2b61b220
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/go.mod
@@ -0,0 +1,5 @@
+module github.com/kyoh86/exportloopref
+
+go 1.14
+
+require golang.org/x/tools v0.0.0-20200321224714-0d839f3cf2ed
diff --git a/vendor/github.com/kyoh86/exportloopref/go.sum b/vendor/github.com/kyoh86/exportloopref/go.sum
new file mode 100644
index 00000000..eb0e5ab1
--- /dev/null
+++ b/vendor/github.com/kyoh86/exportloopref/go.sum
@@ -0,0 +1,20 @@
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200321224714-0d839f3cf2ed h1:OCZDlBlLYiUK6T33/8+3BnojrS2W+Dg1rKYJhR89xGE=
+golang.org/x/tools v0.0.0-20200321224714-0d839f3cf2ed/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml
index f07376f9..1a30a6cb 100644
--- a/vendor/github.com/magiconair/properties/.travis.yml
+++ b/vendor/github.com/magiconair/properties/.travis.yml
@@ -1,5 +1,6 @@
language: go
go:
+ - 1.3.x
- 1.4.x
- 1.5.x
- 1.6.x
@@ -9,4 +10,7 @@ go:
- "1.10.x"
- "1.11.x"
- "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "1.15.x"
- tip
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
index 176626a1..ff8d0253 100644
--- a/vendor/github.com/magiconair/properties/CHANGELOG.md
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -1,8 +1,29 @@
## Changelog
+### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020
+
+ * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write
+
+ This patch ensures that backslashes are escaped on write. Existing applications which
+ rely on the old behavior may need to be updated.
+
+ Thanks to [@apesternikov](https://github.com/apesternikov) for the patch.
+
+ * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL()
+
+ Thanks to [@aliras1](https://github.com/aliras1) for the patch.
+
+ * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write()
+
+ Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
+ * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys
+
+ Thanks to [@mkjor](https://github.com/mkjor) for the patch.
+
### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019
- * [PR #26](https://github.com/magiconair/properties/pull/35): Close body always after request
+ * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request
This patch ensures that in `LoadURL` the response body is always closed.
diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE.md
similarity index 84%
rename from vendor/github.com/magiconair/properties/LICENSE
rename to vendor/github.com/magiconair/properties/LICENSE.md
index b387087c..79c87e3e 100644
--- a/vendor/github.com/magiconair/properties/LICENSE
+++ b/vendor/github.com/magiconair/properties/LICENSE.md
@@ -1,15 +1,14 @@
-goproperties - properties file decoder for Go
-
-Copyright (c) 2013-2018 - Frank Schroeder
+Copyright (c) 2013-2020, Frank Schroeder
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice, this
+ * Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
+
+ * Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
diff --git a/vendor/github.com/magiconair/properties/go.mod b/vendor/github.com/magiconair/properties/go.mod
index 02a6f865..4ff090bd 100644
--- a/vendor/github.com/magiconair/properties/go.mod
+++ b/vendor/github.com/magiconair/properties/go.mod
@@ -1 +1,3 @@
module github.com/magiconair/properties
+
+go 1.13
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
index ab953253..c83c2dad 100644
--- a/vendor/github.com/magiconair/properties/load.go
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -132,11 +132,12 @@ func (l *Loader) LoadURL(url string) (*Properties, error) {
}
ct := resp.Header.Get("Content-Type")
+ ct = strings.Join(strings.Fields(ct), "")
var enc Encoding
switch strings.ToLower(ct) {
- case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
+ case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1":
enc = ISO_8859_1
- case "", "text/plain; charset=utf-8":
+ case "", "text/plain;charset=utf-8":
enc = UTF8
default:
return nil, fmt.Errorf("properties: invalid content type %s", ct)
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
index cb3d1a33..6e350a28 100644
--- a/vendor/github.com/magiconair/properties/properties.go
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -13,6 +13,7 @@ import (
"log"
"os"
"regexp"
+ "sort"
"strconv"
"strings"
"time"
@@ -69,6 +70,9 @@ type Properties struct {
// Stores the keys in order of appearance.
k []string
+
+ // WriteSeparator specifies the separator of key and value while writing the properties.
+ WriteSeparator string
}
// NewProperties creates a new Properties struct with the default
@@ -111,7 +115,7 @@ func (p *Properties) Get(key string) (value string, ok bool) {
// circular references and malformed expressions
// so we panic if we still get an error here.
if err != nil {
- ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v))
+ ErrorHandler(err)
}
return expanded, true
@@ -586,6 +590,12 @@ func (p *Properties) String() string {
return s
}
+// Sort sorts the properties keys in alphabetical order.
+// This is helpful before writing the properties.
+func (p *Properties) Sort() {
+ sort.Strings(p.k)
+}
+
// Write writes all unexpanded 'key = value' pairs to the given writer.
// Write returns the number of bytes written and any write error encountered.
func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
@@ -635,8 +645,11 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i
}
}
}
-
- x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
+ sep := " = "
+ if p.WriteSeparator != "" {
+ sep = p.WriteSeparator
+ }
+ x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc))
if err != nil {
return
}
@@ -753,7 +766,7 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s
for _, k := range keys {
if key == k {
- return "", fmt.Errorf("circular reference")
+ return "", fmt.Errorf("circular reference in %q", key + " = " + prefix + k + postfix)
}
}
@@ -820,6 +833,8 @@ func escape(r rune, special string) string {
return "\\r"
case '\t':
return "\\t"
+ case '\\':
+ return "\\\\"
default:
if strings.ContainsRune(special, r) {
return "\\" + string(r)
diff --git a/vendor/github.com/matoous/godox/godox.go b/vendor/github.com/matoous/godox/godox.go
index 13f3f3be..6d7104b0 100644
--- a/vendor/github.com/matoous/godox/godox.go
+++ b/vendor/github.com/matoous/godox/godox.go
@@ -49,7 +49,7 @@ func getMessages(c *ast.Comment, fset *token.FileSet, keywords []string) []Messa
pos := fset.Position(c.Pos())
// trim the comment
if len(sComment) > 40 {
- sComment = []byte(fmt.Sprintf("%s...", sComment[:40]))
+ sComment = []byte(fmt.Sprintf("%.40s...", sComment))
}
comments = append(comments, Message{
Pos: pos,
diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml
index 98db8f06..7942c565 100644
--- a/vendor/github.com/mattn/go-colorable/.travis.yml
+++ b/vendor/github.com/mattn/go-colorable/.travis.yml
@@ -1,9 +1,15 @@
language: go
+sudo: false
go:
+ - 1.13.x
- tip
before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
+ - go get -t -v ./...
+
script:
- - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw
+ - ./go.test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
+
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
index 56729a92..e055952b 100644
--- a/vendor/github.com/mattn/go-colorable/README.md
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -1,8 +1,8 @@
# go-colorable
-[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
-[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
+[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable)
+[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
Colorable writer for windows.
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
index 0b0aef83..1f7806fe 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -27,3 +27,11 @@ func NewColorableStdout() io.Writer {
func NewColorableStderr() io.Writer {
return os.Stderr
}
+
+// EnableColorsStdout enables colors if possible.
+func EnableColorsStdout(enabled *bool) func() {
+ if enabled != nil {
+ *enabled = true
+ }
+ return func() {}
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
index 3fb771dc..08cbd1e0 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_others.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -28,3 +28,11 @@ func NewColorableStdout() io.Writer {
func NewColorableStderr() io.Writer {
return os.Stderr
}
+
+// EnableColorsStdout enables colors if possible.
+func EnableColorsStdout(enabled *bool) func() {
+ if enabled != nil {
+ *enabled = true
+ }
+ return func() {}
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go
index 1bd628f2..04c4229c 100644
--- a/vendor/github.com/mattn/go-colorable/colorable_windows.go
+++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -10,6 +10,7 @@ import (
"os"
"strconv"
"strings"
+ "sync"
"syscall"
"unsafe"
@@ -27,6 +28,9 @@ const (
backgroundRed = 0x40
backgroundIntensity = 0x80
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+ commonLvbUnderscore = 0x8000
+
+ cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
)
const (
@@ -78,6 +82,8 @@ var (
procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer")
)
@@ -89,6 +95,7 @@ type Writer struct {
oldattr word
oldpos coord
rest bytes.Buffer
+ mutex sync.Mutex
}
// NewColorable returns new instance of Writer which handles escape sequence from File.
@@ -98,6 +105,10 @@ func NewColorable(file *os.File) io.Writer {
}
if isatty.IsTerminal(file.Fd()) {
+ var mode uint32
+ if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 {
+ return file
+ }
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
@@ -424,6 +435,8 @@ func atoiWithDefault(s string, def int) (int, error) {
// Write writes data on console
func (w *Writer) Write(data []byte) (n int, err error) {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
@@ -675,14 +688,19 @@ loop:
switch {
case n == 0 || n == 100:
attr = w.oldattr
- case 1 <= n && n <= 5:
+ case n == 4:
+ attr |= commonLvbUnderscore
+ case (1 <= n && n <= 3) || n == 5:
attr |= foregroundIntensity
- case n == 7:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
- case n == 22 || n == 25:
- attr |= foregroundIntensity
- case n == 27:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+ case n == 7 || n == 27:
+ attr =
+ (attr &^ (foregroundMask | backgroundMask)) |
+ ((attr & foregroundMask) << 4) |
+ ((attr & backgroundMask) >> 4)
+ case n == 22:
+ attr &^= foregroundIntensity
+ case n == 24:
+ attr &^= commonLvbUnderscore
case 30 <= n && n <= 37:
attr &= backgroundMask
if (n-30)&1 != 0 {
@@ -1003,3 +1021,23 @@ func n256setup() {
n256backAttr[i] = c.backgroundAttr()
}
}
+
+// EnableColorsStdout enables colors if possible.
+func EnableColorsStdout(enabled *bool) func() {
+ var mode uint32
+ h := os.Stdout.Fd()
+ if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 {
+ if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 {
+ if enabled != nil {
+ *enabled = true
+ }
+ return func() {
+ procSetConsoleMode.Call(h, uintptr(mode))
+ }
+ }
+ }
+ if enabled != nil {
+ *enabled = true
+ }
+ return func() {}
+}
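
`EnableColorsStdout` is new in this go-colorable version: on Windows it attempts to switch the console into virtual-terminal mode and returns a function that restores the previous mode, while on other platforms it simply reports colors as enabled and returns a no-op. A minimal usage sketch:

```go
package main

import (
	"fmt"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	var enabled bool
	restore := colorable.EnableColorsStdout(&enabled)
	defer restore()

	if enabled {
		// Write an ANSI-colored line through the colorable stdout wrapper.
		fmt.Fprintln(colorable.NewColorableStdout(), "\x1b[32mcolors are on\x1b[0m")
	}
}
```
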
diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod
index ef3ca9d4..1e590b81 100644
--- a/vendor/github.com/mattn/go-colorable/go.mod
+++ b/vendor/github.com/mattn/go-colorable/go.mod
@@ -1,3 +1,8 @@
module github.com/mattn/go-colorable
-require github.com/mattn/go-isatty v0.0.8
+require (
+ github.com/mattn/go-isatty v0.0.12
+ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae // indirect
+)
+
+go 1.13
diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum
index 2c12960e..cf5b95d9 100644
--- a/vendor/github.com/mattn/go-colorable/go.sum
+++ b/vendor/github.com/mattn/go-colorable/go.sum
@@ -1,4 +1,5 @@
-github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh
new file mode 100644
index 00000000..012162b0
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -race -coverprofile=profile.out -covermode=atomic "$d"
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
index 5597e026..604314dd 100644
--- a/vendor/github.com/mattn/go-isatty/.travis.yml
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -1,13 +1,14 @@
language: go
+sudo: false
go:
+ - 1.13.x
- tip
-os:
- - linux
- - osx
-
before_install:
- - go get github.com/mattn/goveralls
- - go get golang.org/x/tools/cmd/cover
+ - go get -t -v ./...
+
script:
- - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
+ - ./go.test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
index 1e69004b..38418353 100644
--- a/vendor/github.com/mattn/go-isatty/README.md
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -1,7 +1,7 @@
# go-isatty
[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
-[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod
index f310320c..605c4c22 100644
--- a/vendor/github.com/mattn/go-isatty/go.mod
+++ b/vendor/github.com/mattn/go-isatty/go.mod
@@ -1,3 +1,5 @@
module github.com/mattn/go-isatty
-require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223
+go 1.12
+
+require golang.org/x/sys v0.0.0-20200116001909-b77594299b42
diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum
index 426c8973..912e29cb 100644
--- a/vendor/github.com/mattn/go-isatty/go.sum
+++ b/vendor/github.com/mattn/go-isatty/go.sum
@@ -1,2 +1,2 @@
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh
new file mode 100644
index 00000000..012162b0
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+ go test -race -coverprofile=profile.out -covermode=atomic "$d"
+ if [ -f profile.out ]; then
+ cat profile.out >> coverage.txt
+ rm profile.out
+ fi
+done
diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go
deleted file mode 100644
index d3567cb5..00000000
--- a/vendor/github.com/mattn/go-isatty/isatty_android.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build android
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
-
-// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
-// terminal. This is also always false on this environment.
-func IsCygwinTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
index 07e93039..711f2880 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -3,18 +3,12 @@
package isatty
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
+import "golang.org/x/sys/unix"
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
+ _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
+ return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
new file mode 100644
index 00000000..c5b6e0c0
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go
@@ -0,0 +1,22 @@
+// +build plan9
+
+package isatty
+
+import (
+ "syscall"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ path, err := syscall.Fd2path(int(fd))
+ if err != nil {
+ return false
+ }
+ return path == "/dev/cons" || path == "/mnt/term/dev/cons"
+}
+
+// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool {
+ return false
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
index 453b025d..31a1ca97 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
@@ -1,6 +1,5 @@
// +build linux aix
// +build !appengine
-// +build !android
package isatty
diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go
index af51cbca..1fa86915 100644
--- a/vendor/github.com/mattn/go-isatty/isatty_windows.go
+++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -4,6 +4,7 @@
package isatty
import (
+ "errors"
"strings"
"syscall"
"unicode/utf16"
@@ -11,15 +12,18 @@ import (
)
const (
- fileNameInfo uintptr = 2
- fileTypePipe = 3
+ objectNameInfo uintptr = 1
+ fileNameInfo = 2
+ fileTypePipe = 3
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ ntdll = syscall.NewLazyDLL("ntdll.dll")
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
procGetFileType = kernel32.NewProc("GetFileType")
+ procNtQueryObject = ntdll.NewProc("NtQueryObject")
)
func init() {
@@ -45,7 +49,10 @@ func isCygwinPipeName(name string) bool {
return false
}
- if token[0] != `\msys` && token[0] != `\cygwin` {
+ if token[0] != `\msys` &&
+ token[0] != `\cygwin` &&
+ token[0] != `\Device\NamedPipe\msys` &&
+ token[0] != `\Device\NamedPipe\cygwin` {
return false
}
@@ -68,11 +75,35 @@ func isCygwinPipeName(name string) bool {
return true
}
+// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the full file name
+// from a file handle. GetFileInformationByHandleEx is not available before Windows Vista, and
+// some users are still on Windows XP, so this serves as a fallback for those systems; it also
+// works on systems from Windows Vista through Windows 10.
+// See https://stackoverflow.com/a/18792477 for details.
+func getFileNameByHandle(fd uintptr) (string, error) {
+ if procNtQueryObject == nil {
+ return "", errors.New("ntdll.dll: NtQueryObject not supported")
+ }
+
+ var buf [4 + syscall.MAX_PATH]uint16
+ var result int
+ r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
+ fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
+ if r != 0 {
+ return "", e
+ }
+ return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
+}
+
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal.
func IsCygwinTerminal(fd uintptr) bool {
if procGetFileInformationByHandleEx == nil {
- return false
+ name, err := getFileNameByHandle(fd)
+ if err != nil {
+ return false
+ }
+ return isCygwinPipeName(name)
}
// Cygwin/msys's pty is a pipe.
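
With the change above, `IsCygwinTerminal` falls back to the undocumented `NtQueryObject` call when `GetFileInformationByHandleEx` is unavailable, so the caller-side API is unchanged. A minimal sketch of how a program might distinguish the three cases:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	fd := os.Stdout.Fd()
	switch {
	case isatty.IsTerminal(fd):
		fmt.Println("stdout is a native terminal")
	case isatty.IsCygwinTerminal(fd):
		fmt.Println("stdout is a cygwin/msys2 pty")
	default:
		fmt.Println("stdout is not a terminal")
	}
}
```
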
diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json
new file mode 100644
index 00000000..5ae9d96b
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/renovate.json
@@ -0,0 +1,8 @@
+{
+ "extends": [
+ "config:base"
+ ],
+ "postUpdateOptions": [
+ "gomodTidy"
+ ]
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
index 1689c7d7..5e31a95a 100644
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml
@@ -1,8 +1,9 @@
language: go
go:
- - "1.11.x"
+ - "1.14.x"
- tip
script:
- go test
+ - go test -bench . -benchmem
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
index 3b3cb723..20eea2b7 100644
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
@@ -1,3 +1,43 @@
+## 1.3.3
+
+* Decoding maps from maps creates a settable value for decode hooks [GH-203]
+
+## 1.3.2
+
+* Decode into interface type with a struct value is supported [GH-187]
+
+## 1.3.1
+
+* Squash should only squash embedded structs. [GH-194]
+
+## 1.3.0
+
+* Added `",omitempty"` support. This will ignore zero values in the source
+ structure when encoding. [GH-145]
+
+## 1.2.3
+
+* Fix duplicate entries in Keys list with pointer values. [GH-185]
+
+## 1.2.2
+
+* Do not add unsettable (unexported) values to the unused metadata key
+ or "remain" value. [GH-150]
+
+## 1.2.1
+
+* Go modules checksum mismatch fix
+
+## 1.2.0
+
+* Added support to capture unused values in a field using the `",remain"` value
+ in the mapstructure tag. There is an example to showcase usage.
+* Added `DecoderConfig` option to always squash embedded structs
+* `json.Number` can decode into `uint` types
+* Empty slices are preserved and not replaced with nil slices
+* Fix panic that can occur when decoding a map into a nil slice of structs
+* Improved package documentation for godoc
+
## 1.1.2
* Fix error when decode hook decodes interface implementation into interface
diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod
index d2a71256..a03ae973 100644
--- a/vendor/github.com/mitchellh/mapstructure/go.mod
+++ b/vendor/github.com/mitchellh/mapstructure/go.mod
@@ -1 +1,3 @@
module github.com/mitchellh/mapstructure
+
+go 1.14
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index 256ee63f..f41bcc58 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -1,10 +1,150 @@
-// Package mapstructure exposes functionality to convert an arbitrary
-// map[string]interface{} into a native Go structure.
+// Package mapstructure exposes functionality to convert one arbitrary
+// Go type into another, typically to convert a map[string]interface{}
+// into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested
// maps and so on into the proper structures in the native Go struct.
// See the examples to see what the decoder is capable of.
+//
+// The simplest function to start with is Decode.
+//
+// Field Tags
+//
+// When decoding to a struct, mapstructure will use the field name by
+// default to perform the mapping. For example, if a struct has a field
+// "Username" then mapstructure will look for a key in the source value
+// of "username" (case insensitive).
+//
+// type User struct {
+// Username string
+// }
+//
+// You can change the behavior of mapstructure by using struct tags.
+// The default struct tag that mapstructure looks for is "mapstructure"
+// but you can customize it using DecoderConfig.
+//
+// Renaming Fields
+//
+// To rename the key that mapstructure looks for, use the "mapstructure"
+// tag and set a value directly. For example, to change the "username" example
+// above to "user":
+//
+// type User struct {
+// Username string `mapstructure:"user"`
+// }
+//
+// Embedded Structs and Squashing
+//
+// Embedded structs are treated as if they're another field with that name.
+// By default, the two structs below are equivalent when decoding with
+// mapstructure:
+//
+// type Person struct {
+// Name string
+// }
+//
+// type Friend struct {
+// Person
+// }
+//
+// type Friend struct {
+// Person Person
+// }
+//
+// This would require an input that looks like the following:
+//
+// map[string]interface{}{
+// "person": map[string]interface{}{"name": "alice"},
+// }
+//
+// If your "person" value is NOT nested, then you can append ",squash" to
+// your tag value and mapstructure will treat it as if the embedded struct
+// were part of the struct directly. Example:
+//
+// type Friend struct {
+// Person `mapstructure:",squash"`
+// }
+//
+// Now the following input would be accepted:
+//
+// map[string]interface{}{
+// "name": "alice",
+// }
+//
+// DecoderConfig has a field that changes the behavior of mapstructure
+// to always squash embedded structs.
+//
+// Remainder Values
+//
+// If there are any unmapped keys in the source value, mapstructure by
+// default will silently ignore them. You can error by setting ErrorUnused
+// in DecoderConfig. If you're using Metadata you can also maintain a slice
+// of the unused keys.
+//
+// You can also use the ",remain" suffix on your tag to collect all unused
+// values in a map. The field with this tag MUST be a map type and should
+// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
+// See example below:
+//
+// type Friend struct {
+// Name string
+// Other map[string]interface{} `mapstructure:",remain"`
+// }
+//
+// Given the input below, Other would be populated with the other
+// values that weren't used (everything but "name"):
+//
+// map[string]interface{}{
+// "name": "bob",
+// "address": "123 Maple St.",
+// }
+//
+// Omit Empty Values
+//
+// When decoding from a struct to any other value, you may use the
+// ",omitempty" suffix on your tag to omit that value if it equates to
+// the zero value. The zero value of all types is specified in the Go
+// specification.
+//
+// For example, the zero value of a numeric type is zero ("0"). If the struct
+// field value is zero and a numeric type, the field is empty, and it won't
+// be encoded into the destination type.
+//
+// type Source {
+// Age int `mapstructure:",omitempty"`
+// }
+//
+// Unexported fields
+//
+// Since unexported (private) struct fields cannot be set outside the package
+// where they are defined, the decoder will simply skip them.
+//
+// For this output type definition:
+//
+// type Exported struct {
+// private string // this unexported field will be skipped
+// Public string
+// }
+//
+// Using this map as input:
+//
+// map[string]interface{}{
+// "private": "I will be ignored",
+// "Public": "I made it through!",
+// }
+//
+// The following struct will be decoded:
+//
+// type Exported struct {
+// private: "" // field is left with an empty string (zero value)
+// Public: "I made it through!"
+// }
+//
+// Other Configuration
+//
+// mapstructure is highly configurable. See the DecoderConfig struct
+// for other features and options that are supported.
package mapstructure
import (
@@ -80,6 +220,14 @@ type DecoderConfig struct {
//
WeaklyTypedInput bool
+ // Squash will squash embedded structs. A squash tag may also be
+ // added to an individual struct field using a tag. For example:
+ //
+ // type Parent struct {
+ // Child `mapstructure:",squash"`
+ // }
+ Squash bool
+
// Metadata is the struct that will contain extra metadata about
// the decoding. If this is nil, then no metadata will be tracked.
Metadata *Metadata
@@ -271,6 +419,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
var err error
outputKind := getKind(outVal)
+ addMetaKey := true
switch outputKind {
case reflect.Bool:
err = d.decodeBool(name, input, outVal)
@@ -289,7 +438,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
case reflect.Map:
err = d.decodeMap(name, input, outVal)
case reflect.Ptr:
- err = d.decodePtr(name, input, outVal)
+ addMetaKey, err = d.decodePtr(name, input, outVal)
case reflect.Slice:
err = d.decodeSlice(name, input, outVal)
case reflect.Array:
@@ -303,7 +452,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
// If we reached here, then we successfully decoded SOMETHING, so
// mark the key as used if we're tracking metainput.
- if d.config.Metadata != nil && name != "" {
+ if addMetaKey && d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
@@ -314,7 +463,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
// value to "data" of that type.
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
if val.IsValid() && val.Elem().IsValid() {
- return d.decode(name, data, val.Elem())
+ elem := val.Elem()
+
+ // If we can't address this element, then it's not writable. Instead,
+ // we make a copy of the value (which is a pointer and therefore
+ // writable), decode into that, and replace the whole value.
+ copied := false
+ if !elem.CanAddr() {
+ copied = true
+
+ // Make *T
+ copy := reflect.New(elem.Type())
+
+ // *T = elem
+ copy.Elem().Set(elem)
+
+ // Set elem so we decode into it
+ elem = copy
+ }
+
+ // Decode. If we have an error then return. We also return right
+ // away if we're not a copy because that means we decoded directly.
+ if err := d.decode(name, data, elem); err != nil || !copied {
+ return err
+ }
+
+ // If we're a copy, we need to set the final result
+ val.Set(elem.Elem())
+ return nil
}
dataVal := reflect.ValueOf(data)
@@ -438,6 +614,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
+ dataType := dataVal.Type()
switch {
case dataKind == reflect.Int:
@@ -469,6 +646,18 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
} else {
return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
}
+ case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+ jn := data.(json.Number)
+ i, err := jn.Int64()
+ if err != nil {
+ return fmt.Errorf(
+ "error decoding json.Number into %s: %s", name, err)
+ }
+ if i < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %d overflows uint",
+ name, i)
+ }
+ val.SetUint(uint64(i))
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s'",
@@ -678,27 +867,31 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
}
tagValue := f.Tag.Get(d.config.TagName)
- tagParts := strings.Split(tagValue, ",")
-
- // Determine the name of the key in the map
keyName := f.Name
- if tagParts[0] != "" {
- if tagParts[0] == "-" {
+
+ // If Squash is set in the config, we squash the field down.
+ squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
+ // Determine the name of the key in the map
+ if index := strings.Index(tagValue, ","); index != -1 {
+ if tagValue[:index] == "-" {
continue
}
- keyName = tagParts[0]
- }
-
- // If "squash" is specified in the tag, we squash the field down.
- squash := false
- for _, tag := range tagParts[1:] {
- if tag == "squash" {
- squash = true
- break
+ // If "omitempty" is specified in the tag, it ignores empty values.
+ if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
+ continue
}
- }
- if squash && v.Kind() != reflect.Struct {
- return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
+ if squash && v.Kind() != reflect.Struct {
+ return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+ }
+ keyName = tagValue[:index]
+ } else if len(tagValue) > 0 {
+ if tagValue == "-" {
+ continue
+ }
+ keyName = tagValue
}
switch v.Kind() {
@@ -713,11 +906,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
mType := reflect.MapOf(vKeyType, vElemType)
vMap := reflect.MakeMap(mType)
- err := d.decode(keyName, x.Interface(), vMap)
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+ // whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(vMap.Type())
+ reflect.Indirect(addrVal).Set(vMap)
+
+ err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
if err != nil {
return err
}
+ // the underlying map may have been completely overwritten so pull
+ // it indirectly out of the enclosing value.
+ vMap = reflect.Indirect(addrVal)
+
if squash {
for _, k := range vMap.MapKeys() {
valMap.SetMapIndex(k, vMap.MapIndex(k))
@@ -738,7 +942,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
return nil
}
-func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
// If the input data is nil, then we want to just set the output
// pointer to be nil as well.
isNil := data == nil
@@ -759,7 +963,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
val.Set(nilValue)
}
- return nil
+ return true, nil
}
// Create an element of the concrete (non pointer) type and decode
@@ -773,16 +977,16 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
}
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
- return err
+ return false, err
}
val.Set(realVal)
} else {
if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
- return err
+ return false, err
}
}
- return nil
+ return false, nil
}
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
@@ -805,8 +1009,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
valElemType := valType.Elem()
sliceType := reflect.SliceOf(valElemType)
- valSlice := val
- if valSlice.IsNil() || d.config.ZeroFields {
+ // If we have a non array/slice type then we first attempt to convert.
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
if d.config.WeaklyTypedInput {
switch {
// Slice and array we use the normal logic
@@ -833,18 +1037,17 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
}
}
- // Check input type
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ }
- }
-
- // If the input value is empty, then don't allocate since non-nil != nil
- if dataVal.Len() == 0 {
- return nil
- }
+ // If the input value is nil, then don't allocate since empty != nil
+ if dataVal.IsNil() {
+ return nil
+ }
+ valSlice := val
+ if valSlice.IsNil() || d.config.ZeroFields {
// Make a new slice to hold our result, same size as the original data.
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
}
@@ -962,13 +1165,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Not the most efficient way to do this but we can optimize later if
// we want to. To convert from struct to struct we go to map first
// as an intermediary.
- m := make(map[string]interface{})
- mval := reflect.Indirect(reflect.ValueOf(&m))
- if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
+
+ // Make a new map to hold our result
+ mapType := reflect.TypeOf((map[string]interface{})(nil))
+ mval := reflect.MakeMap(mapType)
+
+ // Creating a pointer to a map so that other methods can completely
+ // overwrite the map if need be (looking at you decodeMapFromMap). The
+ // indirection allows the underlying map to be settable (CanSet() == true)
+ // whereas reflect.MakeMap returns an unsettable map.
+ addrVal := reflect.New(mval.Type())
+
+ reflect.Indirect(addrVal).Set(mval)
+ if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
return err
}
- result := d.decodeStructFromMap(name, mval, val)
+ result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
return result
default:
@@ -1005,6 +1218,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
field reflect.StructField
val reflect.Value
}
+
+ // remainField is set to a valid field set with the "remain" tag if
+ // we are keeping track of remaining values.
+ var remainField *field
+
fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
@@ -1017,13 +1235,21 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
fieldKind := fieldType.Type.Kind()
// If "squash" is specified in the tag, we squash the field down.
- squash := false
+ squash := d.config.Squash && fieldKind == reflect.Struct && fieldType.Anonymous
+ remain := false
+
+ // We always parse the tags because we're looking for other tags too
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
break
}
+
+ if tag == "remain" {
+ remain = true
+ break
+ }
}
if squash {
@@ -1036,8 +1262,13 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
continue
}
- // Normal struct field, store it away
- fields = append(fields, field{fieldType, structVal.Field(i)})
+ // Build our field
+ if remain {
+ remainField = &field{fieldType, structVal.Field(i)}
+ } else {
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, structVal.Field(i)})
+ }
}
}
@@ -1078,9 +1309,6 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
}
- // Delete the key we're using from the unused map so we stop tracking
- delete(dataValKeysUnused, rawMapKey.Interface())
-
if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
@@ -1092,6 +1320,9 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
continue
}
+ // Delete the key we're using from the unused map so we stop tracking
+ delete(dataValKeysUnused, rawMapKey.Interface())
+
// If the name is empty string, then we're at the root, and we
// don't dot-join the fields.
if name != "" {
@@ -1103,6 +1334,25 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
}
+ // If we have a "remain"-tagged field and we have unused keys then
+ // we put the unused keys directly into the remain field.
+ if remainField != nil && len(dataValKeysUnused) > 0 {
+ // Build a map of only the unused values
+ remain := map[interface{}]interface{}{}
+ for key := range dataValKeysUnused {
+ remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
+ }
+
+ // Decode it as-if we were just decoding this map onto our map.
+ if err := d.decodeMap(name, remain, remainField.val); err != nil {
+ errors = appendErrors(errors, err)
+ }
+
+ // Clear the remaining unused keys so that the ErrorUnused check
+ // below does not report them.
+ dataValKeysUnused = nil
+ }
+
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
keys := make([]string, 0, len(dataValKeysUnused))
for rawKey := range dataValKeysUnused {
@@ -1133,6 +1383,24 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
return nil
}
+func isEmptyValue(v reflect.Value) bool {
+ switch getKind(v) {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
func getKind(val reflect.Value) reflect.Kind {
kind := val.Kind()
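
The expanded package documentation above describes the new ",remain" tag alongside ",omitempty" and the `Squash` decoder option. A short end-to-end sketch of the ",remain" behaviour, mirroring the Friend example from the doc comment:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Friend struct {
	Name  string
	Other map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{
		"name":    "bob",
		"address": "123 Maple St.",
	}

	var f Friend
	if err := mapstructure.Decode(input, &f); err != nil {
		panic(err)
	}

	// Name is mapped normally; every key that was not consumed by another
	// field is collected into Other.
	fmt.Println(f.Name)  // bob
	fmt.Println(f.Other) // map[address:123 Maple St.]
}
```
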
diff --git a/vendor/github.com/nishanths/exhaustive/.gitignore b/vendor/github.com/nishanths/exhaustive/.gitignore
new file mode 100644
index 00000000..a724b56a
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/.gitignore
@@ -0,0 +1,5 @@
+.DS_Store
+
+# binary
+cmd/exhaustive/exhaustive
+exhaustive
diff --git a/vendor/github.com/go-lintpack/lintpack/LICENSE b/vendor/github.com/nishanths/exhaustive/LICENSE
similarity index 55%
rename from vendor/github.com/go-lintpack/lintpack/LICENSE
rename to vendor/github.com/nishanths/exhaustive/LICENSE
index de0abccd..32befa68 100644
--- a/vendor/github.com/go-lintpack/lintpack/LICENSE
+++ b/vendor/github.com/nishanths/exhaustive/LICENSE
@@ -1,21 +1,22 @@
-Copyright (c) 2018, go-lintpack maintainers
+BSD 2-Clause License
+
+Copyright (c) 2020, Nishanth Shanmugham
+All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
diff --git a/vendor/github.com/nishanths/exhaustive/README.md b/vendor/github.com/nishanths/exhaustive/README.md
new file mode 100644
index 00000000..ecc76c7c
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/README.md
@@ -0,0 +1,85 @@
+# exhaustive
+
+[![Godoc](https://godoc.org/github.com/nishanths/exhaustive?status.svg)](https://godoc.org/github.com/nishanths/exhaustive)
+
+The `exhaustive` package and command line program can be used to detect
+enum switch statements that are not exhaustive.
+
+An enum switch statement is exhaustive if it has cases for each of the enum's members. See godoc for the definition of enum used by the program.
+
+The `exhaustive` package provides an `Analyzer` that follows the guidelines
+described in the [go/analysis](https://godoc.org/golang.org/x/tools/go/analysis) package; this makes
+it possible to integrate into existing analysis driver programs.
+
+## Install
+
+```
+go get github.com/nishanths/exhaustive/...
+```
+
+## Docs
+
+https://godoc.org/github.com/nishanths/exhaustive
+
+## Usage
+
+The command line usage is:
+
+```
+Usage: exhaustive [-flags] [packages...]
+
+Flags:
+ -default-signifies-exhaustive
+ indicates that switch statements are to be considered exhaustive if a 'default' case
+ is present, even if all enum members aren't listed in the switch (default false)
+ -fix
+ apply all suggested fixes (default false)
+
+Examples:
+ exhaustive code.org/proj/...
+ exhaustive -fix example.org/foo/pkg example.org/foo/bar
+```
+
+## Example
+
+Given the code:
+
+```diff
+package token
+
+type Token int
+
+const (
+ Add Token = iota
+ Subtract
+ Multiply
++ Quotient
++ Remainder
+)
+```
+```
+package calc
+
+import "token"
+
+func processToken(t token.Token) {
+ switch t {
+ case token.Add:
+ // ...
+ case token.Subtract:
+ // ...
+ case token.Multiply:
+ // ...
+ }
+}
+```
+
+Running the `exhaustive` command will print:
+
+```
+calc.go:6:2: missing cases in switch of type token.Token: Quotient, Remainder
+```
+
+## License
+
+BSD 2-Clause
diff --git a/vendor/github.com/nishanths/exhaustive/enum.go b/vendor/github.com/nishanths/exhaustive/enum.go
new file mode 100644
index 00000000..98b5656b
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/enum.go
@@ -0,0 +1,99 @@
+package exhaustive
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+type enums map[string][]string // enum type name -> enum member names
+
+func findEnums(pass *analysis.Pass) enums {
+ pkgEnums := make(enums)
+
+ // Gather enum types.
+ for _, f := range pass.Files {
+ for _, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ if gen.Tok != token.TYPE {
+ continue
+ }
+ for _, s := range gen.Specs {
+ // Must be TypeSpec since we've filtered on token.TYPE.
+ t, ok := s.(*ast.TypeSpec)
+ obj := pass.TypesInfo.Defs[t.Name]
+ if obj == nil {
+ continue
+ }
+
+ named, ok := obj.Type().(*types.Named)
+ if !ok {
+ continue
+ }
+ basic, ok := named.Underlying().(*types.Basic)
+ if !ok {
+ continue
+ }
+ switch i := basic.Info(); {
+ case i&types.IsInteger != 0:
+ pkgEnums[named.Obj().Name()] = nil
+ case i&types.IsFloat != 0:
+ pkgEnums[named.Obj().Name()] = nil
+ case i&types.IsString != 0:
+ pkgEnums[named.Obj().Name()] = nil
+ }
+ }
+ }
+ }
+
+ // Gather enum members.
+ for _, f := range pass.Files {
+ for _, decl := range f.Decls {
+ gen, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ if gen.Tok != token.CONST && gen.Tok != token.VAR {
+ continue
+ }
+ for _, s := range gen.Specs {
+ // Must be ValueSpec since we've filtered on token.CONST, token.VAR.
+ v := s.(*ast.ValueSpec)
+ for _, name := range v.Names {
+ obj := pass.TypesInfo.Defs[name]
+ if obj == nil {
+ continue
+ }
+ named, ok := obj.Type().(*types.Named)
+ if !ok {
+ continue
+ }
+
+ members, ok := pkgEnums[named.Obj().Name()]
+ if !ok {
+ continue
+ }
+ members = append(members, obj.Name())
+ pkgEnums[named.Obj().Name()] = members
+ }
+ }
+ }
+ }
+
+ // Delete member-less enum types.
+ // Without any members we cannot be sure a type is actually
+ // an enum (it may simply be a named type, for instance),
+ // so such types are not treated as enums.
+ for k, v := range pkgEnums {
+ if len(v) == 0 {
+ delete(pkgEnums, k)
+ }
+ }
+
+ return pkgEnums
+}
diff --git a/vendor/github.com/nishanths/exhaustive/exhaustive.go b/vendor/github.com/nishanths/exhaustive/exhaustive.go
new file mode 100644
index 00000000..ef869f26
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/exhaustive.go
@@ -0,0 +1,182 @@
+// Package exhaustive provides an analyzer that helps ensure enum switch statements
+// are exhaustive. The analyzer also provides fixes to make the offending switch
+// statements exhaustive (see "Fixes" section).
+//
+// See "cmd/exhaustive" subpackage for the related command line program.
+//
+// Definition of enum
+//
+// The language spec does not provide an explicit definition for enums.
+// For the purpose of this program, an enum type is a package-level named type
+// whose underlying type is an integer (includes byte and rune), a float, or
+// a string type. An enum type must have associated with it one or more
+// package-level variables of the named type in the package. These variables
+// constitute the enum's members.
+//
+// In the code snippet below, Biome is an enum type with 3 members.
+//
+// type Biome int
+//
+// const (
+// Tundra Biome = iota
+// Savanna
+// Desert
+// )
+//
+// Switch statement exhaustiveness
+//
+// An enum switch statement is exhaustive if it has cases for each of the enum's members.
+//
+// For an enum type defined in the same package as the switch statement, both
+// exported and unexported enum members must be present in order to consider
+// the switch exhaustive. On the other hand, for an enum type defined
+// in an external package it is sufficient for just exported enum members
+// to be present in order to consider the switch exhaustive.
+//
+// Flags
+//
+// The analyzer accepts a boolean flag: -default-signifies-exhaustive.
+// The flag, if set, indicates to the analyzer that switch statements
+// are to be considered exhaustive as long as a 'default' case is present, even
+// if all enum members aren't listed in the switch statement's cases.
+//
+// The other relevant flag is the -fix flag.
+//
+// Fixes
+//
+// The analyzer suggests fixes for a switch statement if it is not exhaustive
+// and does not have a 'default' case. The suggested fix always adds a single
+// case clause for the missing enum members.
+//
+// case missingA, missingB, missingC:
+// panic(fmt.Sprintf("unhandled value: %v", v))
+//
+// where v is the expression in the switch statement's tag (in other words, the
+// value being switched upon). If the switch statement's tag is a function or a
+// method call the analyzer does not suggest a fix, as reusing the call expression
+// in the panic/fmt.Sprintf call could be mutative.
+//
+// The rationale for the fix using panic is that it might be better to fail loudly on
+// existing unhandled or impossible cases than to let them slip by quietly unnoticed.
+// An even better fix may, of course, be to manually inspect the sites reported
+// by the package and handle the missing cases if necessary.
+//
+// Imports will be adjusted automatically to account for the "fmt" dependency.
+//
+// Skip analysis of specific switch statements
+//
+// If the following directive comment:
+//
+// //exhaustive:ignore
+//
+// is associated with a switch statement, the analyzer skips
+// checking of the switch statement and no diagnostics are reported.
+package exhaustive
+
+import (
+ "go/ast"
+ "go/types"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+ // DefaultSignifiesExhaustiveFlag is a flag name used by the analyzer. It
+ // is exported for use by analyzer driver programs.
+ DefaultSignifiesExhaustiveFlag = "default-signifies-exhaustive"
+)
+
+var (
+ fCheckMaps bool
+ fDefaultSignifiesExhaustive bool
+)
+
+func init() {
+ Analyzer.Flags.BoolVar(&fCheckMaps, "maps", false, "check key exhaustiveness for map literals of enum key type, in addition to checking switch statements")
+ Analyzer.Flags.BoolVar(&fDefaultSignifiesExhaustive, DefaultSignifiesExhaustiveFlag, false, "indicates that switch statements are to be considered exhaustive if a 'default' case is present, even if all enum members aren't listed in the switch")
+}
+
+var Analyzer = &analysis.Analyzer{
+ Name: "exhaustive",
+ Doc: "check exhaustiveness of enum switch statements",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+ FactTypes: []analysis.Fact{&enumsFact{}},
+}
+
+// IgnoreDirectivePrefix is used to exclude checking of specific switch statements.
+// See https://godoc.org/github.com/nishanths/exhaustive#hdr-Skip_analysis_of_specific_switch_statements
+// for details.
+const IgnoreDirectivePrefix = "//exhaustive:ignore"
+
+func containsIgnoreDirective(comments []*ast.Comment) bool {
+ for _, c := range comments {
+ if strings.HasPrefix(c.Text, IgnoreDirectivePrefix) {
+ return true
+ }
+ }
+ return false
+}
+
+type enumsFact struct {
+ Entries enums
+}
+
+var _ analysis.Fact = (*enumsFact)(nil)
+
+func (e *enumsFact) AFact() {}
+
+func (e *enumsFact) String() string {
+ // sort for stability (required for testing)
+ var sortedKeys []string
+ for k := range e.Entries {
+ sortedKeys = append(sortedKeys, k)
+ }
+ sort.Strings(sortedKeys)
+
+ var buf strings.Builder
+ for i, k := range sortedKeys {
+ v := e.Entries[k]
+ buf.WriteString(k)
+ buf.WriteString(":")
+ for j, vv := range v {
+ buf.WriteString(vv)
+ // add comma separator between each enum member in an enum type
+ if j != len(v)-1 {
+ buf.WriteString(",")
+ }
+ }
+ // add semicolon separator between each enum type
+ if i != len(sortedKeys)-1 {
+ buf.WriteString("; ")
+ }
+ }
+ return buf.String()
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ e := findEnums(pass)
+ if len(e) != 0 {
+ pass.ExportPackageFact(&enumsFact{Entries: e})
+ }
+
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ comments := make(map[*ast.File]ast.CommentMap) // CommentMap per package file, lazily populated by reference
+
+ checkSwitchStatements(pass, inspect, comments)
+ if fCheckMaps {
+ checkMapLiterals(pass, inspect, comments)
+ }
+ return nil, nil
+}
+
+func enumTypeName(e *types.Named, samePkg bool) string {
+ if samePkg {
+ return e.Obj().Name()
+ }
+ return e.Obj().Pkg().Name() + "." + e.Obj().Name()
+}
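
The package comment above notes that `Analyzer` follows the go/analysis conventions, so it can be plugged into any standard analysis driver. A minimal standalone driver might look like the sketch below (the command name is invented); running it with `-default-signifies-exhaustive` toggles the flag registered in `init` above:

```go
// Command exhaustivecheck runs the exhaustive analyzer on the named packages.
package main

import (
	"github.com/nishanths/exhaustive"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	singlechecker.Main(exhaustive.Analyzer)
}
```
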
diff --git a/vendor/github.com/nishanths/exhaustive/go.mod b/vendor/github.com/nishanths/exhaustive/go.mod
new file mode 100644
index 00000000..b15048ea
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/go.mod
@@ -0,0 +1,5 @@
+module github.com/nishanths/exhaustive
+
+go 1.14
+
+require golang.org/x/tools v0.0.0-20200519015757-0d0afa43d58a
diff --git a/vendor/github.com/nishanths/exhaustive/go.sum b/vendor/github.com/nishanths/exhaustive/go.sum
new file mode 100644
index 00000000..01ba9996
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/go.sum
@@ -0,0 +1,21 @@
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200519015757-0d0afa43d58a h1:gILuVKC+ZPD6g/tj6zBOdnOH1ZHI0zZ86+KLMogc6/s=
+golang.org/x/tools v0.0.0-20200519015757-0d0afa43d58a/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200519142718-10921354bc51 h1:GtYAC9y+dpwWCXBwbcZgxcFfiqW4SI93yvQqpF+9+P8=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/nishanths/exhaustive/map.go b/vendor/github.com/nishanths/exhaustive/map.go
new file mode 100644
index 00000000..6d875e32
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/map.go
@@ -0,0 +1,158 @@
+package exhaustive
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+func checkMapLiterals(pass *analysis.Pass, inspect *inspector.Inspector, comments map[*ast.File]ast.CommentMap) {
+ for _, f := range pass.Files {
+ for _, d := range f.Decls {
+ gen, ok := d.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ if gen.Tok != token.VAR {
+ continue // map literals have to be declared as "var"
+ }
+ for _, s := range gen.Specs {
+ valueSpec := s.(*ast.ValueSpec)
+ for idx, name := range valueSpec.Names {
+ obj := pass.TypesInfo.Defs[name]
+ if obj == nil {
+ continue
+ }
+
+ mapType, ok := obj.Type().(*types.Map)
+ if !ok {
+ continue
+ }
+
+ keyType, ok := mapType.Key().(*types.Named)
+ if !ok {
+ continue
+ }
+ keyPkg := keyType.Obj().Pkg()
+ if keyPkg == nil {
+ // Doc comment: nil for labels and objects in the Universe scope.
+ // This happens for the `error` type, for example.
+ // Continuing would mean that ImportPackageFact panics.
+ continue
+ }
+
+ var enums enumsFact
+ if !pass.ImportPackageFact(keyPkg, &enums) {
+ // Can't do anything further.
+ continue
+ }
+
+ enumMembers, ok := enums.Entries[keyType.Obj().Name()]
+ if !ok {
+ // Key type is not a known enum.
+ continue
+ }
+
+ // Check comments for the ignore directive.
+
+ var allComments ast.CommentMap
+ if cm, ok := comments[f]; ok {
+ allComments = cm
+ } else {
+ allComments = ast.NewCommentMap(pass.Fset, f, f.Comments)
+ comments[f] = allComments
+ }
+
+ genDeclComments := allComments.Filter(gen)
+ genDeclIgnore := false
+ for _, group := range genDeclComments.Comments() {
+ if containsIgnoreDirective(group.List) && gen.Lparen == token.NoPos && len(gen.Specs) == 1 {
+ genDeclIgnore = true
+ break
+ }
+ }
+ if genDeclIgnore {
+ continue
+ }
+
+ if (valueSpec.Doc != nil && containsIgnoreDirective(valueSpec.Doc.List)) ||
+ (valueSpec.Comment != nil && containsIgnoreDirective(valueSpec.Comment.List)) {
+ continue
+ }
+
+ samePkg := keyPkg == pass.Pkg
+ checkUnexported := samePkg
+
+ hitlist := hitlistFromEnumMembers(enumMembers, checkUnexported)
+ if len(hitlist) == 0 {
+ // can happen if external package and enum consists only of
+ // unexported members
+ continue
+ }
+
+ if !(len(valueSpec.Values) > idx) {
+ continue // no value for name
+ }
+ comp, ok := valueSpec.Values[idx].(*ast.CompositeLit)
+ if !ok {
+ continue
+ }
+ for _, el := range comp.Elts {
+ kvExpr, ok := el.(*ast.KeyValueExpr)
+ if !ok {
+ continue
+ }
+ e := astutil.Unparen(kvExpr.Key)
+ if samePkg {
+ ident, ok := e.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ delete(hitlist, ident.Name)
+ } else {
+ selExpr, ok := e.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+
+ // ensure X is package identifier
+ ident, ok := selExpr.X.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if !isPackageNameIdentifier(pass, ident) {
+ continue
+ }
+
+ delete(hitlist, selExpr.Sel.Name)
+ }
+ }
+
+ if len(hitlist) > 0 {
+ reportMapLiteral(pass, name, samePkg, keyType, hitlist)
+ }
+ }
+ }
+ }
+ }
+}
+
+func reportMapLiteral(pass *analysis.Pass, mapVarIdent *ast.Ident, samePkg bool, enumType *types.Named, missingMembers map[string]struct{}) {
+ missing := make([]string, 0, len(missingMembers))
+ for m := range missingMembers {
+ missing = append(missing, m)
+ }
+ sort.Strings(missing)
+
+ pass.Report(analysis.Diagnostic{
+ Pos: mapVarIdent.Pos(),
+ Message: fmt.Sprintf("missing keys in map %s of key type %s: %s", mapVarIdent.Name, enumTypeName(enumType, samePkg), strings.Join(missing, ", ")),
+ })
+}
diff --git a/vendor/github.com/nishanths/exhaustive/switch.go b/vendor/github.com/nishanths/exhaustive/switch.go
new file mode 100644
index 00000000..5889c293
--- /dev/null
+++ b/vendor/github.com/nishanths/exhaustive/switch.go
@@ -0,0 +1,367 @@
+package exhaustive
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+func isDefaultCase(c *ast.CaseClause) bool {
+ return c.List == nil // see doc comment on field
+}
+
+func checkSwitchStatements(pass *analysis.Pass, inspect *inspector.Inspector, comments map[*ast.File]ast.CommentMap) {
+ inspect.WithStack([]ast.Node{&ast.SwitchStmt{}}, func(n ast.Node, push bool, stack []ast.Node) bool {
+ if !push {
+ return true
+ }
+ sw := n.(*ast.SwitchStmt)
+ if sw.Tag == nil {
+ return true
+ }
+ t := pass.TypesInfo.Types[sw.Tag]
+ if !t.IsValue() {
+ return true
+ }
+ tagType, ok := t.Type.(*types.Named)
+ if !ok {
+ return true
+ }
+
+ tagPkg := tagType.Obj().Pkg()
+ if tagPkg == nil {
+ // Doc comment: nil for labels and objects in the Universe scope.
+ // This happens for the `error` type, for example.
+ // Continuing would mean that ImportPackageFact panics.
+ return true
+ }
+
+ var enums enumsFact
+ if !pass.ImportPackageFact(tagPkg, &enums) {
+ // Can't do anything further.
+ return true
+ }
+
+ enumMembers, isEnum := enums.Entries[tagType.Obj().Name()]
+ if !isEnum {
+ // Tag's type is not a known enum.
+ return true
+ }
+
+ // Get comment map.
+ file := stack[0].(*ast.File)
+ var allComments ast.CommentMap
+ if cm, ok := comments[file]; ok {
+ allComments = cm
+ } else {
+ allComments = ast.NewCommentMap(pass.Fset, file, file.Comments)
+ comments[file] = allComments
+ }
+
+ specificComments := allComments.Filter(sw)
+ for _, group := range specificComments.Comments() {
+ if containsIgnoreDirective(group.List) {
+ return true // skip checking due to ignore directive
+ }
+ }
+
+ samePkg := tagPkg == pass.Pkg
+ checkUnexported := samePkg
+
+ hitlist := hitlistFromEnumMembers(enumMembers, checkUnexported)
+ if len(hitlist) == 0 {
+ // can happen if the enum is in an external package and consists only of
+ // unexported members
+ return true
+ }
+
+ defaultCaseExists := false
+ for _, stmt := range sw.Body.List {
+ caseCl := stmt.(*ast.CaseClause)
+ if isDefaultCase(caseCl) {
+ defaultCaseExists = true
+ continue // nothing more to do if it's the default case
+ }
+ for _, e := range caseCl.List {
+ e = astutil.Unparen(e)
+ if samePkg {
+ ident, ok := e.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ delete(hitlist, ident.Name)
+ } else {
+ selExpr, ok := e.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+
+ // ensure X is package identifier
+ ident, ok := selExpr.X.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if !isPackageNameIdentifier(pass, ident) {
+ continue
+ }
+
+ delete(hitlist, selExpr.Sel.Name)
+ }
+ }
+ }
+
+ defaultSuffices := fDefaultSignifiesExhaustive && defaultCaseExists
+ shouldReport := len(hitlist) > 0 && !defaultSuffices
+
+ if shouldReport {
+ reportSwitch(pass, sw, samePkg, tagType, hitlist, defaultCaseExists, file)
+ }
+ return true
+ })
+}
+
+func isPackageNameIdentifier(pass *analysis.Pass, ident *ast.Ident) bool {
+ obj := pass.TypesInfo.ObjectOf(ident)
+ if obj == nil {
+ return false
+ }
+ _, ok := obj.(*types.PkgName)
+ return ok
+}
+
+func hitlistFromEnumMembers(enumMembers []string, checkUnexported bool) map[string]struct{} {
+ hitlist := make(map[string]struct{})
+ for _, m := range enumMembers {
+ if m == "_" {
+ // blank identifier is often used to skip entries in iota lists
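+ // (for example, in "const ( A T = iota; _; B )" the blank entry skips a value)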
+ continue
+ }
+ if ast.IsExported(m) || checkUnexported {
+ hitlist[m] = struct{}{}
+ }
+ }
+ return hitlist
+}
+
+func reportSwitch(pass *analysis.Pass, sw *ast.SwitchStmt, samePkg bool, enumType *types.Named, missingMembers map[string]struct{}, defaultCaseExists bool, f *ast.File) {
+ missing := make([]string, 0, len(missingMembers))
+ for m := range missingMembers {
+ missing = append(missing, m)
+ }
+ sort.Strings(missing)
+
+ var fixes []analysis.SuggestedFix
+ if !defaultCaseExists {
+ if fix, ok := computeFix(pass, pass.Fset, f, sw, enumType, samePkg, missingMembers); ok {
+ fixes = append(fixes, fix)
+ }
+ }
+
+ pass.Report(analysis.Diagnostic{
+ Pos: sw.Pos(),
+ End: sw.End(),
+ Message: fmt.Sprintf("missing cases in switch of type %s: %s", enumTypeName(enumType, samePkg), strings.Join(missing, ", ")),
+ SuggestedFixes: fixes,
+ })
+}
+
+func computeFix(pass *analysis.Pass, fset *token.FileSet, f *ast.File, sw *ast.SwitchStmt, enumType *types.Named, samePkg bool, missingMembers map[string]struct{}) (analysis.SuggestedFix, bool) {
+ // Function and method calls may be mutative, so we don't want to reuse the
+ // call expression in the about-to-be-inserted case clause body. So we just
+ // don't suggest a fix in such situations.
+ //
+ // However, we need to make an exception for type conversions, which are
+ // also call expressions in the AST.
+ //
+ // We'll need to look up type information for this, and can't rely solely
+ // on the AST.
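+ //
+ // For example (illustrative identifiers): a fix can still be computed when the
+ // tag is a type conversion such as MyEnum(v), but not when it is a call such
+ // as nextValue(), whose result may change between evaluations.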
+ if containsFuncCall(pass, sw.Tag) {
+ return analysis.SuggestedFix{}, false
+ }
+
+ textEdits := []analysis.TextEdit{
+ missingCasesTextEdit(fset, f, samePkg, sw, enumType, missingMembers),
+ }
+
+ // need to add "fmt" import if "fmt" import doesn't already exist
+ if !hasImportWithPath(fset, f, `"fmt"`) {
+ textEdits = append(textEdits, fmtImportTextEdit(fset, f))
+ }
+
+ missing := make([]string, 0, len(missingMembers))
+ for m := range missingMembers {
+ missing = append(missing, m)
+ }
+ sort.Strings(missing)
+
+ return analysis.SuggestedFix{
+ Message: fmt.Sprintf("add case clause for: %s?", strings.Join(missing, ", ")),
+ TextEdits: textEdits,
+ }, true
+}
+
+func containsFuncCall(pass *analysis.Pass, e ast.Expr) bool {
+ e = astutil.Unparen(e)
+ c, ok := e.(*ast.CallExpr)
+ if !ok {
+ return false
+ }
+ if _, isFunc := pass.TypesInfo.TypeOf(c.Fun).Underlying().(*types.Signature); isFunc {
+ return true
+ }
+ for _, a := range c.Args {
+ if containsFuncCall(pass, a) {
+ return true
+ }
+ }
+ return false
+}
+
+func firstImportDecl(fset *token.FileSet, f *ast.File) *ast.GenDecl {
+ for _, decl := range f.Decls {
+ genDecl, ok := decl.(*ast.GenDecl)
+ if ok && genDecl.Tok == token.IMPORT {
+ // first IMPORT GenDecl
+ return genDecl
+ }
+ }
+ return nil
+}
+
+// copyGenDecl copies a GenDecl in a manner such that appending to the returned
+// GenDecl's Specs field doesn't mutate the original GenDecl.
+func copyGenDecl(im *ast.GenDecl) *ast.GenDecl {
+ imCopy := *im
+ imCopy.Specs = make([]ast.Spec, len(im.Specs))
+ for i := range im.Specs {
+ imCopy.Specs[i] = im.Specs[i]
+ }
+ return &imCopy
+}
+
+func hasImportWithPath(fset *token.FileSet, f *ast.File, pathLiteral string) bool {
+ igroups := astutil.Imports(fset, f)
+ for _, igroup := range igroups {
+ for _, importSpec := range igroup {
+ if importSpec.Path.Value == pathLiteral {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func fmtImportTextEdit(fset *token.FileSet, f *ast.File) analysis.TextEdit {
+ firstDecl := firstImportDecl(fset, f)
+
+ if firstDecl == nil {
+ // file has no import declarations
+ // insert "fmt" import spec after package statement
+ return analysis.TextEdit{
+ Pos: f.Name.End() + 1, // end of package name + 1
+ End: f.Name.End() + 1,
+ NewText: []byte(`import (
+ "fmt"
+ )`),
+ }
+ }
+
+ // copy because we'll be mutating its Specs field
+ firstDeclCopy := copyGenDecl(firstDecl)
+
+ // find insertion index for "fmt" import spec
+ var i int
+ for ; i < len(firstDeclCopy.Specs); i++ {
+ im := firstDeclCopy.Specs[i].(*ast.ImportSpec)
+ if v, _ := strconv.Unquote(im.Path.Value); v > "fmt" {
+ break
+ }
+ }
+
+ // insert "fmt" import spec at the index
+ fmtSpec := &ast.ImportSpec{
+ Path: &ast.BasicLit{
+ // NOTE: Pos field doesn't seem to be required for our
+ // purposes here.
+ Kind: token.STRING,
+ Value: `"fmt"`,
+ },
+ }
+ s := firstDeclCopy.Specs // local var for easier comprehension of next line
+ s = append(s[:i], append([]ast.Spec{fmtSpec}, s[i:]...)...)
+ firstDeclCopy.Specs = s
+
+ // create the text edit
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, firstDeclCopy)
+
+ return analysis.TextEdit{
+ Pos: firstDecl.Pos(),
+ End: firstDecl.End(),
+ NewText: buf.Bytes(),
+ }
+}
+
+func missingCasesTextEdit(fset *token.FileSet, f *ast.File, samePkg bool, sw *ast.SwitchStmt, enumType *types.Named, missingMembers map[string]struct{}) analysis.TextEdit {
+ // ... Construct insertion text for case clause and its body ...
+
+ var tag bytes.Buffer
+ printer.Fprint(&tag, fset, sw.Tag)
+
+ // If possible and if necessary, determine the package identifier based on the AST of other `case` clauses.
+ var pkgIdent *ast.Ident
+ if !samePkg {
+ for _, stmt := range sw.Body.List {
+ caseCl := stmt.(*ast.CaseClause)
+ // At least one expression must exist in List at this point.
+ // List cannot be nil because we only arrive here if the "default" clause
+ // does not exist. Additionally, a syntactically valid case clause must
+ // have at least one expression.
+ if sel, ok := caseCl.List[0].(*ast.SelectorExpr); ok {
+ pkgIdent = sel.X.(*ast.Ident)
+ break
+ }
+ }
+ }
+
+ missing := make([]string, 0, len(missingMembers))
+ for m := range missingMembers {
+ if !samePkg {
+ if pkgIdent != nil {
+ // we were able to determine package identifier
+ missing = append(missing, pkgIdent.Name+"."+m)
+ } else {
+ // use the package name (may not always be correct)
+ //
+ // TODO: May need to also add import if the package isn't imported
+ // elsewhere. This (ie, a switch with zero case clauses) should
+ // happen rarely, so don't implement this for now.
+ missing = append(missing, enumType.Obj().Pkg().Name()+"."+m)
+ }
+ } else {
+ missing = append(missing, m)
+ }
+ }
+ sort.Strings(missing)
+
+ insert := `case ` + strings.Join(missing, ", ") + `:
+ panic(fmt.Sprintf("unhandled value: %v",` + tag.String() + `))`
+
+ // ... Create the text edit ...
+
+ return analysis.TextEdit{
+ Pos: sw.Body.Rbrace - 1,
+ End: sw.Body.Rbrace - 1,
+ NewText: []byte(insert),
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore
new file mode 100644
index 00000000..7b588347
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/.dockerignore
@@ -0,0 +1,2 @@
+cmd/tomll/tomll
+cmd/tomljson/tomljson
diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore
index 99e38bbc..e6ba63a5 100644
--- a/vendor/github.com/pelletier/go-toml/.gitignore
+++ b/vendor/github.com/pelletier/go-toml/.gitignore
@@ -1,2 +1,5 @@
test_program/test_program_bin
fuzz/
+cmd/tomll/tomll
+cmd/tomljson/tomljson
+cmd/tomltestgen/tomltestgen
diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml
deleted file mode 100644
index c9fbf304..00000000
--- a/vendor/github.com/pelletier/go-toml/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - tip
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-script:
- - if [ -n "$(go fmt ./...)" ]; then exit 1; fi
- - ./test.sh
- - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git
-before_install:
- - go get github.com/axw/gocov/gocov
- - go get github.com/mattn/goveralls
- - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-branches:
- only: [master]
-after_success:
- - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
new file mode 100644
index 00000000..405c911c
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
@@ -0,0 +1,132 @@
+## Contributing
+
+Thank you for your interest in go-toml! We appreciate you considering
+contributing to go-toml!
+
+The main goal of the project is to provide an easy-to-use TOML
+implementation for Go that gets the job done and gets out of your way –
+dealing with TOML is probably not the central piece of your project.
+
+go-toml has a single maintainer, so time is scarce. All help, big or
+small, is more than welcome!
+
+### Ask questions
+
+Any question you may have, somebody else might have it too. Always feel
+free to ask them on the [issues tracker][issues-tracker]. We will try to
+answer them as clearly and quickly as possible, time permitting.
+
+Asking questions also helps us identify areas where the documentation needs
+improvement, or new features that weren't envisioned before. Sometimes, a
+seemingly innocent question leads to the fix of a bug. Don't hesitate and
+ask away!
+
+### Improve the documentation
+
+The best way to share your knowledge and experience with go-toml is to
+improve the documentation. Fix a typo, clarify an interface, add an
+example, anything goes!
+
+The documentation is present in the [README][readme] and throughout the
+source code. On release, it gets updated on [GoDoc][godoc]. To make a
+change to the documentation, create a pull request with your proposed
+changes. For simple changes like that, the easiest way to go is probably
+the "Fork this project and edit the file" button on Github, displayed at
+the top right of the file. Unless it's a trivial change (for example a
+typo), provide a little bit of context in your pull request description or
+commit message.
+
+### Report a bug
+
+Found a bug! Sorry to hear that :(. Help us and others track it down and
+fix it by reporting it. [File a new bug report][bug-report] on the [issues
+tracker][issues-tracker]. The template should provide enough guidance on
+what to include. When in doubt, add more details! Reducing ambiguity and
+providing more information decreases back-and-forth and saves everyone
+time.
+
+### Code changes
+
+Want to contribute a patch? Very happy to hear that!
+
+First, some high-level rules:
+
+* A short proposal with some POC code is better than a lengthy piece of
+ text with no code. Code speaks louder than words.
+* No backward-incompatible patch will be accepted unless discussed.
+ Sometimes it's hard, and Go's lack of versioning by default does not
+ help, but we try not to break people's programs unless we absolutely have
+ to.
+* If you are writing a new feature or extending an existing one, make sure
+ to write some documentation.
+* Bug fixes need to be accompanied with regression tests.
+* New code needs to be tested.
+* Your commit messages need to explain why the change is needed, even if
+ already included in the PR description.
+
+It does sound like a lot, but those best practices are here to save time
+overall and continuously improve the quality of the project, which is
+something everyone benefits from.
+
+#### Get started
+
+The fairly standard code contribution process looks like this:
+
+1. [Fork the project][fork].
+2. Make your changes, commit on any branch you like.
+3. [Open up a pull request][pull-request].
+4. Review; we may ask for changes.
+5. Merge. You're in!
+
+Feel free to ask for help! You can create draft pull requests to gather
+some early feedback!
+
+#### Run the tests
+
+You can run tests for go-toml using Go's test tool: `go test ./...`.
+When creating a pull request, all tests will be run on Linux on a few Go
+versions (Travis CI), and on Windows using the latest Go version
+(AppVeyor).
+
+#### Style
+
+Try to look around and follow the same format and structure as the rest of
+the code. We enforce using `go fmt` on the whole code base.
+
+---
+
+### Maintainers-only
+
+#### Merge pull request
+
+Checklist:
+
+* Passing CI.
+* Does not introduce backward-incompatible changes (unless discussed).
+* Has relevant doc changes.
+* Has relevant unit tests.
+
+1. Merge using "squash and merge".
+2. Make sure to edit the commit message to keep all the useful information
+ nice and clean.
+3. Make sure the commit title is clear and contains the PR number (#123).
+
+#### New release
+
+1. Go to [releases][releases]. Click on "X commits to master since this
+ release".
+2. Make note of all the changes. Look for backward incompatible changes,
+ new features, and bug fixes.
+3. Pick the new version using the above and semver.
+4. Create a [new release][new-release].
+5. Follow the same format as [1.1.0][release-110].
+
+[issues-tracker]: https://github.com/pelletier/go-toml/issues
+[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md
+[godoc]: https://godoc.org/github.com/pelletier/go-toml
+[readme]: ./README.md
+[fork]: https://help.github.com/articles/fork-a-repo
+[pull-request]: https://help.github.com/en/articles/creating-a-pull-request
+[releases]: https://github.com/pelletier/go-toml/releases
+[new-release]: https://github.com/pelletier/go-toml/releases/new
+[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0
diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile
new file mode 100644
index 00000000..fffdb016
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/Dockerfile
@@ -0,0 +1,11 @@
+FROM golang:1.12-alpine3.9 as builder
+WORKDIR /go/src/github.com/pelletier/go-toml
+COPY . .
+ENV CGO_ENABLED=0
+ENV GOOS=linux
+RUN go install ./...
+
+FROM scratch
+COPY --from=builder /go/bin/tomll /usr/bin/tomll
+COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
+COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml
diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile
new file mode 100644
index 00000000..9e4503ae
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/Makefile
@@ -0,0 +1,29 @@
+export CGO_ENABLED=0
+go := go
+go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1)
+go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2)
+
+out.tools := tomll tomljson jsontoml
+out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz)
+sources := $(wildcard **/*.go)
+
+
+.PHONY:
+tools: $(out.tools)
+
+$(out.tools): $(sources)
+ GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@
+
+.PHONY:
+dist: $(out.dist)
+
+$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: %
+ if [ "$(go.goos)" = "windows" ]; then \
+ tar -cJf $@ $^.exe; \
+ else \
+ tar -cJf $@ $^; \
+ fi
+
+.PHONY:
+clean:
+ rm -rf $(out.tools) $(out.dist)
diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..041cdc4a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,5 @@
+**Issue:** add link to pelletier/go-toml issue here
+
+Explanation of what this pull request does.
+
+More detailed description of the decisions being made and the reasons why (if the patch is non-trivial).
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
index 0d357acf..6831deb5 100644
--- a/vendor/github.com/pelletier/go-toml/README.md
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -3,13 +3,14 @@
Go library for the [TOML](https://github.com/mojombo/toml) format.
This library supports TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+[v1.0.0-rc.1](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v1.0.0-rc.1.md)
[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
-[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
-[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
+[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master)
+[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
## Features
@@ -17,7 +18,7 @@ Go-toml provides the following features for using data parsed from TOML document
* Load TOML documents from files and string data
* Easily navigate TOML structure using Tree
-* Mashaling and unmarshaling to and from data structures
+* Marshaling and unmarshaling to and from data structures
* Line & column position data for all parsed elements
* [Query support similar to JSON-Path](query/)
* Syntax errors contain line and column numbers
@@ -73,7 +74,7 @@ Or use a query:
q, _ := query.Compile("$..[user,password]")
results := q.Execute(config)
for ii, item := range results.Values() {
- fmt.Println("Query result %d: %v", ii, item)
+ fmt.Printf("Query result %d: %v\n", ii, item)
}
```
@@ -86,7 +87,7 @@ The documentation and additional examples are available at
Go-toml provides two handy command line tools:
-* `tomll`: Reads TOML files and lint them.
+* `tomll`: Reads TOML files and lints them.
```
go install github.com/pelletier/go-toml/cmd/tomll
@@ -99,6 +100,30 @@ Go-toml provides two handy command line tools:
tomljson --help
```
+ * `jsontoml`: Reads a JSON file and outputs a TOML representation.
+
+ ```
+ go install github.com/pelletier/go-toml/cmd/jsontoml
+ jsontoml --help
+ ```
+
+### Docker image
+
+These tools are also available as a Docker image from
+[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to
+use `tomljson`:
+
+```
+docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml
+```
+
+Only master (`latest`) and tagged versions are published to dockerhub. You
+can build your own image as usual:
+
+```
+docker build -t go-toml .
+```
+
## Contribute
Feel free to report bugs and patches using GitHub's pull requests system on
@@ -107,12 +132,7 @@ much appreciated!
### Run tests
-You have to make sure two kind of tests run:
-
-1. The Go unit tests
-2. The TOML examples base
-
-You can run both of them using `./test.sh`.
+`go test ./...`
### Fuzzing
diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
new file mode 100644
index 00000000..ff5376b0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
@@ -0,0 +1,230 @@
+trigger:
+- master
+
+stages:
+- stage: fuzzit
+ displayName: "Run Fuzzit"
+ dependsOn: []
+ condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
+ jobs:
+ - job: submit
+ displayName: "Submit"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.15"
+ inputs:
+ version: "1.15"
+ - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
+ - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml
+ - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml
+ - task: Bash@3
+ inputs:
+ filePath: './fuzzit.sh'
+ env:
+ TYPE: fuzzing
+ FUZZIT_API_KEY: $(FUZZIT_API_KEY)
+
+- stage: run_checks
+ displayName: "Check"
+ dependsOn: []
+ jobs:
+ - job: fmt
+ displayName: "fmt"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.15"
+ inputs:
+ version: "1.15"
+ - task: Go@0
+ displayName: "go fmt ./..."
+ inputs:
+ command: 'custom'
+ customCommand: 'fmt'
+ arguments: './...'
+ - job: coverage
+ displayName: "coverage"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.15"
+ inputs:
+ version: "1.15"
+ - task: Go@0
+ displayName: "Generate coverage"
+ inputs:
+ command: 'test'
+ arguments: "-race -coverprofile=coverage.txt -covermode=atomic"
+ - task: Bash@3
+ inputs:
+ targetType: 'inline'
+ script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}'
+ env:
+ CODECOV_TOKEN: $(CODECOV_TOKEN)
+ - job: benchmark
+ displayName: "benchmark"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.15"
+ inputs:
+ version: "1.15"
+ - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
+ - task: Bash@3
+ inputs:
+ filePath: './benchmark.sh'
+ arguments: "master $(Build.Repository.Uri)"
+
+ - job: fuzzing
+ displayName: "fuzzing"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go 1.15"
+ inputs:
+ version: "1.15"
+ - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
+ - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml
+ - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml
+ - task: Bash@3
+ inputs:
+ filePath: './fuzzit.sh'
+ env:
+ TYPE: local-regression
+
+ - job: go_unit_tests
+ displayName: "unit tests"
+ strategy:
+ matrix:
+ linux 1.15:
+ goVersion: '1.15'
+ imageName: 'ubuntu-latest'
+ mac 1.15:
+ goVersion: '1.15'
+ imageName: 'macOS-latest'
+ windows 1.15:
+ goVersion: '1.15'
+ imageName: 'windows-latest'
+ linux 1.14:
+ goVersion: '1.14'
+ imageName: 'ubuntu-latest'
+ mac 1.14:
+ goVersion: '1.14'
+ imageName: 'macOS-latest'
+ windows 1.14:
+ goVersion: '1.14'
+ imageName: 'windows-latest'
+ pool:
+ vmImage: $(imageName)
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go $(goVersion)"
+ inputs:
+ version: $(goVersion)
+ - task: Go@0
+ displayName: "go test ./..."
+ inputs:
+ command: 'test'
+ arguments: './...'
+- stage: build_binaries
+ displayName: "Build binaries"
+ dependsOn: run_checks
+ jobs:
+ - job: build_binary
+ displayName: "Build binary"
+ strategy:
+ matrix:
+ linux_amd64:
+ GOOS: linux
+ GOARCH: amd64
+ darwin_amd64:
+ GOOS: darwin
+ GOARCH: amd64
+ windows_amd64:
+ GOOS: windows
+ GOARCH: amd64
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: GoTool@0
+ displayName: "Install Go"
+ inputs:
+ version: 1.15
+ - task: Bash@3
+ inputs:
+ targetType: inline
+ script: "make dist"
+ env:
+ go.goos: $(GOOS)
+ go.goarch: $(GOARCH)
+ - task: CopyFiles@2
+ inputs:
+ sourceFolder: '$(Build.SourcesDirectory)'
+ contents: '*.tar.xz'
+ TargetFolder: '$(Build.ArtifactStagingDirectory)'
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+ artifactName: binaries
+- stage: build_binaries_manifest
+ displayName: "Build binaries manifest"
+ dependsOn: build_binaries
+ jobs:
+ - job: build_manifest
+ displayName: "Build binaries manifest"
+ steps:
+ - task: DownloadBuildArtifacts@0
+ inputs:
+ buildType: 'current'
+ downloadType: 'single'
+ artifactName: 'binaries'
+ downloadPath: '$(Build.SourcesDirectory)'
+ - task: Bash@3
+ inputs:
+ targetType: inline
+ script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt"
+ - task: PublishBuildArtifacts@1
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+ artifactName: manifest
+
+- stage: build_docker_image
+ displayName: "Build Docker image"
+ dependsOn: run_checks
+ jobs:
+ - job: build
+ displayName: "Build"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: Docker@2
+ inputs:
+ command: 'build'
+ Dockerfile: 'Dockerfile'
+ buildContext: '.'
+ addPipelineData: false
+
+- stage: publish_docker_image
+ displayName: "Publish Docker image"
+ dependsOn: build_docker_image
+ condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
+ jobs:
+ - job: publish
+ displayName: "Publish"
+ pool:
+ vmImage: ubuntu-latest
+ steps:
+ - task: Docker@2
+ inputs:
+ containerRegistry: 'DockerHub'
+ repository: 'pelletier/go-toml'
+ command: 'buildAndPush'
+ Dockerfile: 'Dockerfile'
+ buildContext: '.'
+ tags: 'latest'
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json
deleted file mode 100644
index 86f99c6a..00000000
--- a/vendor/github.com/pelletier/go-toml/benchmark.json
+++ /dev/null
@@ -1,164 +0,0 @@
-{
- "array": {
- "key1": [
- 1,
- 2,
- 3
- ],
- "key2": [
- "red",
- "yellow",
- "green"
- ],
- "key3": [
- [
- 1,
- 2
- ],
- [
- 3,
- 4,
- 5
- ]
- ],
- "key4": [
- [
- 1,
- 2
- ],
- [
- "a",
- "b",
- "c"
- ]
- ],
- "key5": [
- 1,
- 2,
- 3
- ],
- "key6": [
- 1,
- 2
- ]
- },
- "boolean": {
- "False": false,
- "True": true
- },
- "datetime": {
- "key1": "1979-05-27T07:32:00Z",
- "key2": "1979-05-27T00:32:00-07:00",
- "key3": "1979-05-27T00:32:00.999999-07:00"
- },
- "float": {
- "both": {
- "key": 6.626e-34
- },
- "exponent": {
- "key1": 5e+22,
- "key2": 1000000,
- "key3": -0.02
- },
- "fractional": {
- "key1": 1,
- "key2": 3.1415,
- "key3": -0.01
- },
- "underscores": {
- "key1": 9224617.445991227,
- "key2": 1e+100
- }
- },
- "fruit": [{
- "name": "apple",
- "physical": {
- "color": "red",
- "shape": "round"
- },
- "variety": [{
- "name": "red delicious"
- },
- {
- "name": "granny smith"
- }
- ]
- },
- {
- "name": "banana",
- "variety": [{
- "name": "plantain"
- }]
- }
- ],
- "integer": {
- "key1": 99,
- "key2": 42,
- "key3": 0,
- "key4": -17,
- "underscores": {
- "key1": 1000,
- "key2": 5349221,
- "key3": 12345
- }
- },
- "products": [{
- "name": "Hammer",
- "sku": 738594937
- },
- {},
- {
- "color": "gray",
- "name": "Nail",
- "sku": 284758393
- }
- ],
- "string": {
- "basic": {
- "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
- },
- "literal": {
- "multiline": {
- "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n",
- "regex2": "I [dw]on't need \\d{2} apples"
- },
- "quoted": "Tom \"Dubs\" Preston-Werner",
- "regex": "\u003c\\i\\c*\\s*\u003e",
- "winpath": "C:\\Users\\nodejs\\templates",
- "winpath2": "\\\\ServerX\\admin$\\system32\\"
- },
- "multiline": {
- "continued": {
- "key1": "The quick brown fox jumps over the lazy dog.",
- "key2": "The quick brown fox jumps over the lazy dog.",
- "key3": "The quick brown fox jumps over the lazy dog."
- },
- "key1": "One\nTwo",
- "key2": "One\nTwo",
- "key3": "One\nTwo"
- }
- },
- "table": {
- "inline": {
- "name": {
- "first": "Tom",
- "last": "Preston-Werner"
- },
- "point": {
- "x": 1,
- "y": 2
- }
- },
- "key": "value",
- "subtable": {
- "key": "another value"
- }
- },
- "x": {
- "y": {
- "z": {
- "w": {}
- }
- }
- }
-}
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
index 8b8bb528..a69d3040 100644
--- a/vendor/github.com/pelletier/go-toml/benchmark.sh
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-set -e
+set -ex
reference_ref=${1:-master}
reference_git=${2:-.}
@@ -8,7 +8,6 @@ reference_git=${2:-.}
if ! `hash benchstat 2>/dev/null`; then
echo "Installing benchstat"
go get golang.org/x/perf/cmd/benchstat
- go install golang.org/x/perf/cmd/benchstat
fi
tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
@@ -21,12 +20,16 @@ git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
pushd ${ref_tempdir} >/dev/null
git checkout ${reference_ref} >/dev/null 2>/dev/null
go test -bench=. -benchmem | tee ${ref_benchmark}
+cd benchmark
+go test -bench=. -benchmem | tee -a ${ref_benchmark}
popd >/dev/null
echo ""
echo "=== local"
go test -bench=. -benchmem | tee ${local_benchmark}
+cd benchmark
+go test -bench=. -benchmem | tee -a ${local_benchmark}
echo ""
echo "=== diff"
-benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
\ No newline at end of file
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml
deleted file mode 100644
index dfd77e09..00000000
--- a/vendor/github.com/pelletier/go-toml/benchmark.toml
+++ /dev/null
@@ -1,244 +0,0 @@
-################################################################################
-## Comment
-
-# Speak your mind with the hash symbol. They go from the symbol to the end of
-# the line.
-
-
-################################################################################
-## Table
-
-# Tables (also known as hash tables or dictionaries) are collections of
-# key/value pairs. They appear in square brackets on a line by themselves.
-
-[table]
-
-key = "value" # Yeah, you can do this.
-
-# Nested tables are denoted by table names with dots in them. Name your tables
-# whatever crap you please, just don't use #, ., [ or ].
-
-[table.subtable]
-
-key = "another value"
-
-# You don't need to specify all the super-tables if you don't want to. TOML
-# knows how to do it for you.
-
-# [x] you
-# [x.y] don't
-# [x.y.z] need these
-[x.y.z.w] # for this to work
-
-
-################################################################################
-## Inline Table
-
-# Inline tables provide a more compact syntax for expressing tables. They are
-# especially useful for grouped data that can otherwise quickly become verbose.
-# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
-# allowed between the curly braces unless they are valid within a value.
-
-[table.inline]
-
-name = { first = "Tom", last = "Preston-Werner" }
-point = { x = 1, y = 2 }
-
-
-################################################################################
-## String
-
-# There are four ways to express strings: basic, multi-line basic, literal, and
-# multi-line literal. All strings must contain only valid UTF-8 characters.
-
-[string.basic]
-
-basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
-
-[string.multiline]
-
-# The following strings are byte-for-byte equivalent:
-key1 = "One\nTwo"
-key2 = """One\nTwo"""
-key3 = """
-One
-Two"""
-
-[string.multiline.continued]
-
-# The following strings are byte-for-byte equivalent:
-key1 = "The quick brown fox jumps over the lazy dog."
-
-key2 = """
-The quick brown \
-
-
- fox jumps over \
- the lazy dog."""
-
-key3 = """\
- The quick brown \
- fox jumps over \
- the lazy dog.\
- """
-
-[string.literal]
-
-# What you see is what you get.
-winpath = 'C:\Users\nodejs\templates'
-winpath2 = '\\ServerX\admin$\system32\'
-quoted = 'Tom "Dubs" Preston-Werner'
-regex = '<\i\c*\s*>'
-
-
-[string.literal.multiline]
-
-regex2 = '''I [dw]on't need \d{2} apples'''
-lines = '''
-The first newline is
-trimmed in raw strings.
- All other whitespace
- is preserved.
-'''
-
-
-################################################################################
-## Integer
-
-# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
-# Negative numbers are prefixed with a minus sign.
-
-[integer]
-
-key1 = +99
-key2 = 42
-key3 = 0
-key4 = -17
-
-[integer.underscores]
-
-# For large numbers, you may use underscores to enhance readability. Each
-# underscore must be surrounded by at least one digit.
-key1 = 1_000
-key2 = 5_349_221
-key3 = 1_2_3_4_5 # valid but inadvisable
-
-
-################################################################################
-## Float
-
-# A float consists of an integer part (which may be prefixed with a plus or
-# minus sign) followed by a fractional part and/or an exponent part.
-
-[float.fractional]
-
-key1 = +1.0
-key2 = 3.1415
-key3 = -0.01
-
-[float.exponent]
-
-key1 = 5e+22
-key2 = 1e6
-key3 = -2E-2
-
-[float.both]
-
-key = 6.626e-34
-
-[float.underscores]
-
-key1 = 9_224_617.445_991_228_313
-key2 = 1e1_00
-
-
-################################################################################
-## Boolean
-
-# Booleans are just the tokens you're used to. Always lowercase.
-
-[boolean]
-
-True = true
-False = false
-
-
-################################################################################
-## Datetime
-
-# Datetimes are RFC 3339 dates.
-
-[datetime]
-
-key1 = 1979-05-27T07:32:00Z
-key2 = 1979-05-27T00:32:00-07:00
-key3 = 1979-05-27T00:32:00.999999-07:00
-
-
-################################################################################
-## Array
-
-# Arrays are square brackets with other primitives inside. Whitespace is
-# ignored. Elements are separated by commas. Data types may not be mixed.
-
-[array]
-
-key1 = [ 1, 2, 3 ]
-key2 = [ "red", "yellow", "green" ]
-key3 = [ [ 1, 2 ], [3, 4, 5] ]
-#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
-
-# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
-# also ignore newlines between the brackets. Terminating commas are ok before
-# the closing bracket.
-
-key5 = [
- 1, 2, 3
-]
-key6 = [
- 1,
- 2, # this is ok
-]
-
-
-################################################################################
-## Array of Tables
-
-# These can be expressed by using a table name in double brackets. Each table
-# with the same double bracketed name will be an element in the array. The
-# tables are inserted in the order encountered.
-
-[[products]]
-
-name = "Hammer"
-sku = 738594937
-
-[[products]]
-
-[[products]]
-
-name = "Nail"
-sku = 284758393
-color = "gray"
-
-
-# You can create nested arrays of tables as well.
-
-[[fruit]]
- name = "apple"
-
- [fruit.physical]
- color = "red"
- shape = "round"
-
- [[fruit.variety]]
- name = "red delicious"
-
- [[fruit.variety]]
- name = "granny smith"
-
-[[fruit]]
- name = "banana"
-
- [[fruit.variety]]
- name = "plantain"
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml
deleted file mode 100644
index 0bd19f08..00000000
--- a/vendor/github.com/pelletier/go-toml/benchmark.yml
+++ /dev/null
@@ -1,121 +0,0 @@
----
-array:
- key1:
- - 1
- - 2
- - 3
- key2:
- - red
- - yellow
- - green
- key3:
- - - 1
- - 2
- - - 3
- - 4
- - 5
- key4:
- - - 1
- - 2
- - - a
- - b
- - c
- key5:
- - 1
- - 2
- - 3
- key6:
- - 1
- - 2
-boolean:
- 'False': false
- 'True': true
-datetime:
- key1: '1979-05-27T07:32:00Z'
- key2: '1979-05-27T00:32:00-07:00'
- key3: '1979-05-27T00:32:00.999999-07:00'
-float:
- both:
- key: 6.626e-34
- exponent:
- key1: 5.0e+22
- key2: 1000000
- key3: -0.02
- fractional:
- key1: 1
- key2: 3.1415
- key3: -0.01
- underscores:
- key1: 9224617.445991227
- key2: 1.0e+100
-fruit:
-- name: apple
- physical:
- color: red
- shape: round
- variety:
- - name: red delicious
- - name: granny smith
-- name: banana
- variety:
- - name: plantain
-integer:
- key1: 99
- key2: 42
- key3: 0
- key4: -17
- underscores:
- key1: 1000
- key2: 5349221
- key3: 12345
-products:
-- name: Hammer
- sku: 738594937
-- {}
-- color: gray
- name: Nail
- sku: 284758393
-string:
- basic:
- basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
- literal:
- multiline:
- lines: |
- The first newline is
- trimmed in raw strings.
- All other whitespace
- is preserved.
- regex2: I [dw]on't need \d{2} apples
- quoted: Tom "Dubs" Preston-Werner
- regex: "<\\i\\c*\\s*>"
- winpath: C:\Users\nodejs\templates
- winpath2: "\\\\ServerX\\admin$\\system32\\"
- multiline:
- continued:
- key1: The quick brown fox jumps over the lazy dog.
- key2: The quick brown fox jumps over the lazy dog.
- key3: The quick brown fox jumps over the lazy dog.
- key1: |-
- One
- Two
- key2: |-
- One
- Two
- key3: |-
- One
- Two
-table:
- inline:
- name:
- first: Tom
- last: Preston-Werner
- point:
- x: 1
- y: 2
- key: value
- subtable:
- key: another value
-x:
- y:
- z:
- w: {}
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
index d5fd98c0..a1406a32 100644
--- a/vendor/github.com/pelletier/go-toml/doc.go
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -1,7 +1,7 @@
// Package toml is a TOML parser and manipulation library.
//
// This version supports the specification as described in
-// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md
//
// Marshaling
//
diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml
index 12950a16..780d9c68 100644
--- a/vendor/github.com/pelletier/go-toml/example-crlf.toml
+++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml
@@ -27,3 +27,4 @@ enabled = true
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml
index 3d902f28..f45bf88b 100644
--- a/vendor/github.com/pelletier/go-toml/example.toml
+++ b/vendor/github.com/pelletier/go-toml/example.toml
@@ -27,3 +27,4 @@ enabled = true
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/fuzzit.sh b/vendor/github.com/pelletier/go-toml/fuzzit.sh
new file mode 100644
index 00000000..b575a608
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzzit.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -xe
+
+# go-fuzz doesn't support modules yet, so ensure we do everything
+# in the old style GOPATH way
+export GO111MODULE="off"
+
+# install go-fuzz
+go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
+
+# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-)
+# to add another target, make sure to create it with `fuzzit create target`
+# before using `fuzzit create job`
+TARGET=toml-fuzzer
+
+go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml
+clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET}
+
+# install fuzzit for talking to fuzzit.dev service
+# or latest version:
+# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64
+wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64
+chmod a+x fuzzit
+
+# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there
+./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET}
diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod
new file mode 100644
index 00000000..e924cb90
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/go.mod
@@ -0,0 +1,5 @@
+module github.com/pelletier/go-toml
+
+go 1.12
+
+require github.com/davecgh/go-spew v1.1.1
diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum
new file mode 100644
index 00000000..6f356470
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/go.sum
@@ -0,0 +1,19 @@
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
index 284db646..e091500b 100644
--- a/vendor/github.com/pelletier/go-toml/keysparsing.go
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -3,83 +3,110 @@
package toml
import (
- "bytes"
"errors"
"fmt"
- "unicode"
)
// Convert the bare key group string to an array.
-// The input supports double quotation to allow "." inside the key name,
+// The input supports double quotation and single quotation,
// but escape sequences are not supported. Lexers must unescape them beforehand.
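+// For example, the dotted key a.'b.c'."d" is parsed into the parts "a", "b.c" and "d".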
func parseKey(key string) ([]string, error) {
- groups := []string{}
- var buffer bytes.Buffer
- inQuotes := false
- wasInQuotes := false
- ignoreSpace := true
- expectDot := false
+ runes := []rune(key)
+ var groups []string
- for _, char := range key {
- if ignoreSpace {
- if char == ' ' {
- continue
- }
- ignoreSpace = false
+ if len(key) == 0 {
+ return nil, errors.New("empty key")
+ }
+
+ idx := 0
+ for idx < len(runes) {
+ for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
+ // skip leading whitespace
}
- switch char {
- case '"':
- if inQuotes {
- groups = append(groups, buffer.String())
- buffer.Reset()
- wasInQuotes = true
- }
- inQuotes = !inQuotes
- expectDot = false
- case '.':
- if inQuotes {
- buffer.WriteRune(char)
- } else {
- if !wasInQuotes {
- if buffer.Len() == 0 {
- return nil, errors.New("empty table key")
+ if idx >= len(runes) {
+ break
+ }
+ r := runes[idx]
+ if isValidBareChar(r) {
+ // parse bare key
+ startIdx := idx
+ endIdx := -1
+ idx++
+ for idx < len(runes) {
+ r = runes[idx]
+ if isValidBareChar(r) {
+ idx++
+ } else if r == '.' {
+ endIdx = idx
+ break
+ } else if isSpace(r) {
+ endIdx = idx
+ for ; idx < len(runes) && isSpace(runes[idx]); idx++ {
+ // skip trailing whitespace
}
- groups = append(groups, buffer.String())
- buffer.Reset()
+ if idx < len(runes) && runes[idx] != '.' {
+ return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx])
+ }
+ break
+ } else {
+ return nil, fmt.Errorf("invalid bare key character: %c", r)
}
- ignoreSpace = true
- expectDot = false
- wasInQuotes = false
}
- case ' ':
- if inQuotes {
- buffer.WriteRune(char)
- } else {
- expectDot = true
+ if endIdx == -1 {
+ endIdx = idx
}
- default:
- if !inQuotes && !isValidBareChar(char) {
- return nil, fmt.Errorf("invalid bare character: %c", char)
+ groups = append(groups, string(runes[startIdx:endIdx]))
+ } else if r == '\'' {
+ // parse single quoted key
+ idx++
+ startIdx := idx
+ for {
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unclosed single-quoted key")
+ }
+ r = runes[idx]
+ if r == '\'' {
+ groups = append(groups, string(runes[startIdx:idx]))
+ idx++
+ break
+ }
+ idx++
}
- if !inQuotes && expectDot {
- return nil, errors.New("what?")
+ } else if r == '"' {
+ // parse double quoted key
+ idx++
+ startIdx := idx
+ for {
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unclosed double-quoted key")
+ }
+ r = runes[idx]
+ if r == '"' {
+ groups = append(groups, string(runes[startIdx:idx]))
+ idx++
+ break
+ }
+ idx++
}
- buffer.WriteRune(char)
- expectDot = false
+ } else if r == '.' {
+ idx++
+ if idx >= len(runes) {
+ return nil, fmt.Errorf("unexpected end of key")
+ }
+ r = runes[idx]
+ if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' {
+ return nil, fmt.Errorf("expecting key part after dot")
+ }
+ } else {
+ return nil, fmt.Errorf("invalid key character: %c", r)
}
}
- if inQuotes {
- return nil, errors.New("mismatched quotes")
- }
- if buffer.Len() > 0 {
- groups = append(groups, buffer.String())
- }
if len(groups) == 0 {
- return nil, errors.New("empty key")
+ return nil, fmt.Errorf("empty key")
}
return groups, nil
}
func isValidBareChar(r rune) bool {
- return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r)
+ return isAlphanumeric(r) || r == '-' || isDigit(r)
}
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index d11de428..b1886192 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -26,7 +26,7 @@ type tomlLexer struct {
currentTokenStart int
currentTokenStop int
tokens []token
- depth int
+ brackets []rune
line int
col int
endbufferLine int
@@ -123,6 +123,8 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn {
for {
next := l.peek()
switch next {
+ case '}': // after '{'
+ return l.lexRightCurlyBrace
case '[':
return l.lexTableKey
case '#':
@@ -140,10 +142,6 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn {
l.skip()
}
- if l.depth > 0 {
- return l.lexRvalue
- }
-
if isKeyStartChar(next) {
return l.lexKey
}
@@ -167,10 +165,8 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
case '=':
return l.lexEqual
case '[':
- l.depth++
return l.lexLeftBracket
case ']':
- l.depth--
return l.lexRightBracket
case '{':
return l.lexLeftCurlyBrace
@@ -188,12 +184,10 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
fallthrough
case '\n':
l.skip()
- if l.depth == 0 {
- return l.lexVoid
+ if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' {
+ return l.lexRvalue
}
- return l.lexRvalue
- case '_':
- return l.errorf("cannot start number with underscore")
+ return l.lexVoid
}
if l.follow("true") {
@@ -223,9 +217,12 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
}
possibleDate := l.peekString(35)
- dateMatch := dateRegexp.FindString(possibleDate)
- if dateMatch != "" {
- l.fastForward(len(dateMatch))
+ dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate)
+ if dateSubmatches != nil && dateSubmatches[0] != "" {
+ l.fastForward(len(dateSubmatches[0]))
+ if dateSubmatches[2] == "" { // no timezone information => local date
+ return l.lexLocalDate
+ }
return l.lexDate
}
@@ -233,10 +230,6 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
return l.lexNumber
}
- if isAlphanumeric(next) {
- return l.lexKey
- }
-
return l.errorf("no value can start with %c", next)
}
@@ -247,12 +240,17 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
l.next()
l.emit(tokenLeftCurlyBrace)
- return l.lexRvalue
+ l.brackets = append(l.brackets, '{')
+ return l.lexVoid
}
func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
l.next()
l.emit(tokenRightCurlyBrace)
+ if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' {
+ return l.errorf("cannot have '}' here")
+ }
+ l.brackets = l.brackets[:len(l.brackets)-1]
return l.lexRvalue
}
@@ -261,6 +259,11 @@ func (l *tomlLexer) lexDate() tomlLexStateFn {
return l.lexRvalue
}
+func (l *tomlLexer) lexLocalDate() tomlLexStateFn {
+ l.emit(tokenLocalDate)
+ return l.lexRvalue
+}
+
func (l *tomlLexer) lexTrue() tomlLexStateFn {
l.fastForward(4)
l.emit(tokenTrue)
@@ -294,13 +297,16 @@ func (l *tomlLexer) lexEqual() tomlLexStateFn {
func (l *tomlLexer) lexComma() tomlLexStateFn {
l.next()
l.emit(tokenComma)
+ if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' {
+ return l.lexVoid
+ }
return l.lexRvalue
}
// Parse the key and emits its value without escape sequences.
// bare keys, basic string keys and literal string keys are supported.
func (l *tomlLexer) lexKey() tomlLexStateFn {
- growingString := ""
+ var sb strings.Builder
for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
if r == '"' {
@@ -309,7 +315,9 @@ func (l *tomlLexer) lexKey() tomlLexStateFn {
if err != nil {
return l.errorf(err.Error())
}
- growingString += str
+ sb.WriteString("\"")
+ sb.WriteString(str)
+ sb.WriteString("\"")
l.next()
continue
} else if r == '\'' {
@@ -318,20 +326,45 @@ func (l *tomlLexer) lexKey() tomlLexStateFn {
if err != nil {
return l.errorf(err.Error())
}
- growingString += str
+ sb.WriteString("'")
+ sb.WriteString(str)
+ sb.WriteString("'")
l.next()
continue
} else if r == '\n' {
return l.errorf("keys cannot contain new lines")
} else if isSpace(r) {
- break
+ var str strings.Builder
+ str.WriteString(" ")
+
+ // skip trailing whitespace
+ l.next()
+ for r = l.peek(); isSpace(r); r = l.peek() {
+ str.WriteRune(r)
+ l.next()
+ }
+ // break loop if not a dot
+ if r != '.' {
+ break
+ }
+ str.WriteString(".")
+ // skip trailing whitespace after dot
+ l.next()
+ for r = l.peek(); isSpace(r); r = l.peek() {
+ str.WriteRune(r)
+ l.next()
+ }
+ sb.WriteString(str.String())
+ continue
+ } else if r == '.' {
+ // skip
} else if !isValidBareChar(r) {
return l.errorf("keys cannot contain %c character", r)
}
- growingString += string(r)
+ sb.WriteRune(r)
l.next()
}
- l.emitWithValue(tokenKey, growingString)
+ l.emitWithValue(tokenKey, sb.String())
return l.lexVoid
}
@@ -351,11 +384,12 @@ func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
l.next()
l.emit(tokenLeftBracket)
+ l.brackets = append(l.brackets, '[')
return l.lexRvalue
}
func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
- growingString := ""
+ var sb strings.Builder
if discardLeadingNewLine {
if l.follow("\r\n") {
@@ -369,14 +403,14 @@ func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNe
// find end of string
for {
if l.follow(terminator) {
- return growingString, nil
+ return sb.String(), nil
}
next := l.peek()
if next == eof {
break
}
- growingString += string(l.next())
+ sb.WriteRune(l.next())
}
return "", errors.New("unclosed string")
@@ -410,7 +444,7 @@ func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
// Terminator is the substring indicating the end of the token.
// The resulting string does not include the terminator.
func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
- growingString := ""
+ var sb strings.Builder
if discardLeadingNewLine {
if l.follow("\r\n") {
@@ -423,7 +457,7 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine,
for {
if l.follow(terminator) {
- return growingString, nil
+ return sb.String(), nil
}
if l.follow("\\") {
@@ -441,72 +475,72 @@ func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine,
l.next()
}
case '"':
- growingString += "\""
+ sb.WriteString("\"")
l.next()
case 'n':
- growingString += "\n"
+ sb.WriteString("\n")
l.next()
case 'b':
- growingString += "\b"
+ sb.WriteString("\b")
l.next()
case 'f':
- growingString += "\f"
+ sb.WriteString("\f")
l.next()
case '/':
- growingString += "/"
+ sb.WriteString("/")
l.next()
case 't':
- growingString += "\t"
+ sb.WriteString("\t")
l.next()
case 'r':
- growingString += "\r"
+ sb.WriteString("\r")
l.next()
case '\\':
- growingString += "\\"
+ sb.WriteString("\\")
l.next()
case 'u':
l.next()
- code := ""
+ var code strings.Builder
for i := 0; i < 4; i++ {
c := l.peek()
if !isHexDigit(c) {
return "", errors.New("unfinished unicode escape")
}
l.next()
- code = code + string(c)
+ code.WriteRune(c)
}
- intcode, err := strconv.ParseInt(code, 16, 32)
+ intcode, err := strconv.ParseInt(code.String(), 16, 32)
if err != nil {
- return "", errors.New("invalid unicode escape: \\u" + code)
+ return "", errors.New("invalid unicode escape: \\u" + code.String())
}
- growingString += string(rune(intcode))
+ sb.WriteRune(rune(intcode))
case 'U':
l.next()
- code := ""
+ var code strings.Builder
for i := 0; i < 8; i++ {
c := l.peek()
if !isHexDigit(c) {
return "", errors.New("unfinished unicode escape")
}
l.next()
- code = code + string(c)
+ code.WriteRune(c)
}
- intcode, err := strconv.ParseInt(code, 16, 64)
+ intcode, err := strconv.ParseInt(code.String(), 16, 64)
if err != nil {
- return "", errors.New("invalid unicode escape: \\U" + code)
+ return "", errors.New("invalid unicode escape: \\U" + code.String())
}
- growingString += string(rune(intcode))
+ sb.WriteRune(rune(intcode))
default:
return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
}
} else {
r := l.peek()
- if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) {
+ if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) {
return "", fmt.Errorf("unescaped control character %U", r)
}
l.next()
- growingString += string(r)
+ sb.WriteRune(r)
}
if l.peek() == eof {
@@ -533,7 +567,6 @@ func (l *tomlLexer) lexString() tomlLexStateFn {
}
str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
-
if err != nil {
return l.errorf(err.Error())
}
@@ -605,6 +638,10 @@ func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
l.next()
l.emit(tokenRightBracket)
+ if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' {
+ return l.errorf("cannot have ']' here")
+ }
+ l.brackets = l.brackets[:len(l.brackets)-1]
return l.lexRvalue
}
@@ -731,7 +768,27 @@ func (l *tomlLexer) run() {
}
func init() {
- dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`)
+ // Regexp for all date/time formats supported by TOML.
+ // Group 1: nano precision
+ // Group 2: timezone
+ //
+ // /!\ also matches the empty string
+ //
+ // Example matches:
+ // 1979-05-27T07:32:00Z
+ // 1979-05-27T00:32:00-07:00
+ // 1979-05-27T00:32:00.999999-07:00
+ // 1979-05-27 07:32:00Z
+ // 1979-05-27 00:32:00-07:00
+ // 1979-05-27 00:32:00.999999-07:00
+ // 1979-05-27T07:32:00
+ // 1979-05-27T00:32:00.999999
+ // 1979-05-27 07:32:00
+ // 1979-05-27 00:32:00.999999
+ // 1979-05-27
+ // 07:32:00
+ // 00:32:00.999999
+ dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`)
}
// Entry point
diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go
new file mode 100644
index 00000000..a2149e96
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/localtime.go
@@ -0,0 +1,281 @@
+// Implementation of TOML's local date/time.
+// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go
+// to avoid pulling all the Google dependencies.
+//
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides types for civil time, a time-zone-independent
+// representation of time that follows the rules of the proleptic
+// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
+// minutes.
+//
+// Because they lack location information, these types do not represent unique
+// moments or intervals of time. Use time.Time for that purpose.
+package toml
+
+import (
+ "fmt"
+ "time"
+)
+
+// A LocalDate represents a date (year, month, day).
+//
+// This type does not include location information, and therefore does not
+// describe a unique 24-hour timespan.
+type LocalDate struct {
+ Year int // Year (e.g., 2014).
+ Month time.Month // Month of the year (January = 1, ...).
+ Day int // Day of the month, starting at 1.
+}
+
+// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
+func LocalDateOf(t time.Time) LocalDate {
+ var d LocalDate
+ d.Year, d.Month, d.Day = t.Date()
+ return d
+}
+
+// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
+func ParseLocalDate(s string) (LocalDate, error) {
+ t, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ return LocalDate{}, err
+ }
+ return LocalDateOf(t), nil
+}
+
+// String returns the date in RFC3339 full-date format.
+func (d LocalDate) String() string {
+ return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
+}
+
+// IsValid reports whether the date is valid.
+func (d LocalDate) IsValid() bool {
+ return LocalDateOf(d.In(time.UTC)) == d
+}
+
+// In returns the time corresponding to time 00:00:00 of the date in the location.
+//
+// In is always consistent with time.Date, even when time.Date returns a time
+// on a different day. For example, if loc is America/Indiana/Vincennes, then both
+// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
+// and
+// LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
+// return 23:00:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (d LocalDate) In(loc *time.Location) time.Time {
+ return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
+}
+
+// AddDays returns the date that is n days in the future.
+// n can also be negative to go into the past.
+func (d LocalDate) AddDays(n int) LocalDate {
+ return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
+}
+
+// DaysSince returns the signed number of days between the date and s, not including the end day.
+// This is the inverse operation to AddDays.
+func (d LocalDate) DaysSince(s LocalDate) (days int) {
+ // We convert to Unix time so we do not have to worry about leap seconds:
+ // Unix time increases by exactly 86400 seconds per day.
+ deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
+ return int(deltaUnix / 86400)
+}
+
+// Before reports whether d1 occurs before d2.
+func (d1 LocalDate) Before(d2 LocalDate) bool {
+ if d1.Year != d2.Year {
+ return d1.Year < d2.Year
+ }
+ if d1.Month != d2.Month {
+ return d1.Month < d2.Month
+ }
+ return d1.Day < d2.Day
+}
+
+// After reports whether d1 occurs after d2.
+func (d1 LocalDate) After(d2 LocalDate) bool {
+ return d2.Before(d1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of d.String().
+func (d LocalDate) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The date is expected to be a string in a format accepted by ParseLocalDate.
+func (d *LocalDate) UnmarshalText(data []byte) error {
+ var err error
+ *d, err = ParseLocalDate(string(data))
+ return err
+}
+
+// A LocalTime represents a time with nanosecond precision.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+//
+// This type exists to represent TOML's local time; it was originally the TIME
+// type in storage-based APIs like BigQuery.
+// Most operations on LocalTimes are unlikely to be meaningful. Prefer the LocalDateTime type.
+type LocalTime struct {
+ Hour int // The hour of the day in 24-hour format; range [0-23]
+ Minute int // The minute of the hour; range [0-59]
+ Second int // The second of the minute; range [0-59]
+ Nanosecond int // The nanosecond of the second; range [0-999999999]
+}
+
+// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
+// in that time's location. It ignores the date.
+func LocalTimeOf(t time.Time) LocalTime {
+ var tm LocalTime
+ tm.Hour, tm.Minute, tm.Second = t.Clock()
+ tm.Nanosecond = t.Nanosecond()
+ return tm
+}
+
+// ParseLocalTime parses a string and returns the time value it represents.
+// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
+// the HH:MM:SS part of the string, an optional fractional part may appear,
+// consisting of a decimal point followed by one to nine decimal digits.
+// (RFC3339 admits only one digit after the decimal point).
+func ParseLocalTime(s string) (LocalTime, error) {
+ t, err := time.Parse("15:04:05.999999999", s)
+ if err != nil {
+ return LocalTime{}, err
+ }
+ return LocalTimeOf(t), nil
+}
+
+// String returns the time in the format described in ParseLocalTime. If Nanosecond
+// is zero, no fractional part will be generated. Otherwise, the result will
+// end with a fractional part consisting of a decimal point and nine digits.
+func (t LocalTime) String() string {
+ s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
+ if t.Nanosecond == 0 {
+ return s
+ }
+ return s + fmt.Sprintf(".%09d", t.Nanosecond)
+}
+
+// IsValid reports whether the time is valid.
+func (t LocalTime) IsValid() bool {
+ // Construct a non-zero time.
+ tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
+ return LocalTimeOf(tm) == t
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of t.String().
+func (t LocalTime) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The time is expected to be a string in a format accepted by ParseLocalTime.
+func (t *LocalTime) UnmarshalText(data []byte) error {
+ var err error
+ *t, err = ParseLocalTime(string(data))
+ return err
+}
+
+// A LocalDateTime represents a date and time.
+//
+// This type does not include location information, and therefore does not
+// describe a unique moment in time.
+type LocalDateTime struct {
+ Date LocalDate
+ Time LocalTime
+}
+
+// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and DaysSince.
+
+// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
+func LocalDateTimeOf(t time.Time) LocalDateTime {
+ return LocalDateTime{
+ Date: LocalDateOf(t),
+ Time: LocalTimeOf(t),
+ }
+}
+
+// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
+// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
+// the time offset but includes an optional fractional time, as described in
+// ParseLocalTime. Informally, the accepted format is
+// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
+// where the 'T' may be a lower-case 't'.
+func ParseLocalDateTime(s string) (LocalDateTime, error) {
+ t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
+ if err != nil {
+ t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
+ if err != nil {
+ return LocalDateTime{}, err
+ }
+ }
+ return LocalDateTimeOf(t), nil
+}
+
+// String returns the date-time in the format described in ParseLocalDateTime.
+func (dt LocalDateTime) String() string {
+ return dt.Date.String() + "T" + dt.Time.String()
+}
+
+// IsValid reports whether the datetime is valid.
+func (dt LocalDateTime) IsValid() bool {
+ return dt.Date.IsValid() && dt.Time.IsValid()
+}
+
+// In returns the time corresponding to the LocalDateTime in the given location.
+//
+// If the time is missing or ambiguous at the location, In returns the same
+// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
+// both
+// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
+// and
+// LocalDateTime{
+// Date: LocalDate{Year: 1955, Month: time.May, Day: 1},
+// Time: LocalTime{Minute: 30}}.In(loc)
+// return 23:30:00 on April 30, 1955.
+//
+// In panics if loc is nil.
+func (dt LocalDateTime) In(loc *time.Location) time.Time {
+ return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
+}
+
+// Before reports whether dt1 occurs before dt2.
+func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
+ return dt1.In(time.UTC).Before(dt2.In(time.UTC))
+}
+
+// After reports whether dt1 occurs after dt2.
+func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
+ return dt2.Before(dt1)
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The output is the result of dt.String().
+func (dt LocalDateTime) MarshalText() ([]byte, error) {
+ return []byte(dt.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The datetime is expected to be a string in a format accepted by ParseLocalDateTime.
+func (dt *LocalDateTime) UnmarshalText(data []byte) error {
+ var err error
+ *dt, err = ParseLocalDateTime(string(data))
+ return err
+}
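+
+// Illustrative sketch (not part of the upstream civil.go copy): how the local
+// date/time types above are typically combined. Only functions defined in this
+// file and the standard library are assumed.
+//
+//    d, _ := ParseLocalDate("1979-05-27")                // LocalDate{Year: 1979, Month: time.May, Day: 27}
+//    t, _ := ParseLocalTime("07:32:00.999999")           // fractional seconds are optional
+//    dt, _ := ParseLocalDateTime("1979-05-27T07:32:00")  // a lower-case 't' separator is also accepted
+//    _ = dt.String()                                     // "1979-05-27T07:32:00"
+//    _ = LocalDateTime{Date: d, Time: t}.In(time.UTC)    // anchor to a concrete location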
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
index 671da556..032e0ffc 100644
--- a/vendor/github.com/pelletier/go-toml/marshal.go
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -2,24 +2,34 @@ package toml
import (
"bytes"
+ "encoding"
"errors"
"fmt"
"io"
"reflect"
+ "sort"
"strconv"
"strings"
"time"
)
-const tagKeyMultiline = "multiline"
+const (
+ tagFieldName = "toml"
+ tagFieldComment = "comment"
+ tagCommented = "commented"
+ tagMultiline = "multiline"
+ tagDefault = "default"
+)
type tomlOpts struct {
- name string
- comment string
- commented bool
- multiline bool
- include bool
- omitempty bool
+ name string
+ nameFromTag bool
+ comment string
+ commented bool
+ multiline bool
+ include bool
+ omitempty bool
+ defaultValue string
}
type encOpts struct {
@@ -31,10 +41,44 @@ var encOptsDefaults = encOpts{
quoteMapKeys: false,
}
+type annotation struct {
+ tag string
+ comment string
+ commented string
+ multiline string
+ defaultValue string
+}
+
+var annotationDefault = annotation{
+ tag: tagFieldName,
+ comment: tagFieldComment,
+ commented: tagCommented,
+ multiline: tagMultiline,
+ defaultValue: tagDefault,
+}
+
+type marshalOrder int
+
+// Orders in which the Encoder can write the fields to the output stream.
+const (
+ // Sort fields alphabetically.
+ OrderAlphabetical marshalOrder = iota + 1
+ // Preserve the order the fields are encountered. For example, the order of fields in
+ // a struct.
+ OrderPreserve
+)
+
var timeType = reflect.TypeOf(time.Time{})
var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem()
+var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+var localDateType = reflect.TypeOf(LocalDate{})
+var localTimeType = reflect.TypeOf(LocalTime{})
+var localDateTimeType = reflect.TypeOf(LocalDateTime{})
+var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
-// Check if the given marshall type maps to a Tree primitive
+// Check if the given marshal type maps to a Tree primitive
func isPrimitive(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
@@ -50,37 +94,69 @@ func isPrimitive(mtype reflect.Type) bool {
case reflect.String:
return true
case reflect.Struct:
- return mtype == timeType || isCustomMarshaler(mtype)
+ return isTimeType(mtype)
default:
return false
}
}
-// Check if the given marshall type maps to a Tree slice
-func isTreeSlice(mtype reflect.Type) bool {
- switch mtype.Kind() {
- case reflect.Slice:
- return !isOtherSlice(mtype)
- default:
- return false
- }
+func isTimeType(mtype reflect.Type) bool {
+ return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType
}
-// Check if the given marshall type maps to a non-Tree slice
-func isOtherSlice(mtype reflect.Type) bool {
+// Check if the given marshal type maps to a Tree slice or array
+func isTreeSequence(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
- return isOtherSlice(mtype.Elem())
- case reflect.Slice:
- return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
+ return isTreeSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isTree(mtype.Elem())
default:
return false
}
}
-// Check if the given marshall type maps to a Tree
+// Check if the given marshal type maps to a slice or array of a custom marshaler type
+func isCustomMarshalerSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isCustomMarshalerSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a slice or array of a text marshaler type
+func isTextMarshalerSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTextMarshalerSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a non-Tree slice or array
+func isOtherSequence(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isOtherSequence(mtype.Elem())
+ case reflect.Slice, reflect.Array:
+ return !isTreeSequence(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshal type maps to a Tree
func isTree(mtype reflect.Type) bool {
switch mtype.Kind() {
+ case reflect.Ptr:
+ return isTree(mtype.Elem())
case reflect.Map:
return true
case reflect.Struct:
@@ -98,12 +174,42 @@ func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
return mval.Interface().(Marshaler).MarshalTOML()
}
+func isTextMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(textMarshalerType) && !isTimeType(mtype)
+}
+
+func callTextMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(encoding.TextMarshaler).MarshalText()
+}
+
+func isCustomUnmarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(unmarshalerType)
+}
+
+func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error {
+ return mval.Interface().(Unmarshaler).UnmarshalTOML(tval)
+}
+
+func isTextUnmarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(textUnmarshalerType)
+}
+
+func callTextUnmarshaler(mval reflect.Value, text []byte) error {
+ return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text)
+}
+
// Marshaler is the interface implemented by types that
// can marshal themselves into valid TOML.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}
+// Unmarshaler is the interface implemented by types that
+// can unmarshal a TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
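+// A minimal sketch (the portList type is hypothetical, not part of this file)
+// of an Unmarshaler implementation. The decoder hands it the matched table as
+// a map[string]interface{} (see callCustomUnmarshaler above).
+//
+//    type portList struct{ ports []int64 }
+//
+//    func (p *portList) UnmarshalTOML(v interface{}) error {
+//        m, ok := v.(map[string]interface{})
+//        if !ok {
+//            return errors.New("expected a table")
+//        }
+//        raw, _ := m["ports"].([]interface{})
+//        for _, item := range raw {
+//            if n, ok := item.(int64); ok {
+//                p.ports = append(p.ports, n)
+//            }
+//        }
+//        return nil
+//    }
+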
/*
Marshal returns the TOML encoding of v. Behavior is similar to the Go json
encoder, except that there is no concept of a Marshaler interface or MarshalTOML
@@ -135,7 +241,9 @@ Tree primitive types and corresponding marshal types:
float64 float32, float64, pointers to same
string string, pointers to same
bool bool, pointers to same
- time.Time time.Time{}, pointers to same
+ time.LocalTime time.LocalTime{}, pointers to same
+
+For additional flexibility, use the Encoder API.
*/
func Marshal(v interface{}) ([]byte, error) {
return NewEncoder(nil).marshal(v)
@@ -145,13 +253,24 @@ func Marshal(v interface{}) ([]byte, error) {
type Encoder struct {
w io.Writer
encOpts
+ annotation
+ line int
+ col int
+ order marshalOrder
+ promoteAnon bool
+ indentation string
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
- w: w,
- encOpts: encOptsDefaults,
+ w: w,
+ encOpts: encOptsDefaults,
+ annotation: annotationDefault,
+ line: 0,
+ col: 1,
+ order: OrderAlphabetical,
+ indentation: " ",
}
}
@@ -197,65 +316,175 @@ func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
return e
}
+// Order sets the order in which fields are written to the output stream.
+func (e *Encoder) Order(ord marshalOrder) *Encoder {
+ e.order = ord
+ return e
+}
+
+// Indentation sets the indentation string used when marshaling.
+func (e *Encoder) Indentation(indent string) *Encoder {
+ e.indentation = indent
+ return e
+}
+
+// SetTagName allows changing the default tag "toml".
+func (e *Encoder) SetTagName(v string) *Encoder {
+ e.tag = v
+ return e
+}
+
+// SetTagComment allows changing the default tag "comment".
+func (e *Encoder) SetTagComment(v string) *Encoder {
+ e.comment = v
+ return e
+}
+
+// SetTagCommented allows changing the default tag "commented".
+func (e *Encoder) SetTagCommented(v string) *Encoder {
+ e.commented = v
+ return e
+}
+
+// SetTagMultiline allows changing the default tag "multiline".
+func (e *Encoder) SetTagMultiline(v string) *Encoder {
+ e.multiline = v
+ return e
+}
+
+// PromoteAnonymous controls how anonymous struct fields are marshaled.
+// Usually, they are marshaled as if the inner exported fields were fields in
+// the outer struct. However, if an anonymous struct field is given a name in
+// its TOML tag, it is treated like a regular struct field with that name,
+// rather than being anonymous.
+//
+// In case anonymous promotion is enabled, all anonymous structs are promoted
+// and treated like regular struct fields.
+func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
+ e.promoteAnon = promote
+ return e
+}
+
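+// A hedged usage sketch of the builder-style options above (the buffer, tag
+// name and struct are illustrative only):
+//
+//    var buf bytes.Buffer
+//    enc := NewEncoder(&buf).
+//        Order(OrderPreserve).  // keep struct-field order instead of sorting
+//        Indentation("  ").     // must contain only spaces and/or tabs
+//        SetTagName("cfg")      // read `cfg:"..."` tags instead of `toml:"..."`
+//    _ = enc.Encode(struct {
+//        Host string `cfg:"host"`
+//    }{Host: "localhost"})
+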
func (e *Encoder) marshal(v interface{}) ([]byte, error) {
- mtype := reflect.TypeOf(v)
- if mtype.Kind() != reflect.Struct {
- return []byte{}, errors.New("Only a struct can be marshaled to TOML")
+ // Check if indentation is valid
+ for _, char := range e.indentation {
+ if !isSpace(char) {
+ return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters")
+ }
}
+
+ mtype := reflect.TypeOf(v)
+ if mtype == nil {
+ return []byte{}, errors.New("nil cannot be marshaled to TOML")
+ }
+
+ switch mtype.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Ptr:
+ if mtype.Elem().Kind() != reflect.Struct {
+ return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML")
+ }
+ if reflect.ValueOf(v).IsNil() {
+ return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
+ }
+ default:
+ return []byte{}, errors.New("Only a struct or map can be marshaled to TOML")
+ }
+
sval := reflect.ValueOf(v)
if isCustomMarshaler(mtype) {
return callCustomMarshaler(sval)
}
+ if isTextMarshaler(mtype) {
+ return callTextMarshaler(sval)
+ }
t, err := e.valueToTree(mtype, sval)
if err != nil {
return []byte{}, err
}
var buf bytes.Buffer
- _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine)
+ _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, false)
return buf.Bytes(), err
}
+// Create next tree with a position based on Encoder.line
+func (e *Encoder) nextTree() *Tree {
+ return newTreeWithPosition(Position{Line: e.line, Col: 1})
+}
+
// Convert given marshal struct or map value to toml tree
func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
if mtype.Kind() == reflect.Ptr {
return e.valueToTree(mtype.Elem(), mval.Elem())
}
- tval := newTree()
+ tval := e.nextTree()
switch mtype.Kind() {
case reflect.Struct:
- for i := 0; i < mtype.NumField(); i++ {
- mtypef, mvalf := mtype.Field(i), mval.Field(i)
- opts := tomlOptions(mtypef)
- if opts.include && (!opts.omitempty || !isZero(mvalf)) {
- val, err := e.valueToToml(mtypef.Type, mvalf)
- if err != nil {
- return nil, err
+ switch mval.Interface().(type) {
+ case Tree:
+ reflect.ValueOf(tval).Elem().Set(mval)
+ default:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef, mvalf := mtype.Field(i), mval.Field(i)
+ opts := tomlOptions(mtypef, e.annotation)
+ if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
+ val, err := e.valueToToml(mtypef.Type, mvalf)
+ if err != nil {
+ return nil, err
+ }
+ if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon {
+ e.appendTree(tval, tree)
+ } else {
+ val = e.wrapTomlValue(val, tval)
+ tval.SetPathWithOptions([]string{opts.name}, SetOptions{
+ Comment: opts.comment,
+ Commented: opts.commented,
+ Multiline: opts.multiline,
+ }, val)
+ }
}
-
- tval.SetWithOptions(opts.name, SetOptions{
- Comment: opts.comment,
- Commented: opts.commented,
- Multiline: opts.multiline,
- }, val)
}
}
case reflect.Map:
- for _, key := range mval.MapKeys() {
+ keys := mval.MapKeys()
+ if e.order == OrderPreserve && len(keys) > 0 {
+ // Sorting []reflect.Value is not straightforward.
+ //
+ // OrderPreserve only guarantees deterministic results when the map
+ // keys are strings.
+ typ := keys[0].Type()
+ kind := keys[0].Kind()
+ if kind == reflect.String {
+ ikeys := make([]string, len(keys))
+ for i := range keys {
+ ikeys[i] = keys[i].Interface().(string)
+ }
+ sort.Strings(ikeys)
+ for i := range ikeys {
+ keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ)
+ }
+ }
+ }
+ for _, key := range keys {
mvalf := mval.MapIndex(key)
+ if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() {
+ continue
+ }
val, err := e.valueToToml(mtype.Elem(), mvalf)
if err != nil {
return nil, err
}
+ val = e.wrapTomlValue(val, tval)
if e.quoteMapKeys {
- keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine)
+ keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine)
if err != nil {
return nil, err
}
tval.SetPath([]string{keyStr}, val)
} else {
- tval.Set(key.String(), val)
+ tval.SetPath([]string{key.String()}, val)
}
}
}
@@ -291,22 +520,39 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int
// Convert given marshal value to toml value
func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
if mtype.Kind() == reflect.Ptr {
- return e.valueToToml(mtype.Elem(), mval.Elem())
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTextMarshaler(mtype):
+ b, err := callTextMarshaler(mval)
+ return string(b), err
+ default:
+ return e.valueToToml(mtype.Elem(), mval.Elem())
+ }
+ }
+ if mtype.Kind() == reflect.Interface {
+ return e.valueToToml(mval.Elem().Type(), mval.Elem())
}
switch {
case isCustomMarshaler(mtype):
return callCustomMarshaler(mval)
+ case isTextMarshaler(mtype):
+ b, err := callTextMarshaler(mval)
+ return string(b), err
case isTree(mtype):
return e.valueToTree(mtype, mval)
- case isTreeSlice(mtype):
- return e.valueToTreeSlice(mtype, mval)
- case isOtherSlice(mtype):
+ case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype):
return e.valueToOtherSlice(mtype, mval)
+ case isTreeSequence(mtype):
+ return e.valueToTreeSlice(mtype, mval)
default:
switch mtype.Kind() {
case reflect.Bool:
return mval.Bool(), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) {
+ return fmt.Sprint(mval), nil
+ }
return mval.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return mval.Uint(), nil
@@ -315,18 +561,50 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface
case reflect.String:
return mval.String(), nil
case reflect.Struct:
- return mval.Interface().(time.Time), nil
+ return mval.Interface(), nil
default:
return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
}
}
}
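+
+// Sketch of the time.Duration handling (the svc type and values are
+// illustrative): the Int64 case above writes a duration as its string form,
+// and the decoder's valueFromToml parses it back with time.ParseDuration.
+//
+//    type svc struct {
+//        Timeout time.Duration `toml:"timeout"`
+//    }
+//    b, _ := Marshal(svc{Timeout: 90 * time.Second}) // timeout = "1m30s"
+//    _ = Unmarshal(b, &svc{})
+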
+func (e *Encoder) appendTree(t, o *Tree) error {
+ for key, value := range o.values {
+ if _, ok := t.values[key]; ok {
+ continue
+ }
+ if tomlValue, ok := value.(*tomlValue); ok {
+ tomlValue.position.Col = t.position.Col
+ }
+ t.values[key] = value
+ }
+ return nil
+}
+
+// Create a toml value with the current line number as the position line
+func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} {
+ _, isTree := val.(*Tree)
+ _, isTreeS := val.([]*Tree)
+ if isTree || isTreeS {
+ return val
+ }
+
+ ret := &tomlValue{
+ value: val,
+ position: Position{
+ e.line,
+ parent.position.Col,
+ },
+ }
+ e.line++
+ return ret
+}
+
// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v.
// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
// sub-structs, and only definite types can be unmarshaled.
func (t *Tree) Unmarshal(v interface{}) error {
- d := Decoder{tval: t}
+ d := Decoder{tval: t, tagName: tagFieldName}
return d.unmarshal(v)
}
@@ -334,8 +612,11 @@ func (t *Tree) Unmarshal(v interface{}) error {
// See Marshal() documentation for types mapping table.
func (t *Tree) Marshal() ([]byte, error) {
var buf bytes.Buffer
- err := NewEncoder(&buf).Encode(t)
- return buf.Bytes(), err
+ _, err := t.WriteTo(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
}
// Unmarshal parses the TOML-encoded data and stores the result in the value
@@ -347,6 +628,14 @@ func (t *Tree) Marshal() ([]byte, error) {
// The following struct annotations are supported:
//
// toml:"Field" Overrides the field's name to map to.
+// default:"foo" Provides a default value.
+//
+// For default values, only fields of the following types are supported:
+// * string
+// * bool
+// * int, int8, int16, int32, int64
+// * uint, uint8, uint16, uint32, uint64
+// * float32, float64
//
// See Marshal() documentation for types mapping table.
func Unmarshal(data []byte, v interface{}) error {
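+// A hedged sketch of the annotations documented above (the Config type and
+// its tag values are illustrative only):
+//
+//    type Config struct {
+//        Host    string  `toml:"host" comment:"server address" default:"localhost"`
+//        Port    int     `toml:"port" default:"8080"`
+//        Ratio   float64 `default:"0.5"`
+//        Ignored string  `toml:"-"`
+//    }
+//    var c Config
+//    _ = Unmarshal([]byte(`host = "example.org"`), &c) // Port and Ratio fall back to their defaults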
@@ -362,6 +651,9 @@ type Decoder struct {
r io.Reader
tval *Tree
encOpts
+ tagName string
+ strict bool
+ visitor visitorState
}
// NewDecoder returns a new decoder that reads from r.
@@ -369,6 +661,7 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
r: r,
encOpts: encOptsDefaults,
+ tagName: tagFieldName,
}
}
@@ -385,60 +678,200 @@ func (d *Decoder) Decode(v interface{}) error {
return d.unmarshal(v)
}
+// SetTagName allows changing default tag "toml"
+func (d *Decoder) SetTagName(v string) *Decoder {
+ d.tagName = v
+ return d
+}
+
+// Strict enables strict decoding: any field found in the input data that does
+// not have a corresponding struct member causes an error.
+func (d *Decoder) Strict(strict bool) *Decoder {
+ d.strict = strict
+ return d
+}
+
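+// A hedged sketch of strict decoding (the onlyHost type and input are
+// illustrative): with Strict(true), keys present in the document but absent
+// from the target struct surface as an "undecoded keys" error instead of
+// being silently dropped.
+//
+//    type onlyHost struct {
+//        Host string `toml:"host"`
+//    }
+//    var c onlyHost
+//    dec := NewDecoder(strings.NewReader("host = \"a\"\nport = 1\n")).Strict(true)
+//    err := dec.Decode(&c) // err reports the undecoded "port" key
+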
func (d *Decoder) unmarshal(v interface{}) error {
mtype := reflect.TypeOf(v)
- if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
- return errors.New("Only a pointer to struct can be unmarshaled from TOML")
+ if mtype == nil {
+ return errors.New("nil cannot be unmarshaled from TOML")
+ }
+ if mtype.Kind() != reflect.Ptr {
+ return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
}
- sval, err := d.valueFromTree(mtype.Elem(), d.tval)
+ elem := mtype.Elem()
+
+ switch elem.Kind() {
+ case reflect.Struct, reflect.Map:
+ case reflect.Interface:
+ elem = mapStringInterfaceType
+ default:
+ return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
+ }
+
+ if reflect.ValueOf(v).IsNil() {
+ return errors.New("nil pointer cannot be unmarshaled from TOML")
+ }
+
+ vv := reflect.ValueOf(v).Elem()
+
+ if d.strict {
+ d.visitor = newVisitorState(d.tval)
+ }
+
+ sval, err := d.valueFromTree(elem, d.tval, &vv)
if err != nil {
return err
}
+ if err := d.visitor.validate(); err != nil {
+ return err
+ }
reflect.ValueOf(v).Elem().Set(sval)
return nil
}
-// Convert toml tree to marshal struct or map, using marshal type
-func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
+// Convert toml tree to marshal struct or map, using marshal type. When mval1
+// is non-nil, merge fields into the given value instead of allocating a new one.
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
- return d.unwrapPointer(mtype, tval)
+ return d.unwrapPointer(mtype, tval, mval1)
}
+
+ // Check if pointer to value implements the Unmarshaler interface.
+ if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) {
+ d.visitor.visitAll()
+
+ if tval == nil {
+ return mvalPtr.Elem(), nil
+ }
+
+ if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
var mval reflect.Value
switch mtype.Kind() {
case reflect.Struct:
- mval = reflect.New(mtype).Elem()
- for i := 0; i < mtype.NumField(); i++ {
- mtypef := mtype.Field(i)
- opts := tomlOptions(mtypef)
- if opts.include {
+ if mval1 != nil {
+ mval = *mval1
+ } else {
+ mval = reflect.New(mtype).Elem()
+ }
+
+ switch mval.Interface().(type) {
+ case Tree:
+ mval.Set(reflect.ValueOf(tval).Elem())
+ default:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef := mtype.Field(i)
+ an := annotation{tag: d.tagName}
+ opts := tomlOptions(mtypef, an)
+ if !opts.include {
+ continue
+ }
baseKey := opts.name
- keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
- for _, key := range keysToTry {
- exists := tval.Has(key)
- if !exists {
- continue
+ keysToTry := []string{
+ baseKey,
+ strings.ToLower(baseKey),
+ strings.ToTitle(baseKey),
+ strings.ToLower(string(baseKey[0])) + baseKey[1:],
+ }
+
+ found := false
+ if tval != nil {
+ for _, key := range keysToTry {
+ exists := tval.HasPath([]string{key})
+ if !exists {
+ continue
+ }
+
+ d.visitor.push(key)
+ val := tval.GetPath([]string{key})
+ fval := mval.Field(i)
+ mvalf, err := d.valueFromToml(mtypef.Type, val, &fval)
+ if err != nil {
+ return mval, formatError(err, tval.GetPositionPath([]string{key}))
+ }
+ mval.Field(i).Set(mvalf)
+ found = true
+ d.visitor.pop()
+ break
}
- val := tval.Get(key)
- mvalf, err := d.valueFromToml(mtypef.Type, val)
+ }
+
+ if !found && opts.defaultValue != "" {
+ mvalf := mval.Field(i)
+ var val interface{}
+ var err error
+ switch mvalf.Kind() {
+ case reflect.String:
+ val = opts.defaultValue
+ case reflect.Bool:
+ val, err = strconv.ParseBool(opts.defaultValue)
+ case reflect.Uint:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 0)
+ case reflect.Uint8:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 8)
+ case reflect.Uint16:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 16)
+ case reflect.Uint32:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 32)
+ case reflect.Uint64:
+ val, err = strconv.ParseUint(opts.defaultValue, 10, 64)
+ case reflect.Int:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 0)
+ case reflect.Int8:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 8)
+ case reflect.Int16:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 16)
+ case reflect.Int32:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 32)
+ case reflect.Int64:
+ val, err = strconv.ParseInt(opts.defaultValue, 10, 64)
+ case reflect.Float32:
+ val, err = strconv.ParseFloat(opts.defaultValue, 32)
+ case reflect.Float64:
+ val, err = strconv.ParseFloat(opts.defaultValue, 64)
+ default:
+ return mvalf, fmt.Errorf("unsupported field type for default option")
+ }
+
if err != nil {
- return mval, formatError(err, tval.GetPosition(key))
+ return mvalf, err
}
- mval.Field(i).Set(mvalf)
- break
+ mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type()))
+ }
+
+ // Fall back to nested structs when no key matched and no default value was set.
+ if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct {
+ tmpTval := tval
+ if !mtypef.Anonymous {
+ tmpTval = nil
+ }
+ fval := mval.Field(i)
+ v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval)
+ if err != nil {
+ return v, err
+ }
+ mval.Field(i).Set(v)
}
}
}
case reflect.Map:
mval = reflect.MakeMap(mtype)
for _, key := range tval.Keys() {
+ d.visitor.push(key)
// TODO: path splits key
val := tval.GetPath([]string{key})
- mvalf, err := d.valueFromToml(mtype.Elem(), val)
+ mvalf, err := d.valueFromToml(mtype.Elem(), val, nil)
if err != nil {
- return mval, formatError(err, tval.GetPosition(key))
+ return mval, formatError(err, tval.GetPositionPath([]string{key}))
}
- mval.SetMapIndex(reflect.ValueOf(key), mvalf)
+ mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf)
+ d.visitor.pop()
}
}
return mval, nil
@@ -446,9 +879,32 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
// Convert toml value to marshal struct/map slice, using marshal type
func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
- mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+ mval, err := makeSliceOrArray(mtype, len(tval))
+ if err != nil {
+ return mval, err
+ }
+
for i := 0; i < len(tval); i++ {
- val, err := d.valueFromTree(mtype.Elem(), tval[i])
+ d.visitor.push(strconv.Itoa(i))
+ val, err := d.valueFromTree(mtype.Elem(), tval[i], nil)
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ d.visitor.pop()
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+ mval, err := makeSliceOrArray(mtype, len(tval))
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < len(tval); i++ {
+ val, err := d.valueFromToml(mtype.Elem(), tval[i], nil)
if err != nil {
return mval, err
}
@@ -458,10 +914,17 @@ func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.
}
// Convert toml value to marshal primitive slice, using marshal type
-func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
- mval := reflect.MakeSlice(mtype, len(tval), len(tval))
- for i := 0; i < len(tval); i++ {
- val, err := d.valueFromToml(mtype.Elem(), tval[i])
+func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ val := reflect.ValueOf(tval)
+ length := val.Len()
+
+ mval, err := makeSliceOrArray(mtype, length)
+ if err != nil {
+ return mval, err
+ }
+
+ for i := 0; i < length; i++ {
+ val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil)
if err != nil {
return mval, err
}
@@ -470,33 +933,113 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r
return mval, nil
}
-// Convert toml value to marshal value, using marshal type
-func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+// Create a new slice or a new array with specified length
+func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) {
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Slice:
+ mval = reflect.MakeSlice(mtype, tLength, tLength)
+ case reflect.Array:
+ mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem()
+ if tLength > mtype.Len() {
+ return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len())
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type. When mval1 is non-nil
+// and the given type is a struct value, merge fields into it.
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
- return d.unwrapPointer(mtype, tval)
+ return d.unwrapPointer(mtype, tval, mval1)
}
- switch tval.(type) {
+ switch t := tval.(type) {
case *Tree:
- if isTree(mtype) {
- return d.valueFromTree(mtype, tval.(*Tree))
+ var mval11 *reflect.Value
+ if mtype.Kind() == reflect.Struct {
+ mval11 = mval1
}
+
+ if isTree(mtype) {
+ return d.valueFromTree(mtype, t, mval11)
+ }
+
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil)
+ } else {
+ return d.valueFromToml(mval1.Elem().Type(), t, nil)
+ }
+ }
+
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
case []*Tree:
- if isTreeSlice(mtype) {
- return d.valueFromTreeSlice(mtype, tval.([]*Tree))
+ if isTreeSequence(mtype) {
+ return d.valueFromTreeSlice(mtype, t)
+ }
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t)
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
}
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
case []interface{}:
- if isOtherSlice(mtype) {
- return d.valueFromOtherSlice(mtype, tval.([]interface{}))
+ d.visitor.visit()
+ if isOtherSequence(mtype) {
+ return d.valueFromOtherSlice(mtype, t)
+ }
+ if mtype.Kind() == reflect.Interface {
+ if mval1 == nil || mval1.IsNil() {
+ return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t)
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
}
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
default:
+ d.visitor.visit()
+ // Check if pointer to value implements the encoding.TextUnmarshaler.
+ if mvalPtr := reflect.New(mtype); isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) {
+ if err := d.unmarshalText(tval, mvalPtr); err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err)
+ }
+ return mvalPtr.Elem(), nil
+ }
+
switch mtype.Kind() {
case reflect.Bool, reflect.Struct:
val := reflect.ValueOf(tval)
- // if this passes for when mtype is reflect.Struct, tval is a time.Time
+
+ switch val.Type() {
+ case localDateType:
+ localDate := val.Interface().(LocalDate)
+ switch mtype {
+ case timeType:
+ return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil
+ }
+ case localDateTimeType:
+ localDateTime := val.Interface().(LocalDateTime)
+ switch mtype {
+ case timeType:
+ return reflect.ValueOf(time.Date(
+ localDateTime.Date.Year,
+ localDateTime.Date.Month,
+ localDateTime.Date.Day,
+ localDateTime.Time.Hour,
+ localDateTime.Time.Minute,
+ localDateTime.Time.Second,
+ localDateTime.Time.Nanosecond,
+ time.Local)), nil
+ }
+ }
+
+ // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime
if !val.Type().ConvertibleTo(mtype) {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
@@ -512,45 +1055,72 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
return val.Convert(mtype), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val := reflect.ValueOf(tval)
- if !val.Type().ConvertibleTo(mtype) {
+ if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String {
+ d, err := time.ParseDuration(val.String())
+ if err != nil {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err)
+ }
+ return reflect.ValueOf(d), nil
+ }
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) {
+ if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) {
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
}
return val.Convert(mtype), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
val := reflect.ValueOf(tval)
- if !val.Type().ConvertibleTo(mtype) {
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- if val.Int() < 0 {
+
+ if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
}
- if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) {
+ if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) {
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
}
return val.Convert(mtype), nil
case reflect.Float32, reflect.Float64:
val := reflect.ValueOf(tval)
- if !val.Type().ConvertibleTo(mtype) {
+ if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) {
+ if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) {
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
}
return val.Convert(mtype), nil
+ case reflect.Interface:
+ if mval1 == nil || mval1.IsNil() {
+ return reflect.ValueOf(tval), nil
+ } else {
+ ival := mval1.Elem()
+ return d.valueFromToml(mval1.Elem().Type(), t, &ival)
+ }
+ case reflect.Slice, reflect.Array:
+ if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) {
+ return d.valueFromOtherSliceI(mtype, t)
+ }
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
default:
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
}
}
}
-func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
- val, err := d.valueFromToml(mtype.Elem(), tval)
+func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
+ var melem *reflect.Value
+
+ if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) {
+ elem := mval1.Elem()
+ melem = &elem
+ }
+
+ val, err := d.valueFromToml(mtype.Elem(), tval, melem)
if err != nil {
return reflect.ValueOf(nil), err
}
@@ -559,21 +1129,38 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.V
return mval, nil
}
-func tomlOptions(vf reflect.StructField) tomlOpts {
- tag := vf.Tag.Get("toml")
+func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error {
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, tval)
+ return callTextUnmarshaler(mval, buf.Bytes())
+}
+
+func tomlOptions(vf reflect.StructField, an annotation) tomlOpts {
+ tag := vf.Tag.Get(an.tag)
parse := strings.Split(tag, ",")
var comment string
- if c := vf.Tag.Get("comment"); c != "" {
+ if c := vf.Tag.Get(an.comment); c != "" {
comment = c
}
- commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
- multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline))
- result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false}
+ commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented))
+ multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline))
+ defaultValue := vf.Tag.Get(tagDefault)
+ result := tomlOpts{
+ name: vf.Name,
+ nameFromTag: false,
+ comment: comment,
+ commented: commented,
+ multiline: multiline,
+ include: true,
+ omitempty: false,
+ defaultValue: defaultValue,
+ }
if parse[0] != "" {
if parse[0] == "-" && len(parse) == 1 {
result.include = false
} else {
result.name = strings.Trim(parse[0], " ")
+ result.nameFromTag = true
}
}
if vf.PkgPath != "" {
@@ -590,11 +1177,7 @@ func tomlOptions(vf reflect.StructField) tomlOpts {
func isZero(val reflect.Value) bool {
switch val.Type().Kind() {
- case reflect.Map:
- fallthrough
- case reflect.Array:
- fallthrough
- case reflect.Slice:
+ case reflect.Slice, reflect.Array, reflect.Map:
return val.Len() == 0
default:
return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
@@ -607,3 +1190,80 @@ func formatError(err error, pos Position) error {
}
return fmt.Errorf("%s: %s", pos, err)
}
+
+// visitorState keeps track of which keys were unmarshaled.
+type visitorState struct {
+ tree *Tree
+ path []string
+ keys map[string]struct{}
+ active bool
+}
+
+func newVisitorState(tree *Tree) visitorState {
+ path, result := []string{}, map[string]struct{}{}
+ insertKeys(path, result, tree)
+ return visitorState{
+ tree: tree,
+ path: path[:0],
+ keys: result,
+ active: true,
+ }
+}
+
+func (s *visitorState) push(key string) {
+ if s.active {
+ s.path = append(s.path, key)
+ }
+}
+
+func (s *visitorState) pop() {
+ if s.active {
+ s.path = s.path[:len(s.path)-1]
+ }
+}
+
+func (s *visitorState) visit() {
+ if s.active {
+ delete(s.keys, strings.Join(s.path, "."))
+ }
+}
+
+func (s *visitorState) visitAll() {
+ if s.active {
+ for k := range s.keys {
+ if strings.HasPrefix(k, strings.Join(s.path, ".")) {
+ delete(s.keys, k)
+ }
+ }
+ }
+}
+
+func (s *visitorState) validate() error {
+ if !s.active {
+ return nil
+ }
+ undecoded := make([]string, 0, len(s.keys))
+ for key := range s.keys {
+ undecoded = append(undecoded, key)
+ }
+ sort.Strings(undecoded)
+ if len(undecoded) > 0 {
+ return fmt.Errorf("undecoded keys: %q", undecoded)
+ }
+ return nil
+}
+
+func insertKeys(path []string, m map[string]struct{}, tree *Tree) {
+ for k, v := range tree.values {
+ switch node := v.(type) {
+ case []*Tree:
+ for i, item := range node {
+ insertKeys(append(path, k, strconv.Itoa(i)), m, item)
+ }
+ case *Tree:
+ insertKeys(append(path, k), m, node)
+ case *tomlValue:
+ m[strings.Join(append(path, k), ".")] = struct{}{}
+ }
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
new file mode 100644
index 00000000..792b72ed
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
@@ -0,0 +1,39 @@
+title = "TOML Marshal Testing"
+
+[basic_lists]
+ floats = [12.3,45.6,78.9]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ ints = [8001,8001,8002]
+ uints = [5002,5003]
+ strings = ["One","Two","Three"]
+
+[[subdocptrs]]
+ name = "Second"
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.second]
+ name = "Second"
+
+ [subdoc.first]
+ name = "First"
+
+[basic]
+ uint = 5001
+ bool = true
+ float = 123.4
+ float64 = 123.456782132399
+ int = 5000
+ string = "Bite me"
+ date = 1979-05-27T07:32:00Z
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
index 1c5f98e7..ba5e110b 100644
--- a/vendor/github.com/pelletier/go-toml/marshal_test.toml
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -4,6 +4,7 @@ title = "TOML Marshal Testing"
bool = true
date = 1979-05-27T07:32:00Z
float = 123.4
+ float64 = 123.456782132399
int = 5000
string = "Bite me"
uint = 5001
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index 2d27599a..7bf40bbd 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -77,8 +77,10 @@ func (p *tomlParser) parseStart() tomlParserStateFn {
return p.parseAssign
case tokenEOF:
return nil
+ case tokenError:
+ p.raiseError(tok, "parsing error: %s", tok.String())
default:
- p.raiseError(tok, "unexpected token")
+ p.raiseError(tok, "unexpected token %s", tok.typ)
}
return nil
}
@@ -156,6 +158,11 @@ func (p *tomlParser) parseGroup() tomlParserStateFn {
if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
p.raiseError(key, "%s", err)
}
+ destTree := p.tree.GetPath(keys)
+ if target, ok := destTree.(*Tree); ok && target != nil && target.inline {
+ p.raiseError(key, "could not re-define exist inline table or its sub-table : %s",
+ strings.Join(keys, "."))
+ }
p.assume(tokenRightBracket)
p.currentTable = keys
return p.parseStart
@@ -165,6 +172,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
key := p.getToken()
p.assume(tokenEqual)
+ parsedKey, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid key: %s", err.Error())
+ }
+
value := p.parseRvalue()
var tableKey []string
if len(p.currentTable) > 0 {
@@ -173,6 +185,9 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
tableKey = []string{}
}
+ prefixKey := parsedKey[0 : len(parsedKey)-1]
+ tableKey = append(tableKey, prefixKey...)
+
// find the table to assign, looking out for arrays of tables
var targetNode *Tree
switch node := p.tree.GetPath(tableKey).(type) {
@@ -180,17 +195,24 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
targetNode = node[len(node)-1]
case *Tree:
targetNode = node
+ case nil:
+ // create intermediate
+ if err := p.tree.createSubTree(tableKey, key.Position); err != nil {
+ p.raiseError(key, "could not create intermediate group: %s", err)
+ }
+ targetNode = p.tree.GetPath(tableKey).(*Tree)
default:
p.raiseError(key, "Unknown table type for path: %s",
strings.Join(tableKey, "."))
}
- // assign value to the found table
- keyVals := []string{key.val}
- if len(keyVals) != 1 {
- p.raiseError(key, "Invalid key")
+ if targetNode.inline {
+ p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s",
+ strings.Join(tableKey, "."))
}
- keyVal := keyVals[0]
+
+ // assign value to the found table
+ keyVal := parsedKey[len(parsedKey)-1]
localKey := []string{keyVal}
finalKey := append(tableKey, keyVal)
if targetNode.GetPath(localKey) != nil {
@@ -301,7 +323,41 @@ func (p *tomlParser) parseRvalue() interface{} {
}
return val
case tokenDate:
- val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
+ layout := time.RFC3339Nano
+ if !strings.Contains(tok.val, "T") {
+ layout = strings.Replace(layout, "T", " ", 1)
+ }
+ val, err := time.ParseInLocation(layout, tok.val, time.UTC)
+ if err != nil {
+ p.raiseError(tok, "%s", err)
+ }
+ return val
+ case tokenLocalDate:
+ v := strings.Replace(tok.val, " ", "T", -1)
+ isDateTime := false
+ isTime := false
+ for _, c := range v {
+ if c == 'T' || c == 't' {
+ isDateTime = true
+ break
+ }
+ if c == ':' {
+ isTime = true
+ break
+ }
+ }
+
+ var val interface{}
+ var err error
+
+ if isDateTime {
+ val, err = ParseLocalDateTime(v)
+ } else if isTime {
+ val, err = ParseLocalTime(v)
+ } else {
+ val, err = ParseLocalDate(v)
+ }
+
if err != nil {
p.raiseError(tok, "%s", err)
}
@@ -338,18 +394,21 @@ Loop:
case tokenRightCurlyBrace:
p.getToken()
break Loop
- case tokenKey:
+ case tokenKey, tokenInteger, tokenString:
if !tokenIsComma(previous) && previous != nil {
p.raiseError(follow, "comma expected between fields in inline table")
}
key := p.getToken()
p.assume(tokenEqual)
- value := p.parseRvalue()
- tree.Set(key.val, value)
- case tokenComma:
- if previous == nil {
- p.raiseError(follow, "inline table cannot start with a comma")
+
+ parsedKey, err := parseKey(key.val)
+ if err != nil {
+ p.raiseError(key, "invalid key: %s", err)
}
+
+ value := p.parseRvalue()
+ tree.SetPath(parsedKey, value)
+ case tokenComma:
if tokenIsComma(previous) {
p.raiseError(follow, "need field between two commas in inline table")
}
@@ -362,12 +421,13 @@ Loop:
if tokenIsComma(previous) {
p.raiseError(previous, "trailing comma at the end of inline table")
}
+ tree.inline = true
return tree
}
func (p *tomlParser) parseArray() interface{} {
var array []interface{}
- arrayType := reflect.TypeOf(nil)
+ arrayType := reflect.TypeOf(newTree())
for {
follow := p.peek()
if follow == nil || follow.typ == tokenEOF {
@@ -378,11 +438,8 @@ func (p *tomlParser) parseArray() interface{} {
break
}
val := p.parseRvalue()
- if arrayType == nil {
- arrayType = reflect.TypeOf(val)
- }
if reflect.TypeOf(val) != arrayType {
- p.raiseError(follow, "mixed types in array")
+ arrayType = nil
}
array = append(array, val)
follow = p.peek()
@@ -396,6 +453,12 @@ func (p *tomlParser) parseArray() interface{} {
p.getToken()
}
}
+
+ // if the array is a mixed-type array or its length is 0,
+ // don't convert it to a table array
+ if len(array) <= 0 {
+ arrayType = nil
+ }
// An array of Trees is actually an array of inline
// tables, which is a shorthand for a table array. If the
// array was not converted from []interface{} to []*Tree,
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
deleted file mode 100644
index ba6adf3f..00000000
--- a/vendor/github.com/pelletier/go-toml/test.sh
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/bin/bash
-# fail out of the script if anything here fails
-set -e
-set -o pipefail
-
-# set the path to the present working directory
-export GOPATH=`pwd`
-
-function git_clone() {
- path=$1
- branch=$2
- version=$3
- if [ ! -d "src/$path" ]; then
- mkdir -p src/$path
- git clone https://$path.git src/$path
- fi
- pushd src/$path
- git checkout "$branch"
- git reset --hard "$version"
- popd
-}
-
-# Remove potential previous runs
-rm -rf src test_program_bin toml-test
-
-go get github.com/pelletier/go-buffruneio
-go get github.com/davecgh/go-spew/spew
-go get gopkg.in/yaml.v2
-go get github.com/BurntSushi/toml
-
-# get code for BurntSushi TOML validation
-# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
-git_clone github.com/BurntSushi/toml master HEAD
-git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD
-
-# build the BurntSushi test application
-go build -o toml-test github.com/BurntSushi/toml-test
-
-# vendorize the current lib for testing
-# NOTE: this basically mocks an install without having to go back out to github for code
-mkdir -p src/github.com/pelletier/go-toml/cmd
-mkdir -p src/github.com/pelletier/go-toml/query
-cp *.go *.toml src/github.com/pelletier/go-toml
-cp -R cmd/* src/github.com/pelletier/go-toml/cmd
-cp -R query/* src/github.com/pelletier/go-toml/query
-go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
-
-# Run basic unit tests
-go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out
-go test github.com/pelletier/go-toml/cmd/tomljson
-go test github.com/pelletier/go-toml/query
-
-# run the entire BurntSushi test suite
-if [[ $# -eq 0 ]] ; then
- echo "Running all BurntSushi tests"
- ./toml-test ./test_program_bin | tee test_out
-else
- # run a specific test
- test=$1
- test_path='src/github.com/BurntSushi/toml-test/tests'
- valid_test="$test_path/valid/$test"
- invalid_test="$test_path/invalid/$test"
-
- if [ -e "$valid_test.toml" ]; then
- echo "Valid Test TOML for $test:"
- echo "===="
- cat "$valid_test.toml"
-
- echo "Valid Test JSON for $test:"
- echo "===="
- cat "$valid_test.json"
-
- echo "Go-TOML Output for $test:"
- echo "===="
- cat "$valid_test.toml" | ./test_program_bin
- fi
-
- if [ -e "$invalid_test.toml" ]; then
- echo "Invalid Test TOML for $test:"
- echo "===="
- cat "$invalid_test.toml"
-
- echo "Go-TOML Output for $test:"
- echo "===="
- echo "go-toml Output:"
- cat "$invalid_test.toml" | ./test_program_bin
- fi
-fi
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
index 1a908134..6af4ec46 100644
--- a/vendor/github.com/pelletier/go-toml/token.go
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -1,10 +1,6 @@
package toml
-import (
- "fmt"
- "strconv"
- "unicode"
-)
+import "fmt"
// Define tokens
type tokenType int
@@ -35,6 +31,7 @@ const (
tokenDoubleLeftBracket
tokenDoubleRightBracket
tokenDate
+ tokenLocalDate
tokenKeyGroup
tokenKeyGroupArray
tokenComma
@@ -68,7 +65,8 @@ var tokenTypeNames = []string{
")",
"]]",
"[[",
- "Date",
+ "LocalDate",
+ "LocalDate",
"KeyGroup",
"KeyGroupArray",
",",
@@ -95,14 +93,6 @@ func (tt tokenType) String() string {
return "Unknown"
}
-func (t token) Int() int {
- if result, err := strconv.Atoi(t.val); err != nil {
- panic(err)
- } else {
- return result
- }
-}
-
func (t token) String() string {
switch t.typ {
case tokenEOF:
@@ -119,7 +109,7 @@ func isSpace(r rune) bool {
}
func isAlphanumeric(r rune) bool {
- return unicode.IsLetter(r) || r == '_'
+ return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_'
}
func isKeyChar(r rune) bool {
@@ -134,7 +124,7 @@ func isKeyStartChar(r rune) bool {
}
func isDigit(r rune) bool {
- return unicode.IsNumber(r)
+ return '0' <= r && r <= '9'
}
func isHexDigit(r rune) bool {
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index 98c185ad..cbb89a9a 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -23,13 +23,18 @@ type Tree struct {
values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
comment string
commented bool
+ inline bool
position Position
}
func newTree() *Tree {
+ return newTreeWithPosition(Position{})
+}
+
+func newTreeWithPosition(pos Position) *Tree {
return &Tree{
values: make(map[string]interface{}),
- position: Position{},
+ position: pos,
}
}
@@ -117,6 +122,89 @@ func (t *Tree) GetPath(keys []string) interface{} {
}
}
+// GetArray returns the value at key in the Tree.
+// It returns a typed slice ([]string, []int64, etc.) when the key holds a homogeneous list.
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// Returns nil if the path does not exist in the tree.
+// If key is empty, the current tree is returned.
+func (t *Tree) GetArray(key string) interface{} {
+ if key == "" {
+ return t
+ }
+ return t.GetArrayPath(strings.Split(key, "."))
+}
+
+// GetArrayPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetArrayPath(keys []string) interface{} {
+ if len(keys) == 0 {
+ return t
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return nil
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return nil
+ }
+ subtree = node[len(node)-1]
+ default:
+ return nil // cannot navigate through other node types
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ switch n := node.value.(type) {
+ case []interface{}:
+ return getArray(n)
+ default:
+ return node.value
+ }
+ default:
+ return node
+ }
+}
+
+// if homogeneous array, then return slice type object over []interface{}
+func getArray(n []interface{}) interface{} {
+ var s []string
+ var i64 []int64
+ var f64 []float64
+ var bl []bool
+ for _, value := range n {
+ switch v := value.(type) {
+ case string:
+ s = append(s, v)
+ case int64:
+ i64 = append(i64, v)
+ case float64:
+ f64 = append(f64, v)
+ case bool:
+ bl = append(bl, v)
+ default:
+ return n
+ }
+ }
+ if len(s) == len(n) {
+ return s
+ } else if len(i64) == len(n) {
+ return i64
+ } else if len(f64) == len(n) {
+ return f64
+ } else if len(bl) == len(n) {
+ return bl
+ }
+ return n
+}
+
// GetPosition returns the position of the given key.
func (t *Tree) GetPosition(key string) Position {
if key == "" {
@@ -125,6 +213,50 @@ func (t *Tree) GetPosition(key string) Position {
return t.GetPositionPath(strings.Split(key, "."))
}
+// SetPositionPath sets the position of element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree position is set.
+func (t *Tree) SetPositionPath(keys []string, pos Position) {
+ if len(keys) == 0 {
+ t.position = pos
+ return
+ }
+ subtree := t
+ for _, intermediateKey := range keys[:len(keys)-1] {
+ value, exists := subtree.values[intermediateKey]
+ if !exists {
+ return
+ }
+ switch node := value.(type) {
+ case *Tree:
+ subtree = node
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return
+ }
+ subtree = node[len(node)-1]
+ default:
+ return
+ }
+ }
+ // branch based on final node type
+ switch node := subtree.values[keys[len(keys)-1]].(type) {
+ case *tomlValue:
+ node.position = pos
+ return
+ case *Tree:
+ node.position = pos
+ return
+ case []*Tree:
+ // go to most recent element
+ if len(node) == 0 {
+ return
+ }
+ node[len(node)-1].position = pos
+ return
+ }
+}
+
// GetPositionPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPositionPath(keys []string) Position {
@@ -194,10 +326,10 @@ func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
// formatting instructions to the key, that will be reused by Marshal().
func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
subtree := t
- for _, intermediateKey := range keys[:len(keys)-1] {
+ for i, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
- nextTree = newTree()
+ nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
subtree.values[intermediateKey] = nextTree // add new element here
}
switch node := nextTree.(type) {
@@ -207,7 +339,8 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac
// go to most recent element
if len(node) == 0 {
// create element if it does not exist
- subtree.values[intermediateKey] = append(node, newTree())
+ node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}))
+ subtree.values[intermediateKey] = node
}
subtree = node[len(node)-1]
}
@@ -215,19 +348,27 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac
var toInsert interface{}
- switch value.(type) {
+ switch v := value.(type) {
case *Tree:
- tt := value.(*Tree)
- tt.comment = opts.Comment
+ v.comment = opts.Comment
+ v.commented = opts.Commented
toInsert = value
case []*Tree:
+ for i := range v {
+ v[i].commented = opts.Commented
+ }
toInsert = value
case *tomlValue:
- tt := value.(*tomlValue)
- tt.comment = opts.Comment
- toInsert = tt
+ v.comment = opts.Comment
+ v.commented = opts.Commented
+ v.multiline = opts.Multiline
+ toInsert = v
default:
- toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline}
+ toInsert = &tomlValue{value: value,
+ comment: opts.Comment,
+ commented: opts.Commented,
+ multiline: opts.Multiline,
+ position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}}
}
subtree.values[keys[len(keys)-1]] = toInsert
@@ -256,44 +397,35 @@ func (t *Tree) SetPath(keys []string, value interface{}) {
// SetPathWithComment is the same as SetPath, but allows you to provide comment
// information to the key, that will be reused by Marshal().
func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
- subtree := t
- for _, intermediateKey := range keys[:len(keys)-1] {
- nextTree, exists := subtree.values[intermediateKey]
- if !exists {
- nextTree = newTree()
- subtree.values[intermediateKey] = nextTree // add new element here
- }
- switch node := nextTree.(type) {
- case *Tree:
- subtree = node
- case []*Tree:
- // go to most recent element
- if len(node) == 0 {
- // create element if it does not exist
- subtree.values[intermediateKey] = append(node, newTree())
- }
- subtree = node[len(node)-1]
- }
+ t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value)
+}
+
+// Delete removes a key from the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+func (t *Tree) Delete(key string) error {
+ keys, err := parseKey(key)
+ if err != nil {
+ return err
}
+ return t.DeletePath(keys)
+}
- var toInsert interface{}
-
- switch value.(type) {
+// DeletePath removes a key from the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+func (t *Tree) DeletePath(keys []string) error {
+ keyLen := len(keys)
+ if keyLen == 1 {
+ delete(t.values, keys[0])
+ return nil
+ }
+ tree := t.GetPath(keys[:keyLen-1])
+ item := keys[keyLen-1]
+ switch node := tree.(type) {
case *Tree:
- tt := value.(*Tree)
- tt.comment = comment
- toInsert = value
- case []*Tree:
- toInsert = value
- case *tomlValue:
- tt := value.(*tomlValue)
- tt.comment = comment
- toInsert = tt
- default:
- toInsert = &tomlValue{value: value, comment: comment, commented: commented}
+ delete(node.values, item)
+ return nil
}
-
- subtree.values[keys[len(keys)-1]] = toInsert
+ return errors.New("no such key to delete")
}
// createSubTree takes a tree and a key and create the necessary intermediate
@@ -305,11 +437,12 @@ func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool,
// Returns nil on success, error object on failure
func (t *Tree) createSubTree(keys []string, pos Position) error {
subtree := t
- for _, intermediateKey := range keys {
+ for i, intermediateKey := range keys {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
- tree := newTree()
+ tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})
tree.position = pos
+ tree.inline = subtree.inline
subtree.values[intermediateKey] = tree
nextTree = tree
}
@@ -337,10 +470,39 @@ func LoadBytes(b []byte) (tree *Tree, err error) {
err = errors.New(r.(string))
}
}()
+
+ if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) {
+ b = b[4:]
+ } else if len(b) >= 3 && hasUTF8BOM3(b) {
+ b = b[3:]
+ } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) {
+ b = b[2:]
+ }
+
tree = parseToml(lexToml(b))
return
}
+func hasUTF16BigEndianBOM2(b []byte) bool {
+ return b[0] == 0xFE && b[1] == 0xFF
+}
+
+func hasUTF16LittleEndianBOM2(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE
+}
+
+func hasUTF8BOM3(b []byte) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+func hasUTF32BigEndianBOM4(b []byte) bool {
+ return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF
+}
+
+func hasUTF32LittleEndianBOM4(b []byte) bool {
+ return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00
+}
+
// LoadReader creates a Tree from any io.Reader.
func LoadReader(reader io.Reader) (tree *Tree, err error) {
inputBytes, err := ioutil.ReadAll(reader)
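
The toml.go changes above add typed array lookup (`GetArray`), key deletion (`Delete`/`DeletePath`), and BOM stripping in `LoadBytes`. A minimal sketch of how the new surface behaves, assuming the package's existing `Load` and `Has` helpers:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Load goes through LoadBytes, which now strips a leading
	// UTF-8/UTF-16/UTF-32 BOM before lexing.
	tree, err := toml.Load("ports = [8080, 8081]\n\n[db]\nhost = \"localhost\"\n")
	if err != nil {
		panic(err)
	}

	// GetArray returns a typed slice ([]int64 here) for a homogeneous array.
	ports := tree.GetArray("ports").([]int64)
	fmt.Println(ports) // [8080 8081]

	// Delete removes a key addressed by a dot-separated path.
	if err := tree.Delete("db.host"); err != nil {
		panic(err)
	}
	fmt.Println(tree.Has("db.host")) // false
}
```
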
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
index 79610e9b..80353500 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_create.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -57,6 +57,19 @@ func simpleValueCoercion(object interface{}) (interface{}, error) {
return float64(original), nil
case fmt.Stringer:
return original.String(), nil
+ case []interface{}:
+ value := reflect.ValueOf(original)
+ length := value.Len()
+ arrayValue := reflect.MakeSlice(value.Type(), 0, length)
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return arrayValue.Interface(), nil
default:
return nil, fmt.Errorf("cannot convert type %T to Tree", object)
}
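
The new `[]interface{}` branch in `simpleValueCoercion` coerces slice elements one by one instead of rejecting the whole value. A sketch of the effect, assuming `TreeFromMap` is the public entry point that routes map values through this coercion:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	m := map[string]interface{}{
		"name": "demo",
		// A []interface{} value is now coerced element by element
		// instead of failing with a conversion error.
		"tags": []interface{}{"a", "b", "c"},
	}

	tree, err := toml.TreeFromMap(m)
	if err != nil {
		panic(err)
	}

	s, err := tree.ToTomlString()
	if err != nil {
		panic(err)
	}
	fmt.Print(s)
	// name = "demo"
	// tags = ["a", "b", "c"]
}
```
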
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
index e4049e29..ae6dac49 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_write.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"math"
+ "math/big"
"reflect"
"sort"
"strconv"
@@ -12,26 +13,50 @@ import (
"time"
)
+type valueComplexity int
+
+const (
+ valueSimple valueComplexity = iota + 1
+ valueComplex
+)
+
+type sortNode struct {
+ key string
+ complexity valueComplexity
+}
+
// Encodes a string to a TOML-compliant multi-line string value
// This function is a clone of the existing encodeTomlString function, except that whitespace characters
// are preserved. Quotation marks and backslashes are also not escaped.
-func encodeMultilineTomlString(value string) string {
+func encodeMultilineTomlString(value string, commented string) string {
var b bytes.Buffer
+ adjacentQuoteCount := 0
- for _, rr := range value {
+ b.WriteString(commented)
+ for i, rr := range value {
+ if rr != '"' {
+ adjacentQuoteCount = 0
+ } else {
+ adjacentQuoteCount++
+ }
switch rr {
case '\b':
b.WriteString(`\b`)
case '\t':
b.WriteString("\t")
case '\n':
- b.WriteString("\n")
+ b.WriteString("\n" + commented)
case '\f':
b.WriteString(`\f`)
case '\r':
b.WriteString("\r")
case '"':
- b.WriteString(`"`)
+ if adjacentQuoteCount >= 3 || i == len(value)-1 {
+ adjacentQuoteCount = 0
+ b.WriteString(`\"`)
+ } else {
+ b.WriteString(`"`)
+ }
case '\\':
b.WriteString(`\`)
default:
@@ -78,7 +103,30 @@ func encodeTomlString(value string) string {
return b.String()
}
-func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) {
+func tomlTreeStringRepresentation(t *Tree, ord marshalOrder) (string, error) {
+ var orderedVals []sortNode
+ switch ord {
+ case OrderPreserve:
+ orderedVals = sortByLines(t)
+ default:
+ orderedVals = sortAlphabetical(t)
+ }
+
+ var values []string
+ for _, node := range orderedVals {
+ k := node.key
+ v := t.values[k]
+
+ repr, err := tomlValueStringRepresentation(v, "", "", ord, false)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, quoteKeyIfNeeded(k)+" = "+repr)
+ }
+ return "{ " + strings.Join(values, ", ") + " }", nil
+}
+
+func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord marshalOrder, arraysOneElementPerLine bool) (string, error) {
// this interface check is added to dereference the change made in the writeTo function.
// That change was made to allow this function to see formatting options.
tv, ok := v.(*tomlValue)
@@ -94,20 +142,28 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
case int64:
return strconv.FormatInt(value, 10), nil
case float64:
- // Ensure a round float does contain a decimal point. Otherwise feeding
- // the output back to the parser would convert to an integer.
- if math.Trunc(value) == value {
- return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil
+ // Default bit length is full 64
+ bits := 64
+ // Float panics if nan is used
+ if !math.IsNaN(value) {
+ // if 32 bit accuracy is enough to exactly show, use 32
+ _, acc := big.NewFloat(value).Float32()
+ if acc == big.Exact {
+ bits = 32
+ }
}
- return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil
+ if math.Trunc(value) == value {
+ return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil
+ }
+ return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil
case string:
if tv.multiline {
- return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil
+ return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil
}
return "\"" + encodeTomlString(value) + "\"", nil
case []byte:
b, _ := v.([]byte)
- return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine)
+ return string(b), nil
case bool:
if value {
return "true", nil
@@ -115,6 +171,14 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
return "false", nil
case time.Time:
return value.Format(time.RFC3339), nil
+ case LocalDate:
+ return value.String(), nil
+ case LocalDateTime:
+ return value.String(), nil
+ case LocalTime:
+ return value.String(), nil
+ case *Tree:
+ return tomlTreeStringRepresentation(value, ord)
case nil:
return "", nil
}
@@ -125,7 +189,7 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
var values []string
for i := 0; i < rv.Len(); i++ {
item := rv.Index(i).Interface()
- itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine)
+ itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine)
if err != nil {
return "", err
}
@@ -139,131 +203,252 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
for _, value := range values {
stringBuffer.WriteString(valueIndent)
- stringBuffer.WriteString(value)
+ stringBuffer.WriteString(commented + value)
stringBuffer.WriteString(`,`)
stringBuffer.WriteString("\n")
}
- stringBuffer.WriteString(indent + "]")
+ stringBuffer.WriteString(indent + commented + "]")
return stringBuffer.String(), nil
}
- return "[" + strings.Join(values, ",") + "]", nil
+ return "[" + strings.Join(values, ", ") + "]", nil
}
return "", fmt.Errorf("unsupported value type %T: %v", v, v)
}
-func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
- simpleValuesKeys := make([]string, 0)
- complexValuesKeys := make([]string, 0)
+func getTreeArrayLine(trees []*Tree) (line int) {
+ // get lowest line number that is not 0
+ for _, tv := range trees {
+ if tv.position.Line < line || line == 0 {
+ line = tv.position.Line
+ }
+ }
+ return
+}
+
+func sortByLines(t *Tree) (vals []sortNode) {
+ var (
+ line int
+ lines []int
+ tv *Tree
+ tom *tomlValue
+ node sortNode
+ )
+ vals = make([]sortNode, 0)
+ m := make(map[int]sortNode)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *Tree:
+ tv = v.(*Tree)
+ line = tv.position.Line
+ node = sortNode{key: k, complexity: valueComplex}
+ case []*Tree:
+ line = getTreeArrayLine(v.([]*Tree))
+ node = sortNode{key: k, complexity: valueComplex}
+ default:
+ tom = v.(*tomlValue)
+ line = tom.position.Line
+ node = sortNode{key: k, complexity: valueSimple}
+ }
+ lines = append(lines, line)
+ vals = append(vals, node)
+ m[line] = node
+ }
+ sort.Ints(lines)
+
+ for i, line := range lines {
+ vals[i] = m[line]
+ }
+
+ return vals
+}
+
+func sortAlphabetical(t *Tree) (vals []sortNode) {
+ var (
+ node sortNode
+ simpVals []string
+ compVals []string
+ )
+ vals = make([]sortNode, 0)
+ m := make(map[string]sortNode)
for k := range t.values {
v := t.values[k]
switch v.(type) {
case *Tree, []*Tree:
- complexValuesKeys = append(complexValuesKeys, k)
+ node = sortNode{key: k, complexity: valueComplex}
+ compVals = append(compVals, node.key)
default:
- simpleValuesKeys = append(simpleValuesKeys, k)
+ node = sortNode{key: k, complexity: valueSimple}
+ simpVals = append(simpVals, node.key)
}
+ vals = append(vals, node)
+ m[node.key] = node
}
- sort.Strings(simpleValuesKeys)
- sort.Strings(complexValuesKeys)
-
- for _, k := range simpleValuesKeys {
- v, ok := t.values[k].(*tomlValue)
- if !ok {
- return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
- }
-
- repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine)
- if err != nil {
- return bytesCount, err
- }
-
- if v.comment != "" {
- comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
- start := "# "
- if strings.HasPrefix(comment, "#") {
- start = ""
- }
- writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
- bytesCount += int64(writtenBytesCountComment)
- if errc != nil {
- return bytesCount, errc
- }
- }
-
- var commented string
- if v.commented {
- commented = "# "
- }
- writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
- bytesCount += int64(writtenBytesCount)
- if err != nil {
- return bytesCount, err
- }
+ // Simples first to match previous implementation
+ sort.Strings(simpVals)
+ i := 0
+ for _, key := range simpVals {
+ vals[i] = m[key]
+ i++
}
- for _, k := range complexValuesKeys {
- v := t.values[k]
+ sort.Strings(compVals)
+ for _, key := range compVals {
+ vals[i] = m[key]
+ i++
+ }
- combinedKey := k
- if keyspace != "" {
- combinedKey = keyspace + "." + combinedKey
- }
- var commented string
- if t.commented {
- commented = "# "
- }
+ return vals
+}
- switch node := v.(type) {
- // node has to be of those two types given how keys are sorted above
- case *Tree:
- tv, ok := t.values[k].(*Tree)
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
+ return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false)
+}
+
+func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, indentString string, parentCommented bool) (int64, error) {
+ var orderedVals []sortNode
+
+ switch ord {
+ case OrderPreserve:
+ orderedVals = sortByLines(t)
+ default:
+ orderedVals = sortAlphabetical(t)
+ }
+
+ for _, node := range orderedVals {
+ switch node.complexity {
+ case valueComplex:
+ k := node.key
+ v := t.values[k]
+
+ combinedKey := quoteKeyIfNeeded(k)
+ if keyspace != "" {
+ combinedKey = keyspace + "." + combinedKey
+ }
+
+ switch node := v.(type) {
+ // node has to be of those two types given how keys are sorted above
+ case *Tree:
+ tv, ok := t.values[k].(*Tree)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+ if tv.comment != "" {
+ comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ var commented string
+ if parentCommented || t.commented || tv.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || tv.commented)
+ if err != nil {
+ return bytesCount, err
+ }
+ case []*Tree:
+ for _, subTree := range node {
+ var commented string
+ if parentCommented || t.commented || subTree.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, parentCommented || t.commented || subTree.commented)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+ default: // Simple
+ k := node.key
+ v, ok := t.values[k].(*tomlValue)
if !ok {
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
}
- if tv.comment != "" {
- comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+
+ var commented string
+ if parentCommented || t.commented || v.commented {
+ commented = "# "
+ }
+ repr, err := tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ if v.comment != "" {
+ comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
start := "# "
if strings.HasPrefix(comment, "#") {
start = ""
}
- writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
bytesCount += int64(writtenBytesCountComment)
if errc != nil {
return bytesCount, errc
}
}
- writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+
+ quotedKey := quoteKeyIfNeeded(k)
+ writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
- bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine)
- if err != nil {
- return bytesCount, err
- }
- case []*Tree:
- for _, subTree := range node {
- writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
- bytesCount += int64(writtenBytesCount)
- if err != nil {
- return bytesCount, err
- }
-
- bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine)
- if err != nil {
- return bytesCount, err
- }
- }
}
}
return bytesCount, nil
}
+// quote a key if it does not fit the bare key format (A-Za-z0-9_-)
+// quoted keys use the same rules as strings
+func quoteKeyIfNeeded(k string) string {
+ // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain
+ // keys that have already been quoted.
+ // not an ideal situation, but good enough as a stopgap.
+ if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
+ return k
+ }
+ isBare := true
+ for _, r := range k {
+ if !isValidBareChar(r) {
+ isBare = false
+ break
+ }
+ }
+ if isBare {
+ return k
+ }
+ return quoteKey(k)
+}
+
+func quoteKey(k string) string {
+ return "\"" + encodeTomlString(k) + "\""
+}
+
func writeStrings(w io.Writer, s ...string) (int, error) {
var n int
for i := range s {
@@ -286,12 +471,11 @@ func (t *Tree) WriteTo(w io.Writer) (int64, error) {
// Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToString returns a non-nil error.
func (t *Tree) ToTomlString() (string, error) {
- var buf bytes.Buffer
- _, err := t.WriteTo(&buf)
+ b, err := t.Marshal()
if err != nil {
return "", err
}
- return buf.String(), nil
+ return string(b), nil
}
// String generates a human-readable representation of the current tree.
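
The rewritten writer sorts keys either alphabetically or by source position (`OrderPreserve`), and `quoteKeyIfNeeded` quotes any key that is not bare. A sketch using the encoder's `Order` option, assuming the go-toml v1 `Encoder` API:

```go
package main

import (
	"bytes"
	"fmt"

	toml "github.com/pelletier/go-toml"
)

type config struct {
	Zebra  string            `toml:"zebra"`
	Apple  string            `toml:"apple"`
	Extras map[string]string `toml:"extras"`
}

func main() {
	c := config{
		Zebra: "z",
		Apple: "a",
		// "my key" is not a bare key (A-Za-z0-9_-), so the writer quotes it.
		Extras: map[string]string{"my key": "v"},
	}

	var buf bytes.Buffer
	// OrderPreserve keeps declaration order (zebra before apple) instead of
	// the default alphabetical ordering used by writeTo.
	if err := toml.NewEncoder(&buf).Order(toml.OrderPreserve).Encode(c); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// Expected shape (exact indentation may differ):
	// zebra = "z"
	// apple = "a"
	//
	// [extras]
	//   "my key" = "v"
}
```
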
diff --git a/vendor/github.com/phayes/checkstyle/.scrutinizer.yml b/vendor/github.com/phayes/checkstyle/.scrutinizer.yml
new file mode 100644
index 00000000..d9284b6b
--- /dev/null
+++ b/vendor/github.com/phayes/checkstyle/.scrutinizer.yml
@@ -0,0 +1,15 @@
+build:
+ dependencies:
+ before:
+ - 'source <(curl -fsSL https://raw.githubusercontent.com/phayes/go-scrutinize/master/install-golang)'
+
+ tests:
+ override:
+ -
+ command: 'cd $PROJECTPATH && go-scrutinize'
+ coverage:
+ file: 'coverage.xml'
+ format: 'clover'
+ analysis:
+ file: 'checkstyle_report.xml'
+ format: 'general-checkstyle'
\ No newline at end of file
diff --git a/vendor/github.com/phayes/checkstyle/LICENSE b/vendor/github.com/phayes/checkstyle/LICENSE
new file mode 100644
index 00000000..6dc912f3
--- /dev/null
+++ b/vendor/github.com/phayes/checkstyle/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2017, Patrick D Hayes
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/phayes/checkstyle/README.md b/vendor/github.com/phayes/checkstyle/README.md
new file mode 100644
index 00000000..358cf675
--- /dev/null
+++ b/vendor/github.com/phayes/checkstyle/README.md
@@ -0,0 +1,44 @@
+# checkstyle
+[![GoDoc](https://godoc.org/github.com/phayes/checkstyle?status.svg)](https://godoc.org/github.com/phayes/checkstyle)
+[![Go Report Card](https://goreportcard.com/badge/github.com/phayes/checkstyle)](https://goreportcard.com/report/github.com/phayes/checkstyle)
+[![Build Status](https://scrutinizer-ci.com/g/phayes/checkstyle/badges/build.png?b=master)](https://scrutinizer-ci.com/g/phayes/checkstyle/build-status/master)
+
+Read and write checkstyle_report.xml files with golang
+
+Checkstyle XML files are a standard file format for reporting errors in source code, and are often generated by static analysis tools.
+
+Example usage:
+
+```go
+
+import "github.com/phayes/checkstyle"
+
+// Print XML into human readable format
+checkStyle, err := checkstyle.ReadFile("checkstyle_report.xml")
+if err != nil {
+ log.Fatal(err)
+}
+for _, file := range checkStyle.File {
+ fmt.Println(file.Name)
+ for _, codingError := range file.Error {
+ fmt.Println("\t", codingError.Line, codingError.Message)
+ }
+}
+
+// Create a new XML file from scratch
+check := checkstyle.New()
+
+// Ensure that a file has been added
+file := check.EnsureFile("/path/to/file")
+
+// Create an error on line 10, column 5
+codingError := checkstyle.NewError(10, 5, checkstyle.SeverityWarning, "format", "line must end with a full stop")
+
+// Add the error to the file
+file.AddError(codingError)
+
+// Output XML
+fmt.Print(check)
+```
+
+For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html
diff --git a/vendor/github.com/phayes/checkstyle/checkstyle.go b/vendor/github.com/phayes/checkstyle/checkstyle.go
new file mode 100644
index 00000000..cabbd4b4
--- /dev/null
+++ b/vendor/github.com/phayes/checkstyle/checkstyle.go
@@ -0,0 +1,112 @@
+package checkstyle
+
+import "encoding/xml"
+import "io/ioutil"
+
+// DefaultCheckStyleVersion defines the default "version" attribute on the "<checkstyle>" element
+var DefaultCheckStyleVersion = "1.0.0"
+
+// Severity defines a checkstyle severity code
+type Severity string
+
+var (
+ SeverityError Severity = "error"
+ SeverityInfo Severity = "info"
+ SeverityWarning Severity = "warning"
+ SeverityIgnore Severity = "ignore"
+ SeverityNone Severity
+)
+
+// CheckStyle represents a <checkstyle> xml element found in a checkstyle_report.xml file.
+type CheckStyle struct {
+ XMLName xml.Name `xml:"checkstyle"`
+ Version string `xml:"version,attr"`
+ File []*File `xml:"file"`
+}
+
+// AddFile adds a checkstyle.File with the given filename.
+func (cs *CheckStyle) AddFile(csf *File) {
+ cs.File = append(cs.File, csf)
+}
+
+// GetFile gets a CheckStyleFile with the given filename.
+func (cs *CheckStyle) GetFile(filename string) (csf *File, ok bool) {
+ for _, file := range cs.File {
+ if file.Name == filename {
+ csf = file
+ ok = true
+ return
+ }
+ }
+ return
+}
+
+// EnsureFile ensures that a CheckStyleFile with the given name exists
+// Returns either an existing CheckStyleFile (if a file with that name exists)
+// or a new CheckStyleFile (if a file with that name does not exist)
+func (cs *CheckStyle) EnsureFile(filename string) (csf *File) {
+ csf, ok := cs.GetFile(filename)
+ if !ok {
+ csf = NewFile(filename)
+ cs.AddFile(csf)
+ }
+ return csf
+}
+
+// String implements Stringer. Returns as xml.
+func (cs *CheckStyle) String() string {
+ checkStyleXML, err := xml.Marshal(cs)
+ if err != nil {
+ panic(err)
+ }
+ return string(checkStyleXML)
+}
+
+// New returns a new CheckStyle
+func New() *CheckStyle {
+ return &CheckStyle{Version: DefaultCheckStyleVersion, File: []*File{}}
+}
+
+// File represents a <file> xml element.
+type File struct {
+ XMLName xml.Name `xml:"file"`
+ Name string `xml:"name,attr"`
+ Error []*Error `xml:"error"`
+}
+
+// AddError adds a checkstyle.Error to the file.
+func (csf *File) AddError(cse *Error) {
+ csf.Error = append(csf.Error, cse)
+}
+
+// NewFile creates a new checkstyle.File
+func NewFile(filename string) *File {
+ return &File{Name: filename, Error: []*Error{}}
+}
+
+// Error represents an <error> xml element
+type Error struct {
+ XMLName xml.Name `xml:"error"`
+ Line int `xml:"line,attr"`
+ Column int `xml:"column,attr,omitempty"`
+ Severity Severity `xml:"severity,attr,omitempty"`
+ Message string `xml:"message,attr"`
+ Source string `xml:"source,attr"`
+}
+
+// NewError creates a new checkstyle.Error
+// Note that line starts at 0, and column starts at 1
+func NewError(line int, column int, severity Severity, message string, source string) *Error {
+ return &Error{Line: line, Column: column, Severity: severity, Message: message, Source: source}
+}
+
+// ReadFile reads a checkfile.xml file and returns a CheckStyle object.
+func ReadFile(filename string) (*CheckStyle, error) {
+ checkStyleXML, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ checkStyle := New()
+ err = xml.Unmarshal(checkStyleXML, checkStyle)
+ return checkStyle, err
+}
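
A short sketch of the round trip through this vendored package: build a report with the five-argument `NewError`, render it as XML, and read it back with `ReadFile`. File paths are illustrative.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/phayes/checkstyle"
)

func main() {
	check := checkstyle.New()

	// Attach one error to a file entry.
	file := check.EnsureFile("pkg/foo/foo.go")
	file.AddError(checkstyle.NewError(10, 5, checkstyle.SeverityWarning,
		"line must end with a full stop", "gofmt"))

	// String() marshals the whole report to XML.
	report := check.String()
	fmt.Println(report)

	// Round-trip: write it out and parse it again.
	if err := ioutil.WriteFile("checkstyle_report.xml", []byte(report), 0644); err != nil {
		log.Fatal(err)
	}
	parsed, err := checkstyle.ReadFile("checkstyle_report.xml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(parsed.File)) // 1
}
```
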
diff --git a/vendor/github.com/phayes/checkstyle/godoc.go b/vendor/github.com/phayes/checkstyle/godoc.go
new file mode 100644
index 00000000..c9662fe9
--- /dev/null
+++ b/vendor/github.com/phayes/checkstyle/godoc.go
@@ -0,0 +1,36 @@
+/*
+Package checkstyle allows the parsing and generation of checkstyle XML files.
+
+Checkstyle XML files are a standard file format for reporting errors in source code, and are often generated by static analysis tools.
+
+Example usage:
+ // Print XML into human readable format
+ checkStyle, err := checkstyle.ReadFile("checkstyle_report.xml")
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, file := range checkStyle.File {
+ fmt.Println(file.Name)
+ for _, codingError := range file.Error {
+ fmt.Println("\t", codingError.Line, codingError.Message)
+ }
+ }
+
+ // Create a new XML file from scratch
+ check := checkstyle.New()
+
+ // Ensure that a file has been added
+ file := check.EnsureFile("/path/to/file")
+
+ // Create an error on line 10, column 5
+ codingError := checkstyle.NewError(10, 5, checkstyle.SeverityWarning, "format", "line must end with a full stop")
+
+ // Add the error to the file
+ file.AddError(codingError)
+
+ // Output XML
+ fmt.Print(check)
+
+For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html
+*/
+package checkstyle
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
index d4b92663..9159de03 100644
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -1,15 +1,10 @@
language: go
go_import_path: github.com/pkg/errors
go:
- - 1.4.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - 1.10.x
- 1.11.x
+ - 1.12.x
+ - 1.13.x
- tip
script:
- - go test -v ./...
+ - make check
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 00000000..ce9d7cde
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
index 6483ba2a..54dfdcb1 100644
--- a/vendor/github.com/pkg/errors/README.md
+++ b/vendor/github.com/pkg/errors/README.md
@@ -41,11 +41,18 @@ default:
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
## Contributing
-We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
-Before proposing a change, please discuss your change by raising an issue.
+Before sending a PR, please discuss your change by raising an issue.
## License
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
index 7421f326..161aea25 100644
--- a/vendor/github.com/pkg/errors/errors.go
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -82,7 +82,7 @@
//
// if err, ok := err.(stackTracer); ok {
// for _, f := range err.StackTrace() {
-// fmt.Printf("%+s:%d", f)
+// fmt.Printf("%+s:%d\n", f, f)
// }
// }
//
@@ -159,6 +159,9 @@ type withStack struct {
func (w *withStack) Cause() error { return w.error }
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
func (w *withStack) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
@@ -241,6 +244,9 @@ type withMessage struct {
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
func (w *withMessage) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 00000000..be0d10d0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
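
Together with the `Unwrap` methods added to `withStack` and `withMessage` in errors.go, these forwarders let errors wrapped by this package participate in Go 1.13 error chains. A brief sketch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func main() {
	_, err := os.Open("missing.txt")
	wrapped := errors.Wrap(err, "loading config")

	// errors.Is walks the chain through the new Unwrap methods, so the
	// underlying "file does not exist" condition is still visible.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	var pathErr *os.PathError
	if errors.As(wrapped, &pathErr) {
		fmt.Println(pathErr.Path) // missing.txt
	}
}
```
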
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
index 2874a048..779a8348 100644
--- a/vendor/github.com/pkg/errors/stack.go
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -5,10 +5,13 @@ import (
"io"
"path"
"runtime"
+ "strconv"
"strings"
)
// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
type Frame uintptr
// pc returns the program counter for this frame;
@@ -37,6 +40,15 @@ func (f Frame) line() int {
return line
}
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
// Format formats the frame according to the fmt.Formatter interface.
//
// %s source file
@@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) {
case 's':
switch {
case s.Flag('+'):
- pc := f.pc()
- fn := runtime.FuncForPC(pc)
- if fn == nil {
- io.WriteString(s, "unknown")
- } else {
- file, _ := fn.FileLine(pc)
- fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
- }
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
default:
io.WriteString(s, path.Base(f.file()))
}
case 'd':
- fmt.Fprintf(s, "%d", f.line())
+ io.WriteString(s, strconv.Itoa(f.line()))
case 'n':
- name := runtime.FuncForPC(f.pc()).Name()
- io.WriteString(s, funcname(name))
+ io.WriteString(s, funcname(f.name()))
case 'v':
f.Format(s, 's')
io.WriteString(s, ":")
@@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) {
}
}
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
@@ -94,18 +110,32 @@ func (st StackTrace) Format(s fmt.State, verb rune) {
switch {
case s.Flag('+'):
for _, f := range st {
- fmt.Fprintf(s, "\n%+v", f)
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
}
case s.Flag('#'):
fmt.Fprintf(s, "%#v", []Frame(st))
default:
- fmt.Fprintf(s, "%v", []Frame(st))
+ st.formatSlice(s, verb)
}
case 's':
- fmt.Fprintf(s, "%s", []Frame(st))
+ st.formatSlice(s, verb)
}
}
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
// stack represents a stack of program counters.
type stack []uintptr
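
The new `Frame.name` helper also backs `MarshalText`, which renders a frame as `function file:line` for structured logs. A sketch using the unexported `stackTracer` interface documented in errors.go (declared locally here, since the package does not export it):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the interface documented by pkg/errors; the package
// does not export it, so callers declare it themselves.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("boom")

	if st, ok := err.(stackTracer); ok {
		// Marshal the innermost frame as "function file:line".
		text, _ := st.StackTrace()[0].MarshalText()
		fmt.Println(string(text)) // e.g. main.main /path/to/main.go:17
	}
}
```
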
diff --git a/vendor/github.com/quasilyte/go-ruleguard/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/LICENSE
new file mode 100644
index 00000000..f0381fb4
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, Iskander (Alex) Sharipov / quasilyte
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go
new file mode 100644
index 00000000..3ba584d3
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go
@@ -0,0 +1,3 @@
+package dslgen
+
+var Fluent = []byte("package fluent\n\n// Matcher is a main API group-level entry point.\n// It's used to define and configure the group rules.\n// It also represents a map of all rule-local variables.\ntype Matcher map[string]Var\n\n// Import loads given package path into a rule group imports table.\n//\n// That table is used during the rules compilation.\n//\n// The table has the following effect on the rules:\n//\t* For type expressions, it's used to resolve the\n//\t full package paths of qualified types, like `foo.Bar`.\n//\t If Import(`a/b/foo`) is called, `foo.Bar` will match\n//\t `a/b/foo.Bar` type during the pattern execution.\nfunc (m Matcher) Import(pkgPath string) {}\n\n// Match specifies a set of patterns that match a rule being defined.\n// Pattern matching succeeds if at least 1 pattern matches.\n//\n// If none of the given patterns matched, rule execution stops.\nfunc (m Matcher) Match(pattern string, alternatives ...string) Matcher {\n\treturn m\n}\n\n// Where applies additional constraint to a match.\n// If a given cond is not satisfied, a match is rejected and\n// rule execution stops.\nfunc (m Matcher) Where(cond bool) Matcher {\n\treturn m\n}\n\n// Report prints a message if associated rule match is successful.\n//\n// A message is a string that can contain interpolated expressions.\n// For every matched variable it's possible to interpolate\n// their printed representation into the message text with $.\n// An entire match can be addressed with $$.\nfunc (m Matcher) Report(message string) Matcher {\n\treturn m\n}\n\n// Suggest assigns a quickfix suggestion for the matched code.\nfunc (m Matcher) Suggest(suggestion string) Matcher {\n\treturn m\n}\n\n// At binds the reported node to a named submatch.\n// If no explicit location is given, the outermost node ($$) is used.\nfunc (m Matcher) At(v Var) Matcher {\n\treturn m\n}\n\n// Var is a pattern variable that describes a named submatch.\ntype Var struct {\n\t// Pure reports whether expr matched by var is side-effect-free.\n\tPure bool\n\n\t// Const reports whether expr matched by var is a constant value.\n\tConst bool\n\n\t// Addressable reports whether the corresponding expression is addressable.\n\t// See https://golang.org/ref/spec#Address_operators.\n\tAddressable bool\n\n\t// Type is a type of a matched expr.\n\tType ExprType\n\n\t// Test is a captured node text as in the source code.\n\tText MatchedText\n}\n\n// ExprType describes a type of a matcher expr.\ntype ExprType struct {\n\t// Size represents expression type size in bytes.\n\tSize int\n}\n\n// AssignableTo reports whether a type is assign-compatible with a given type.\n// See https://golang.org/pkg/go/types/#AssignableTo.\nfunc (ExprType) AssignableTo(typ string) bool { return boolResult }\n\n// ConvertibleTo reports whether a type is conversible to a given type.\n// See https://golang.org/pkg/go/types/#ConvertibleTo.\nfunc (ExprType) ConvertibleTo(typ string) bool { return boolResult }\n\n// Implements reports whether a type implements a given interface.\n// See https://golang.org/pkg/go/types/#Implements.\nfunc (ExprType) Implements(typ string) bool { return boolResult }\n\n// Is reports whether a type is identical to a given type.\nfunc (ExprType) Is(typ string) bool { return boolResult }\n\n// MatchedText represents a source text associated with a matched node.\ntype MatchedText string\n\n// Matches reports whether the text matches the given regexp pattern.\nfunc (MatchedText) Matches(pattern string) bool { return boolResult }\n\n\n\nvar boolResult bool\n\n")
diff --git a/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go
new file mode 100644
index 00000000..a2269b2e
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go
@@ -0,0 +1,53 @@
+// +build generate
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+func main() {
+ // See #23.
+
+ data, err := dirToBytes("../dsl/fluent")
+ if err != nil {
+ panic(err)
+ }
+
+ f, err := os.Create("./dsl_sources.go")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+
+ fmt.Fprintf(f, `package dslgen
+
+var Fluent = []byte(%q)
+`, string(data))
+}
+
+func dirToBytes(dir string) ([]byte, error) {
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ for i, f := range files {
+ data, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
+ if err != nil {
+ return nil, err
+ }
+ if i != 0 {
+ newline := bytes.IndexByte(data, '\n')
+ data = data[newline:]
+ }
+ buf.Write(data)
+ buf.WriteByte('\n')
+ }
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes
new file mode 100644
index 00000000..6f952299
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes
@@ -0,0 +1,2 @@
+# To prevent CRLF breakages on Windows for fragile files, like testdata.
+* -text
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
similarity index 73%
rename from vendor/github.com/gogo/protobuf/LICENSE
rename to vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
index f57de90d..a06c5ebf 100644
--- a/vendor/github.com/gogo/protobuf/LICENSE
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
@@ -1,23 +1,16 @@
-Copyright (c) 2013, The GoGo Authors. All rights reserved.
-
-Protocol Buffers for Go with Gadgets
-
-Go support for Protocol Buffers - Google's data interchange format
-
-Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
+Copyright (c) 2017, Daniel Martí. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- * Redistributions of source code must retain the above copyright
+ * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
+ * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
@@ -32,4 +25,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md
new file mode 100644
index 00000000..12cb0fdc
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md
@@ -0,0 +1,55 @@
+# gogrep
+
+ go get mvdan.cc/gogrep
+
+Search for Go code using syntax trees. Work in progress.
+
+ gogrep -x 'if $x != nil { return $x, $*_ }'
+
+### Instructions
+
+ usage: gogrep commands [packages]
+
+A command is of the form "-A pattern", where -A is one of:
+
+ -x find all nodes matching a pattern
+ -g discard nodes not matching a pattern
+ -v discard nodes matching a pattern
+ -a filter nodes by certain attributes
+ -s substitute with a given syntax tree
+ -w write source back to disk or stdout
+
+A pattern is a piece of Go code which may include wildcards. It can be:
+
+ a statement (many if split by semicolons)
+ an expression (many if split by commas)
+ a type expression
+ a top-level declaration (var, func, const)
+ an entire file
+
+Wildcards consist of `$` and a name. All wildcards with the same name
+within an expression must match the same node, excluding "_". Example:
+
+ $x.$_ = $x // assignment of self to a field in self
+
+If `*` is before the name, it will match any number of nodes. Example:
+
+ fmt.Fprintf(os.Stdout, $*_) // all Fprintfs on stdout
+
+`*` can also be used to match optional nodes, like:
+
+ for $*_ { $*_ } // will match all for loops
+ if $*_; $b { $*_ } // will match all ifs with condition $b
+
+Regexes can also be used to match certain identifier names only. The
+`.*` pattern can be used to match all identifiers. Example:
+
+ fmt.$(_ /Fprint.*/)(os.Stdout, $*_) // all Fprint* on stdout
+
+The nodes resulting from applying the commands will be printed line by
+line to standard output.
+
+Here are two simple examples of the -a operand:
+
+ gogrep -x '$x + $y' // will match both numerical and string "+" operations
+ gogrep -x '$x + $y' -a 'type(string)' // matches only string concatenations
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go
new file mode 100644
index 00000000..f366af84
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go
@@ -0,0 +1,61 @@
+package gogrep
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// This is an ugly way to use gogrep as a library.
+// It can go away when another option becomes available.
+
+// Parse creates a gogrep pattern out of a given string expression.
+func Parse(fset *token.FileSet, expr string) (*Pattern, error) {
+ m := matcher{
+ fset: fset,
+ Info: &types.Info{},
+ }
+ node, err := m.parseExpr(expr)
+ if err != nil {
+ return nil, err
+ }
+ return &Pattern{m: &m, Expr: node}, nil
+}
+
+// Pattern is a compiled gogrep pattern.
+type Pattern struct {
+ Expr ast.Node
+ m *matcher
+}
+
+// MatchData describes a successful pattern match.
+type MatchData struct {
+ Node ast.Node
+ Values map[string]ast.Node
+}
+
+// MatchNode calls cb if n matches a pattern.
+func (p *Pattern) MatchNode(n ast.Node, cb func(MatchData)) {
+ p.m.values = map[string]ast.Node{}
+ if p.m.node(p.Expr, n) {
+ cb(MatchData{
+ Values: p.m.values,
+ Node: n,
+ })
+ }
+}
+
+// Match calls cb for any pattern match found in n.
+func (p *Pattern) Match(n ast.Node, cb func(MatchData)) {
+ cmd := exprCmd{name: "x", value: p.Expr}
+ matches := p.m.cmdRange(cmd, []submatch{{
+ values: map[string]ast.Node{},
+ node: n,
+ }})
+ for _, match := range matches {
+ cb(MatchData{
+ Values: match.values,
+ Node: match.node,
+ })
+ }
+}
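
kludge.go turns gogrep into a library: `Parse` compiles a pattern against a `token.FileSet` and `MatchNode` reports captures for a single node. The sketch below is illustrative only, since the package sits under `internal/` in this vendor tree and cannot actually be imported from outside go-ruleguard.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep" // internal: illustration only
)

func main() {
	src := `package p

func f(p *int) int {
	if p != nil {
		return *p
	}
	return 0
}
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	// $x is a wildcard; both occurrences must bind to the same node.
	pat, err := gogrep.Parse(fset, `if $x != nil { return *$x }`)
	if err != nil {
		panic(err)
	}

	// Walk the file and report every node that matches the pattern.
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil {
			return false
		}
		pat.MatchNode(n, func(m gogrep.MatchData) {
			fmt.Println("match at", fset.Position(m.Node.Pos()))
		})
		return true
	})
}
```
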
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go
new file mode 100644
index 00000000..09ab3fd0
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+func (m *matcher) load(wd string, args ...string) ([]*packages.Package, error) {
+ mode := packages.NeedName | packages.NeedImports | packages.NeedSyntax |
+ packages.NeedTypes | packages.NeedTypesInfo
+ if m.recursive { // need the syntax trees for the dependencies too
+ mode |= packages.NeedDeps
+ }
+ cfg := &packages.Config{
+ Mode: mode,
+ Dir: wd,
+ Fset: m.fset,
+ Tests: m.tests,
+ }
+ pkgs, err := packages.Load(cfg, args...)
+ if err != nil {
+ return nil, err
+ }
+ jointErr := ""
+ packages.Visit(pkgs, nil, func(pkg *packages.Package) {
+ for _, err := range pkg.Errors {
+ jointErr += err.Error() + "\n"
+ }
+ })
+ if jointErr != "" {
+ return nil, fmt.Errorf("%s", jointErr)
+ }
+
+ // Make a sorted list of the packages, including transitive dependencies
+ // if recurse is true.
+ byPath := make(map[string]*packages.Package)
+ var addDeps func(*packages.Package)
+ addDeps = func(pkg *packages.Package) {
+ if strings.HasSuffix(pkg.PkgPath, ".test") {
+ // don't add recursive test deps
+ return
+ }
+ for _, imp := range pkg.Imports {
+ if _, ok := byPath[imp.PkgPath]; ok {
+ continue // seen; avoid recursive call
+ }
+ byPath[imp.PkgPath] = imp
+ addDeps(imp)
+ }
+ }
+ for _, pkg := range pkgs {
+ byPath[pkg.PkgPath] = pkg
+ if m.recursive {
+ // add all dependencies once
+ addDeps(pkg)
+ }
+ }
+ pkgs = pkgs[:0]
+ for _, pkg := range byPath {
+ pkgs = append(pkgs, pkg)
+ }
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].PkgPath < pkgs[j].PkgPath
+ })
+ return pkgs, nil
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go
new file mode 100644
index 00000000..004cb32e
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go
@@ -0,0 +1,332 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var usage = func() {
+ fmt.Fprint(os.Stderr, `usage: gogrep commands [packages]
+
+gogrep performs a query on the given Go packages.
+
+ -r search dependencies recursively too
+ -tests search test files too (and direct test deps, with -r)
+
+A command is one of the following:
+
+ -x pattern find all nodes matching a pattern
+ -g pattern discard nodes not matching a pattern
+ -v pattern discard nodes matching a pattern
+ -a attribute discard nodes without an attribute
+ -s pattern substitute with a given syntax tree
+ -p number navigate up a number of node parents
+ -w write the entire source code back
+
+A pattern is a piece of Go code which may include dollar expressions. It can be
+a number of statements, a number of expressions, a declaration, or an entire
+file.
+
+A dollar expression consists of '$' and a name. Dollar expressions with the same
+name within a query always match the same node, excluding "_". Example:
+
+ -x '$x.$_ = $x' # assignment of self to a field in self
+
+If '*' is before the name, it will match any number of nodes. Example:
+
+ -x 'fmt.Fprintf(os.Stdout, $*_)' # all Fprintfs on stdout
+
+By default, the resulting nodes will be printed one per line to standard output.
+To update the input files, use -w.
+`)
+}
+
+func main() {
+ m := matcher{
+ out: os.Stdout,
+ ctx: &build.Default,
+ }
+ err := m.fromArgs(".", os.Args[1:])
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+type matcher struct {
+ out io.Writer
+ ctx *build.Context
+
+ fset *token.FileSet
+
+ parents map[ast.Node]ast.Node
+
+ recursive, tests bool
+ aggressive bool
+
+ // information about variables (wildcards), by id (which is an
+ // integer starting at 0)
+ vars []varInfo
+
+ // node values recorded by name, excluding "_" (used only by the
+ // actual matching phase)
+ values map[string]ast.Node
+ scope *types.Scope
+
+ *types.Info
+ stdImporter types.Importer
+}
+
+type varInfo struct {
+ name string
+ any bool
+}
+
+func (m *matcher) info(id int) varInfo {
+ if id < 0 {
+ return varInfo{}
+ }
+ return m.vars[id]
+}
+
+type exprCmd struct {
+ name string
+ src string
+ value interface{}
+}
+
+type strCmdFlag struct {
+ name string
+ cmds *[]exprCmd
+}
+
+func (o *strCmdFlag) String() string { return "" }
+func (o *strCmdFlag) Set(val string) error {
+ *o.cmds = append(*o.cmds, exprCmd{name: o.name, src: val})
+ return nil
+}
+
+type boolCmdFlag struct {
+ name string
+ cmds *[]exprCmd
+}
+
+func (o *boolCmdFlag) String() string { return "" }
+func (o *boolCmdFlag) Set(val string) error {
+ if val != "true" {
+ return fmt.Errorf("flag can only be true")
+ }
+ *o.cmds = append(*o.cmds, exprCmd{name: o.name})
+ return nil
+}
+func (o *boolCmdFlag) IsBoolFlag() bool { return true }
+
+func (m *matcher) fromArgs(wd string, args []string) error {
+ m.fset = token.NewFileSet()
+ cmds, args, err := m.parseCmds(args)
+ if err != nil {
+ return err
+ }
+ pkgs, err := m.load(wd, args...)
+ if err != nil {
+ return err
+ }
+ var all []ast.Node
+ for _, pkg := range pkgs {
+ m.Info = pkg.TypesInfo
+ nodes := make([]ast.Node, len(pkg.Syntax))
+ for i, f := range pkg.Syntax {
+ nodes[i] = f
+ }
+ all = append(all, m.matches(cmds, nodes)...)
+ }
+ for _, n := range all {
+ fpos := m.fset.Position(n.Pos())
+ if strings.HasPrefix(fpos.Filename, wd) {
+ fpos.Filename = fpos.Filename[len(wd)+1:]
+ }
+ fmt.Fprintf(m.out, "%v: %s\n", fpos, singleLinePrint(n))
+ }
+ return nil
+}
+
+func (m *matcher) parseCmds(args []string) ([]exprCmd, []string, error) {
+ flagSet := flag.NewFlagSet("gogrep", flag.ExitOnError)
+ flagSet.Usage = usage
+ flagSet.BoolVar(&m.recursive, "r", false, "search dependencies recursively too")
+ flagSet.BoolVar(&m.tests, "tests", false, "search test files too (and direct test deps, with -r)")
+
+ var cmds []exprCmd
+ flagSet.Var(&strCmdFlag{
+ name: "x",
+ cmds: &cmds,
+ }, "x", "")
+ flagSet.Var(&strCmdFlag{
+ name: "g",
+ cmds: &cmds,
+ }, "g", "")
+ flagSet.Var(&strCmdFlag{
+ name: "v",
+ cmds: &cmds,
+ }, "v", "")
+ flagSet.Var(&strCmdFlag{
+ name: "a",
+ cmds: &cmds,
+ }, "a", "")
+ flagSet.Var(&strCmdFlag{
+ name: "s",
+ cmds: &cmds,
+ }, "s", "")
+ flagSet.Var(&strCmdFlag{
+ name: "p",
+ cmds: &cmds,
+ }, "p", "")
+ flagSet.Var(&boolCmdFlag{
+ name: "w",
+ cmds: &cmds,
+ }, "w", "")
+ flagSet.Parse(args)
+ paths := flagSet.Args()
+
+ if len(cmds) < 1 {
+ return nil, nil, fmt.Errorf("need at least one command")
+ }
+ for i, cmd := range cmds {
+ switch cmd.name {
+ case "w":
+ continue // no expr
+ case "p":
+ n, err := strconv.Atoi(cmd.src)
+ if err != nil {
+ return nil, nil, err
+ }
+ cmds[i].value = n
+ case "a":
+ m, err := m.parseAttrs(cmd.src)
+ if err != nil {
+ return nil, nil, fmt.Errorf("cannot parse mods: %v", err)
+ }
+ cmds[i].value = m
+ default:
+ node, err := m.parseExpr(cmd.src)
+ if err != nil {
+ return nil, nil, err
+ }
+ cmds[i].value = node
+ }
+ }
+ return cmds, paths, nil
+}
+
+type bufferJoinLines struct {
+ bytes.Buffer
+ last string
+}
+
+var rxNeedSemicolon = regexp.MustCompile(`([])}a-zA-Z0-9"'` + "`" + `]|\+\+|--)$`)
+
+func (b *bufferJoinLines) Write(p []byte) (n int, err error) {
+ if string(p) == "\n" {
+ if b.last == "\n" {
+ return 1, nil
+ }
+ if rxNeedSemicolon.MatchString(b.last) {
+ b.Buffer.WriteByte(';')
+ }
+ b.Buffer.WriteByte(' ')
+ b.last = "\n"
+ return 1, nil
+ }
+ p = bytes.Trim(p, "\t")
+ n, err = b.Buffer.Write(p)
+ b.last = string(p)
+ return
+}
+
+func (b *bufferJoinLines) String() string {
+ return strings.TrimSuffix(b.Buffer.String(), "; ")
+}
+
+// inspect is like ast.Inspect, but it supports our extra nodeList Node
+// type (only at the top level).
+func inspect(node ast.Node, fn func(ast.Node) bool) {
+ // ast.Walk barfs on ast.Node types it doesn't know, so
+ // do the first level manually here
+ list, ok := node.(nodeList)
+ if !ok {
+ ast.Inspect(node, fn)
+ return
+ }
+ if !fn(list) {
+ return
+ }
+ for i := 0; i < list.len(); i++ {
+ ast.Inspect(list.at(i), fn)
+ }
+ fn(nil)
+}
+
+var emptyFset = token.NewFileSet()
+
+func singleLinePrint(node ast.Node) string {
+ var buf bufferJoinLines
+ inspect(node, func(node ast.Node) bool {
+ bl, ok := node.(*ast.BasicLit)
+ if !ok || bl.Kind != token.STRING {
+ return true
+ }
+ if !strings.HasPrefix(bl.Value, "`") {
+ return true
+ }
+ if !strings.Contains(bl.Value, "\n") {
+ return true
+ }
+ bl.Value = strconv.Quote(bl.Value[1 : len(bl.Value)-1])
+ return true
+ })
+ printNode(&buf, emptyFset, node)
+ return buf.String()
+}
+
+func printNode(w io.Writer, fset *token.FileSet, node ast.Node) {
+ switch x := node.(type) {
+ case exprList:
+ if len(x) == 0 {
+ return
+ }
+ printNode(w, fset, x[0])
+ for _, n := range x[1:] {
+ fmt.Fprintf(w, ", ")
+ printNode(w, fset, n)
+ }
+ case stmtList:
+ if len(x) == 0 {
+ return
+ }
+ printNode(w, fset, x[0])
+ for _, n := range x[1:] {
+ fmt.Fprintf(w, "; ")
+ printNode(w, fset, n)
+ }
+ default:
+ err := printer.Fprint(w, fset, node)
+ if err != nil && strings.Contains(err.Error(), "go/printer: unsupported node type") {
+ // Should never happen, but make it obvious when it does.
+ panic(fmt.Errorf("cannot print node %T: %v", node, err))
+ }
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go
new file mode 100644
index 00000000..08b53d87
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go
@@ -0,0 +1,1108 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/token"
+ "go/types"
+ "regexp"
+ "strconv"
+)
+
+func (m *matcher) matches(cmds []exprCmd, nodes []ast.Node) []ast.Node {
+ m.parents = make(map[ast.Node]ast.Node)
+ m.fillParents(nodes...)
+ initial := make([]submatch, len(nodes))
+ for i, node := range nodes {
+ initial[i].node = node
+ initial[i].values = make(map[string]ast.Node)
+ }
+ final := m.submatches(cmds, initial)
+ finalNodes := make([]ast.Node, len(final))
+ for i := range finalNodes {
+ finalNodes[i] = final[i].node
+ }
+ return finalNodes
+}
+
+func (m *matcher) fillParents(nodes ...ast.Node) {
+ stack := make([]ast.Node, 1, 32)
+ for _, node := range nodes {
+ inspect(node, func(node ast.Node) bool {
+ if node == nil {
+ stack = stack[:len(stack)-1]
+ return true
+ }
+ if _, ok := node.(nodeList); !ok {
+ m.parents[node] = stack[len(stack)-1]
+ }
+ stack = append(stack, node)
+ return true
+ })
+ }
+}
+
+type submatch struct {
+ node ast.Node
+ values map[string]ast.Node
+}
+
+func valsCopy(values map[string]ast.Node) map[string]ast.Node {
+ v2 := make(map[string]ast.Node, len(values))
+ for k, v := range values {
+ v2[k] = v
+ }
+ return v2
+}
+
+func (m *matcher) submatches(cmds []exprCmd, subs []submatch) []submatch {
+ if len(cmds) == 0 {
+ return subs
+ }
+ cmd := cmds[0]
+ var fn func(exprCmd, []submatch) []submatch
+ switch cmd.name {
+ case "x":
+ fn = m.cmdRange
+ case "g":
+ fn = m.cmdFilter(true)
+ case "v":
+ fn = m.cmdFilter(false)
+ case "s":
+ fn = m.cmdSubst
+ case "a":
+ fn = m.cmdAttr
+ case "p":
+ fn = m.cmdParents
+ case "w":
+ if len(cmds) > 1 {
+ panic("-w must be the last command")
+ }
+ fn = m.cmdWrite
+ default:
+ panic(fmt.Sprintf("unknown command: %q", cmd.name))
+ }
+ return m.submatches(cmds[1:], fn(cmd, subs))
+}
+
+func (m *matcher) cmdRange(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ seen := map[nodePosHash]bool{}
+
+ // The values context for each new submatch must be a new copy
+ // from its parent submatch. If we don't do this copy, all the
+ // submatches would share the same map and have side effects.
+ var startValues map[string]ast.Node
+
+ match := func(exprNode, node ast.Node) {
+ if node == nil {
+ return
+ }
+ m.values = valsCopy(startValues)
+ found := m.topNode(exprNode, node)
+ if found == nil {
+ return
+ }
+ hash := posHash(found)
+ if !seen[hash] {
+ matches = append(matches, submatch{
+ node: found,
+ values: m.values,
+ })
+ seen[hash] = true
+ }
+ }
+ for _, sub := range subs {
+ startValues = valsCopy(sub.values)
+ m.walkWithLists(cmd.value.(ast.Node), sub.node, match)
+ }
+ return matches
+}
+
+func (m *matcher) cmdFilter(wantAny bool) func(exprCmd, []submatch) []submatch {
+ return func(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ any := false
+ match := func(exprNode, node ast.Node) {
+ if node == nil {
+ return
+ }
+ found := m.topNode(exprNode, node)
+ if found != nil {
+ any = true
+ }
+ }
+ for _, sub := range subs {
+ any = false
+ m.values = sub.values
+ m.walkWithLists(cmd.value.(ast.Node), sub.node, match)
+ if any == wantAny {
+ matches = append(matches, sub)
+ }
+ }
+ return matches
+ }
+}
+
+func (m *matcher) cmdAttr(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ for _, sub := range subs {
+ m.values = sub.values
+ if m.attrApplies(sub.node, cmd.value.(attribute)) {
+ matches = append(matches, sub)
+ }
+ }
+ return matches
+}
+
+func (m *matcher) cmdParents(cmd exprCmd, subs []submatch) []submatch {
+ for i := range subs {
+ sub := &subs[i]
+ reps := cmd.value.(int)
+ for j := 0; j < reps; j++ {
+ sub.node = m.parentOf(sub.node)
+ }
+ }
+ return subs
+}
+
+func (m *matcher) attrApplies(node ast.Node, attr interface{}) bool {
+ if rx, ok := attr.(*regexp.Regexp); ok {
+ if exprStmt, ok := node.(*ast.ExprStmt); ok {
+ // since we prefer matching entire statements, get the
+ // ident from the ExprStmt
+ node = exprStmt.X
+ }
+ ident, ok := node.(*ast.Ident)
+ return ok && rx.MatchString(ident.Name)
+ }
+ expr, _ := node.(ast.Expr)
+ if expr == nil {
+ return false // only exprs have types
+ }
+ t := m.Info.TypeOf(expr)
+ if t == nil {
+ return false // an expr, but no type?
+ }
+ tv := m.Info.Types[expr]
+ switch x := attr.(type) {
+ case typeCheck:
+ want := m.resolveType(m.scope, x.expr)
+ switch {
+ case x.op == "type" && !types.Identical(t, want):
+ return false
+ case x.op == "asgn" && !types.AssignableTo(t, want):
+ return false
+ case x.op == "conv" && !types.ConvertibleTo(t, want):
+ return false
+ }
+ case typProperty:
+ switch {
+ case x == "comp" && !types.Comparable(t):
+ return false
+ case x == "addr" && !tv.Addressable():
+ return false
+ }
+ case typUnderlying:
+ u := t.Underlying()
+ uok := true
+ switch x {
+ case "basic":
+ _, uok = u.(*types.Basic)
+ case "array":
+ _, uok = u.(*types.Array)
+ case "slice":
+ _, uok = u.(*types.Slice)
+ case "struct":
+ _, uok = u.(*types.Struct)
+ case "interface":
+ _, uok = u.(*types.Interface)
+ case "pointer":
+ _, uok = u.(*types.Pointer)
+ case "func":
+ _, uok = u.(*types.Signature)
+ case "map":
+ _, uok = u.(*types.Map)
+ case "chan":
+ _, uok = u.(*types.Chan)
+ }
+ if !uok {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *matcher) walkWithLists(exprNode, node ast.Node, fn func(exprNode, node ast.Node)) {
+ visit := func(node ast.Node) bool {
+ fn(exprNode, node)
+ for _, list := range nodeLists(node) {
+ fn(exprNode, list)
+ if id := m.wildAnyIdent(exprNode); id != nil {
+ // so that "$*a" will match "a, b"
+ fn(exprList([]ast.Expr{id}), list)
+ // so that "$*a" will match "a; b"
+ fn(toStmtList(id), list)
+ }
+ }
+ return true
+ }
+ inspect(node, visit)
+}
+
+func (m *matcher) topNode(exprNode, node ast.Node) ast.Node {
+ sts1, ok1 := exprNode.(stmtList)
+ sts2, ok2 := node.(stmtList)
+ if ok1 && ok2 {
+ // allow a partial match at the top level
+ return m.nodes(sts1, sts2, true)
+ }
+ if m.node(exprNode, node) {
+ return node
+ }
+ return nil
+}
+
+// optNode is like node, but for those nodes that can be nil and are not
+// part of a list. For example, init and post statements in a for loop.
+func (m *matcher) optNode(expr, node ast.Node) bool {
+ if ident := m.wildAnyIdent(expr); ident != nil {
+ if m.node(toStmtList(ident), toStmtList(node)) {
+ return true
+ }
+ }
+ return m.node(expr, node)
+}
+
+func (m *matcher) node(expr, node ast.Node) bool {
+ switch node.(type) {
+ case *ast.File, *ast.FuncType, *ast.BlockStmt, *ast.IfStmt,
+ *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.CaseClause,
+ *ast.CommClause, *ast.ForStmt, *ast.RangeStmt:
+ if scope := m.Info.Scopes[node]; scope != nil {
+ m.scope = scope
+ }
+ }
+ if !m.aggressive {
+ if expr == nil || node == nil {
+ return expr == node
+ }
+ } else {
+ if expr == nil && node == nil {
+ return true
+ }
+ if node == nil {
+ expr, node = node, expr
+ }
+ }
+ switch x := expr.(type) {
+ case nil: // only in aggressive mode
+ y, ok := node.(*ast.Ident)
+ return ok && y.Name == "_"
+
+ case *ast.File:
+ y, ok := node.(*ast.File)
+ if !ok || !m.node(x.Name, y.Name) || len(x.Decls) != len(y.Decls) ||
+ len(x.Imports) != len(y.Imports) {
+ return false
+ }
+ for i, decl := range x.Decls {
+ if !m.node(decl, y.Decls[i]) {
+ return false
+ }
+ }
+ for i, imp := range x.Imports {
+ if !m.node(imp, y.Imports[i]) {
+ return false
+ }
+ }
+ return true
+
+ case *ast.Ident:
+ y, yok := node.(*ast.Ident)
+ if !isWildName(x.Name) {
+ // not a wildcard
+ return yok && x.Name == y.Name
+ }
+ if _, ok := node.(ast.Node); !ok {
+ return false // to not include our extra node types
+ }
+ id := fromWildName(x.Name)
+ info := m.info(id)
+ if info.any {
+ return false
+ }
+ if info.name == "_" {
+ // values are discarded, matches anything
+ return true
+ }
+ prev, ok := m.values[info.name]
+ if !ok {
+ // first occurrence, record value
+ m.values[info.name] = node
+ return true
+ }
+ // multiple uses must match
+ return m.node(prev, node)
+
+ // lists (ys are generated by us while walking)
+ case exprList:
+ y, ok := node.(exprList)
+ return ok && m.exprs(x, y)
+ case stmtList:
+ y, ok := node.(stmtList)
+ return ok && m.stmts(x, y)
+
+ // lits
+ case *ast.BasicLit:
+ y, ok := node.(*ast.BasicLit)
+ return ok && x.Kind == y.Kind && x.Value == y.Value
+ case *ast.CompositeLit:
+ y, ok := node.(*ast.CompositeLit)
+ return ok && m.node(x.Type, y.Type) && m.exprs(x.Elts, y.Elts)
+ case *ast.FuncLit:
+ y, ok := node.(*ast.FuncLit)
+ return ok && m.node(x.Type, y.Type) && m.node(x.Body, y.Body)
+
+ // types
+ case *ast.ArrayType:
+ y, ok := node.(*ast.ArrayType)
+ return ok && m.node(x.Len, y.Len) && m.node(x.Elt, y.Elt)
+ case *ast.MapType:
+ y, ok := node.(*ast.MapType)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value)
+ case *ast.StructType:
+ y, ok := node.(*ast.StructType)
+ return ok && m.fields(x.Fields, y.Fields)
+ case *ast.Field:
+ // TODO: tags?
+ y, ok := node.(*ast.Field)
+ if !ok {
+ return false
+ }
+ if len(x.Names) == 0 && x.Tag == nil && m.node(x.Type, y) {
+ // Allow $var to match a field.
+ return true
+ }
+ return m.idents(x.Names, y.Names) && m.node(x.Type, y.Type)
+ case *ast.FuncType:
+ y, ok := node.(*ast.FuncType)
+ return ok && m.fields(x.Params, y.Params) &&
+ m.fields(x.Results, y.Results)
+ case *ast.InterfaceType:
+ y, ok := node.(*ast.InterfaceType)
+ return ok && m.fields(x.Methods, y.Methods)
+ case *ast.ChanType:
+ y, ok := node.(*ast.ChanType)
+ return ok && x.Dir == y.Dir && m.node(x.Value, y.Value)
+
+ // other exprs
+ case *ast.Ellipsis:
+ y, ok := node.(*ast.Ellipsis)
+ return ok && m.node(x.Elt, y.Elt)
+ case *ast.ParenExpr:
+ y, ok := node.(*ast.ParenExpr)
+ return ok && m.node(x.X, y.X)
+ case *ast.UnaryExpr:
+ y, ok := node.(*ast.UnaryExpr)
+ return ok && x.Op == y.Op && m.node(x.X, y.X)
+ case *ast.BinaryExpr:
+ y, ok := node.(*ast.BinaryExpr)
+ return ok && x.Op == y.Op && m.node(x.X, y.X) && m.node(x.Y, y.Y)
+ case *ast.CallExpr:
+ y, ok := node.(*ast.CallExpr)
+ return ok && m.node(x.Fun, y.Fun) && m.exprs(x.Args, y.Args) &&
+ bothValid(x.Ellipsis, y.Ellipsis)
+ case *ast.KeyValueExpr:
+ y, ok := node.(*ast.KeyValueExpr)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value)
+ case *ast.StarExpr:
+ y, ok := node.(*ast.StarExpr)
+ return ok && m.node(x.X, y.X)
+ case *ast.SelectorExpr:
+ y, ok := node.(*ast.SelectorExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Sel, y.Sel)
+ case *ast.IndexExpr:
+ y, ok := node.(*ast.IndexExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Index, y.Index)
+ case *ast.SliceExpr:
+ y, ok := node.(*ast.SliceExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Low, y.Low) &&
+ m.node(x.High, y.High) && m.node(x.Max, y.Max)
+ case *ast.TypeAssertExpr:
+ y, ok := node.(*ast.TypeAssertExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Type, y.Type)
+
+ // decls
+ case *ast.GenDecl:
+ y, ok := node.(*ast.GenDecl)
+ return ok && x.Tok == y.Tok && m.specs(x.Specs, y.Specs)
+ case *ast.FuncDecl:
+ y, ok := node.(*ast.FuncDecl)
+ return ok && m.fields(x.Recv, y.Recv) && m.node(x.Name, y.Name) &&
+ m.node(x.Type, y.Type) && m.node(x.Body, y.Body)
+
+ // specs
+ case *ast.ValueSpec:
+ y, ok := node.(*ast.ValueSpec)
+ if !ok || !m.node(x.Type, y.Type) {
+ return false
+ }
+ if m.aggressive && len(x.Names) == 1 {
+ for i := range y.Names {
+ if m.node(x.Names[i], y.Names[i]) &&
+ (x.Values == nil || m.node(x.Values[i], y.Values[i])) {
+ return true
+ }
+ }
+ }
+ return m.idents(x.Names, y.Names) && m.exprs(x.Values, y.Values)
+
+ // stmt bridge nodes
+ case *ast.ExprStmt:
+ if id, ok := x.X.(*ast.Ident); ok && isWildName(id.Name) {
+ // prefer matching $x as a statement, as it's
+ // the parent
+ return m.node(id, node)
+ }
+ y, ok := node.(*ast.ExprStmt)
+ return ok && m.node(x.X, y.X)
+ case *ast.DeclStmt:
+ y, ok := node.(*ast.DeclStmt)
+ return ok && m.node(x.Decl, y.Decl)
+
+ // stmts
+ case *ast.EmptyStmt:
+ _, ok := node.(*ast.EmptyStmt)
+ return ok
+ case *ast.LabeledStmt:
+ y, ok := node.(*ast.LabeledStmt)
+ return ok && m.node(x.Label, y.Label) && m.node(x.Stmt, y.Stmt)
+ case *ast.SendStmt:
+ y, ok := node.(*ast.SendStmt)
+ return ok && m.node(x.Chan, y.Chan) && m.node(x.Value, y.Value)
+ case *ast.IncDecStmt:
+ y, ok := node.(*ast.IncDecStmt)
+ return ok && x.Tok == y.Tok && m.node(x.X, y.X)
+ case *ast.AssignStmt:
+ y, ok := node.(*ast.AssignStmt)
+ if !m.aggressive {
+ return ok && x.Tok == y.Tok &&
+ m.exprs(x.Lhs, y.Lhs) && m.exprs(x.Rhs, y.Rhs)
+ }
+ if ok {
+ return m.exprs(x.Lhs, y.Lhs) && m.exprs(x.Rhs, y.Rhs)
+ }
+ vs, ok := node.(*ast.ValueSpec)
+ return ok && m.nodesMatch(exprList(x.Lhs), identList(vs.Names)) &&
+ m.exprs(x.Rhs, vs.Values)
+ case *ast.GoStmt:
+ y, ok := node.(*ast.GoStmt)
+ return ok && m.node(x.Call, y.Call)
+ case *ast.DeferStmt:
+ y, ok := node.(*ast.DeferStmt)
+ return ok && m.node(x.Call, y.Call)
+ case *ast.ReturnStmt:
+ y, ok := node.(*ast.ReturnStmt)
+ return ok && m.exprs(x.Results, y.Results)
+ case *ast.BranchStmt:
+ y, ok := node.(*ast.BranchStmt)
+ return ok && x.Tok == y.Tok && m.node(maybeNilIdent(x.Label), maybeNilIdent(y.Label))
+ case *ast.BlockStmt:
+ if m.aggressive && m.node(stmtList(x.List), node) {
+ return true
+ }
+ y, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return false
+ }
+ if x == nil || y == nil {
+ return x == y
+ }
+ return m.cases(x.List, y.List) || m.stmts(x.List, y.List)
+ case *ast.IfStmt:
+ y, ok := node.(*ast.IfStmt)
+ if !ok {
+ return false
+ }
+ condAny := m.wildAnyIdent(x.Cond)
+ if condAny != nil && x.Init == nil {
+ // if $*x { ... } on the left
+ left := toStmtList(condAny)
+ return m.node(left, toStmtList(y.Init, y.Cond)) &&
+ m.node(x.Body, y.Body) && m.optNode(x.Else, y.Else)
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Cond, y.Cond) &&
+ m.node(x.Body, y.Body) && m.node(x.Else, y.Else)
+ case *ast.CaseClause:
+ y, ok := node.(*ast.CaseClause)
+ return ok && m.exprs(x.List, y.List) && m.stmts(x.Body, y.Body)
+ case *ast.SwitchStmt:
+ y, ok := node.(*ast.SwitchStmt)
+ if !ok {
+ return false
+ }
+ tagAny := m.wildAnyIdent(x.Tag)
+ if tagAny != nil && x.Init == nil {
+ // switch $*x { ... } on the left
+ left := toStmtList(tagAny)
+ return m.node(left, toStmtList(y.Init, y.Tag)) &&
+ m.node(x.Body, y.Body)
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Tag, y.Tag) && m.node(x.Body, y.Body)
+ case *ast.TypeSwitchStmt:
+ y, ok := node.(*ast.TypeSwitchStmt)
+ return ok && m.optNode(x.Init, y.Init) && m.node(x.Assign, y.Assign) && m.node(x.Body, y.Body)
+ case *ast.CommClause:
+ y, ok := node.(*ast.CommClause)
+ return ok && m.node(x.Comm, y.Comm) && m.stmts(x.Body, y.Body)
+ case *ast.SelectStmt:
+ y, ok := node.(*ast.SelectStmt)
+ return ok && m.node(x.Body, y.Body)
+ case *ast.ForStmt:
+ condIdent := m.wildAnyIdent(x.Cond)
+ if condIdent != nil && x.Init == nil && x.Post == nil {
+ // "for $*x { ... }" on the left
+ left := toStmtList(condIdent)
+ // also accept RangeStmt on the right
+ switch y := node.(type) {
+ case *ast.ForStmt:
+ return m.node(left, toStmtList(y.Init, y.Cond, y.Post)) &&
+ m.node(x.Body, y.Body)
+ case *ast.RangeStmt:
+ return m.node(left, toStmtList(y.Key, y.Value, y.X)) &&
+ m.node(x.Body, y.Body)
+ default:
+ return false
+ }
+ }
+ y, ok := node.(*ast.ForStmt)
+ if !ok {
+ return false
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Cond, y.Cond) &&
+ m.optNode(x.Post, y.Post) && m.node(x.Body, y.Body)
+ case *ast.RangeStmt:
+ y, ok := node.(*ast.RangeStmt)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value) &&
+ m.node(x.X, y.X) && m.node(x.Body, y.Body)
+
+ case *ast.TypeSpec:
+ y, ok := node.(*ast.TypeSpec)
+ return ok && m.node(x.Name, y.Name) && m.node(x.Type, y.Type)
+
+ case *ast.FieldList:
+ // we ignore these, for now
+ return false
+ default:
+ panic(fmt.Sprintf("unexpected node: %T", x))
+ }
+}
+
+func (m *matcher) wildAnyIdent(node ast.Node) *ast.Ident {
+ switch x := node.(type) {
+ case *ast.ExprStmt:
+ return m.wildAnyIdent(x.X)
+ case *ast.Ident:
+ if !isWildName(x.Name) {
+ return nil
+ }
+ if !m.info(fromWildName(x.Name)).any {
+ return nil
+ }
+ return x
+ }
+ return nil
+}
+
+// resolveType resolves a type expression from a given scope.
+func (m *matcher) resolveType(scope *types.Scope, expr ast.Expr) types.Type {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ _, obj := scope.LookupParent(x.Name, token.NoPos)
+ if obj == nil {
+ // TODO: error if all resolveType calls on a type
+ // expression fail? or perhaps resolve type expressions
+ // across the entire program?
+ return nil
+ }
+ return obj.Type()
+ case *ast.ArrayType:
+ elt := m.resolveType(scope, x.Elt)
+ if x.Len == nil {
+ return types.NewSlice(elt)
+ }
+ bl, ok := x.Len.(*ast.BasicLit)
+ if !ok || bl.Kind != token.INT {
+ panic(fmt.Sprintf("TODO: %T", x))
+ }
+ len, _ := strconv.ParseInt(bl.Value, 0, 0)
+ return types.NewArray(elt, len)
+ case *ast.StarExpr:
+ return types.NewPointer(m.resolveType(scope, x.X))
+ case *ast.ChanType:
+ dir := types.SendRecv
+ switch x.Dir {
+ case ast.SEND:
+ dir = types.SendOnly
+ case ast.RECV:
+ dir = types.RecvOnly
+ }
+ return types.NewChan(dir, m.resolveType(scope, x.Value))
+ case *ast.SelectorExpr:
+ scope = m.findScope(scope, x.X)
+ return m.resolveType(scope, x.Sel)
+ default:
+ panic(fmt.Sprintf("resolveType TODO: %T", x))
+ }
+}
+
+func (m *matcher) findScope(scope *types.Scope, expr ast.Expr) *types.Scope {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ _, obj := scope.LookupParent(x.Name, token.NoPos)
+ if pkg, ok := obj.(*types.PkgName); ok {
+ return pkg.Imported().Scope()
+ }
+ // try to fall back to std
+ if m.stdImporter == nil {
+ m.stdImporter = importer.Default()
+ }
+ path := x.Name
+ if longer, ok := stdImportFixes[path]; ok {
+ path = longer
+ }
+ pkg, err := m.stdImporter.Import(path)
+ if err != nil {
+ panic(fmt.Sprintf("findScope err: %v", err))
+ }
+ return pkg.Scope()
+ default:
+ panic(fmt.Sprintf("findScope TODO: %T", x))
+ }
+}
+
+var stdImportFixes = map[string]string{
+ // go list std | grep -vE 'vendor|internal' | grep '/' | sed -r 's@^(.*)/([^/]*)$@"\2": "\1/\2",@' | sort
+ // (after commenting out the less likely duplicates)
+ "adler32": "hash/adler32",
+ "aes": "crypto/aes",
+ "ascii85": "encoding/ascii85",
+ "asn1": "encoding/asn1",
+ "ast": "go/ast",
+ "atomic": "sync/atomic",
+ "base32": "encoding/base32",
+ "base64": "encoding/base64",
+ "big": "math/big",
+ "binary": "encoding/binary",
+ "bits": "math/bits",
+ "build": "go/build",
+ "bzip2": "compress/bzip2",
+ "cgi": "net/http/cgi",
+ "cgo": "runtime/cgo",
+ "cipher": "crypto/cipher",
+ "cmplx": "math/cmplx",
+ "color": "image/color",
+ "constant": "go/constant",
+ "cookiejar": "net/http/cookiejar",
+ "crc32": "hash/crc32",
+ "crc64": "hash/crc64",
+ "csv": "encoding/csv",
+ "debug": "runtime/debug",
+ "des": "crypto/des",
+ "doc": "go/doc",
+ "draw": "image/draw",
+ "driver": "database/sql/driver",
+ "dsa": "crypto/dsa",
+ "dwarf": "debug/dwarf",
+ "ecdsa": "crypto/ecdsa",
+ "elf": "debug/elf",
+ "elliptic": "crypto/elliptic",
+ "exec": "os/exec",
+ "fcgi": "net/http/fcgi",
+ "filepath": "path/filepath",
+ "flate": "compress/flate",
+ "fnv": "hash/fnv",
+ "format": "go/format",
+ "gif": "image/gif",
+ "gob": "encoding/gob",
+ "gosym": "debug/gosym",
+ "gzip": "compress/gzip",
+ "heap": "container/heap",
+ "hex": "encoding/hex",
+ "hmac": "crypto/hmac",
+ "http": "net/http",
+ "httptest": "net/http/httptest",
+ "httptrace": "net/http/httptrace",
+ "httputil": "net/http/httputil",
+ "importer": "go/importer",
+ "iotest": "testing/iotest",
+ "ioutil": "io/ioutil",
+ "jpeg": "image/jpeg",
+ "json": "encoding/json",
+ "jsonrpc": "net/rpc/jsonrpc",
+ "list": "container/list",
+ "lzw": "compress/lzw",
+ "macho": "debug/macho",
+ "mail": "net/mail",
+ "md5": "crypto/md5",
+ "multipart": "mime/multipart",
+ "palette": "image/color/palette",
+ "parser": "go/parser",
+ "parse": "text/template/parse",
+ "pe": "debug/pe",
+ "pem": "encoding/pem",
+ "pkix": "crypto/x509/pkix",
+ "plan9obj": "debug/plan9obj",
+ "png": "image/png",
+ //"pprof": "net/http/pprof",
+ "pprof": "runtime/pprof",
+ "printer": "go/printer",
+ "quick": "testing/quick",
+ "quotedprintable": "mime/quotedprintable",
+ "race": "runtime/race",
+ //"rand": "crypto/rand",
+ "rand": "math/rand",
+ "rc4": "crypto/rc4",
+ "ring": "container/ring",
+ "rpc": "net/rpc",
+ "rsa": "crypto/rsa",
+ //"scanner": "go/scanner",
+ "scanner": "text/scanner",
+ "sha1": "crypto/sha1",
+ "sha256": "crypto/sha256",
+ "sha512": "crypto/sha512",
+ "signal": "os/signal",
+ "smtp": "net/smtp",
+ "sql": "database/sql",
+ "subtle": "crypto/subtle",
+ "suffixarray": "index/suffixarray",
+ "syntax": "regexp/syntax",
+ "syslog": "log/syslog",
+ "tabwriter": "text/tabwriter",
+ "tar": "archive/tar",
+ //"template": "html/template",
+ "template": "text/template",
+ "textproto": "net/textproto",
+ "tls": "crypto/tls",
+ "token": "go/token",
+ "trace": "runtime/trace",
+ "types": "go/types",
+ "url": "net/url",
+ "user": "os/user",
+ "utf16": "unicode/utf16",
+ "utf8": "unicode/utf8",
+ "x509": "crypto/x509",
+ "xml": "encoding/xml",
+ "zip": "archive/zip",
+ "zlib": "compress/zlib",
+}
+
+func maybeNilIdent(x *ast.Ident) ast.Node {
+ if x == nil {
+ return nil
+ }
+ return x
+}
+
+func bothValid(p1, p2 token.Pos) bool {
+ return p1.IsValid() == p2.IsValid()
+}
+
+type nodeList interface {
+ at(i int) ast.Node
+ len() int
+ slice(from, to int) nodeList
+ ast.Node
+}
+
+// nodes matches two lists of nodes. It uses a common algorithm to match
+// wildcard patterns with any number of nodes without recursion.
+func (m *matcher) nodes(ns1, ns2 nodeList, partial bool) ast.Node {
+ ns1len, ns2len := ns1.len(), ns2.len()
+ if ns1len == 0 {
+ if ns2len == 0 {
+ return ns2
+ }
+ return nil
+ }
+ partialStart, partialEnd := 0, ns2len
+ i1, i2 := 0, 0
+ next1, next2 := 0, 0
+
+ // We need to keep a copy of m.values so that we can restart
+ // with a different "any of" match while discarding any matches
+ // we found while trying it.
+ type restart struct {
+ matches map[string]ast.Node
+ next1, next2 int
+ }
+ // We need to stack these because otherwise some edge cases
+ // would not match properly. Since we have various kinds of
+ // wildcards (nodes containing them, $_, and $*_), in some cases
+ // we may have to go back and do multiple restarts to get to the
+ // right starting position.
+ var stack []restart
+ push := func(n1, n2 int) {
+ if n2 > ns2len {
+ return // would be discarded anyway
+ }
+ stack = append(stack, restart{valsCopy(m.values), n1, n2})
+ next1, next2 = n1, n2
+ }
+ pop := func() {
+ i1, i2 = next1, next2
+ m.values = stack[len(stack)-1].matches
+ stack = stack[:len(stack)-1]
+ next1, next2 = 0, 0
+ if len(stack) > 0 {
+ next1 = stack[len(stack)-1].next1
+ next2 = stack[len(stack)-1].next2
+ }
+ }
+ wildName := ""
+ wildStart := 0
+
+ // wouldMatch returns whether the current wildcard - if any -
+ // matches the nodes we are currently trying it on.
+ wouldMatch := func() bool {
+ switch wildName {
+ case "", "_":
+ return true
+ }
+ list := ns2.slice(wildStart, i2)
+ // check that it matches any nodes found elsewhere
+ prev, ok := m.values[wildName]
+ if ok && !m.node(prev, list) {
+ return false
+ }
+ m.values[wildName] = list
+ return true
+ }
+ for i1 < ns1len || i2 < ns2len {
+ if i1 < ns1len {
+ n1 := ns1.at(i1)
+ id := fromWildNode(n1)
+ info := m.info(id)
+ if info.any {
+ // keep track of where this wildcard
+ // started (if info.name == wildName,
+ // we're trying the same wildcard
+ // matching one more node)
+ if info.name != wildName {
+ wildStart = i2
+ wildName = info.name
+ }
+ // try to match zero or more at i2,
+ // restarting at i2+1 if it fails
+ push(i1, i2+1)
+ i1++
+ continue
+ }
+ if partial && i1 == 0 {
+ // let "b; c" match "a; b; c"
+ // (simulates a $*_ at the beginning)
+ partialStart = i2
+ push(i1, i2+1)
+ }
+ if i2 < ns2len && wouldMatch() && m.node(n1, ns2.at(i2)) {
+ wildName = ""
+ // ordinary match
+ i1++
+ i2++
+ continue
+ }
+ }
+ if partial && i1 == ns1len && wildName == "" {
+ partialEnd = i2
+ break // let "b; c" match "b; c; d"
+ }
+ // mismatch, try to restart
+ if 0 < next2 && next2 <= ns2len && (i1 != next1 || i2 != next2) {
+ pop()
+ continue
+ }
+ return nil
+ }
+ if !wouldMatch() {
+ return nil
+ }
+ return ns2.slice(partialStart, partialEnd)
+}
+
+func (m *matcher) nodesMatch(list1, list2 nodeList) bool {
+ return m.nodes(list1, list2, false) != nil
+}
+
+func (m *matcher) exprs(exprs1, exprs2 []ast.Expr) bool {
+ return m.nodesMatch(exprList(exprs1), exprList(exprs2))
+}
+
+func (m *matcher) idents(ids1, ids2 []*ast.Ident) bool {
+ return m.nodesMatch(identList(ids1), identList(ids2))
+}
+
+func toStmtList(nodes ...ast.Node) stmtList {
+ var stmts []ast.Stmt
+ for _, node := range nodes {
+ switch x := node.(type) {
+ case nil:
+ case ast.Stmt:
+ stmts = append(stmts, x)
+ case ast.Expr:
+ stmts = append(stmts, &ast.ExprStmt{X: x})
+ default:
+ panic(fmt.Sprintf("unexpected node type: %T", x))
+ }
+ }
+ return stmtList(stmts)
+}
+
+func (m *matcher) cases(stmts1, stmts2 []ast.Stmt) bool {
+ for _, stmt := range stmts2 {
+ switch stmt.(type) {
+ case *ast.CaseClause, *ast.CommClause:
+ default:
+ return false
+ }
+ }
+ var left []*ast.Ident
+ for _, stmt := range stmts1 {
+ var expr ast.Expr
+ var bstmt ast.Stmt
+ switch x := stmt.(type) {
+ case *ast.CaseClause:
+ if len(x.List) != 1 || len(x.Body) != 1 {
+ return false
+ }
+ expr, bstmt = x.List[0], x.Body[0]
+ case *ast.CommClause:
+ if x.Comm == nil || len(x.Body) != 1 {
+ return false
+ }
+ if commExpr, ok := x.Comm.(*ast.ExprStmt); ok {
+ expr = commExpr.X
+ }
+ bstmt = x.Body[0]
+ default:
+ return false
+ }
+ xs, ok := bstmt.(*ast.ExprStmt)
+ if !ok {
+ return false
+ }
+ bodyIdent, ok := xs.X.(*ast.Ident)
+ if !ok || bodyIdent.Name != "gogrep_body" {
+ return false
+ }
+ id, ok := expr.(*ast.Ident)
+ if !ok || !isWildName(id.Name) {
+ return false
+ }
+ left = append(left, id)
+ }
+ return m.nodesMatch(identList(left), stmtList(stmts2))
+}
+
+func (m *matcher) stmts(stmts1, stmts2 []ast.Stmt) bool {
+ return m.nodesMatch(stmtList(stmts1), stmtList(stmts2))
+}
+
+func (m *matcher) specs(specs1, specs2 []ast.Spec) bool {
+ return m.nodesMatch(specList(specs1), specList(specs2))
+}
+
+func (m *matcher) fields(fields1, fields2 *ast.FieldList) bool {
+ if fields1 == nil || fields2 == nil {
+ return fields1 == fields2
+ }
+ return m.nodesMatch(fieldList(fields1.List), fieldList(fields2.List))
+}
+
+func fromWildNode(node ast.Node) int {
+ switch node := node.(type) {
+ case *ast.Ident:
+ return fromWildName(node.Name)
+ case *ast.ExprStmt:
+ return fromWildNode(node.X)
+ case *ast.Field:
+ // Allow $var to represent an entire field; the lone identifier
+ // gets picked up as an anonymous field.
+ if len(node.Names) == 0 && node.Tag == nil {
+ return fromWildNode(node.Type)
+ }
+ }
+ return -1
+}
+
+func nodeLists(n ast.Node) []nodeList {
+ var lists []nodeList
+ addList := func(list nodeList) {
+ if list.len() > 0 {
+ lists = append(lists, list)
+ }
+ }
+ switch x := n.(type) {
+ case nodeList:
+ addList(x)
+ case *ast.CompositeLit:
+ addList(exprList(x.Elts))
+ case *ast.CallExpr:
+ addList(exprList(x.Args))
+ case *ast.AssignStmt:
+ addList(exprList(x.Lhs))
+ addList(exprList(x.Rhs))
+ case *ast.ReturnStmt:
+ addList(exprList(x.Results))
+ case *ast.ValueSpec:
+ addList(exprList(x.Values))
+ case *ast.BlockStmt:
+ addList(stmtList(x.List))
+ case *ast.CaseClause:
+ addList(exprList(x.List))
+ addList(stmtList(x.Body))
+ case *ast.CommClause:
+ addList(stmtList(x.Body))
+ }
+ return lists
+}
+
+type exprList []ast.Expr
+type identList []*ast.Ident
+type stmtList []ast.Stmt
+type specList []ast.Spec
+type fieldList []*ast.Field
+
+func (l exprList) len() int { return len(l) }
+func (l identList) len() int { return len(l) }
+func (l stmtList) len() int { return len(l) }
+func (l specList) len() int { return len(l) }
+func (l fieldList) len() int { return len(l) }
+
+func (l exprList) at(i int) ast.Node { return l[i] }
+func (l identList) at(i int) ast.Node { return l[i] }
+func (l stmtList) at(i int) ast.Node { return l[i] }
+func (l specList) at(i int) ast.Node { return l[i] }
+func (l fieldList) at(i int) ast.Node { return l[i] }
+
+func (l exprList) slice(i, j int) nodeList { return l[i:j] }
+func (l identList) slice(i, j int) nodeList { return l[i:j] }
+func (l stmtList) slice(i, j int) nodeList { return l[i:j] }
+func (l specList) slice(i, j int) nodeList { return l[i:j] }
+func (l fieldList) slice(i, j int) nodeList { return l[i:j] }
+
+func (l exprList) Pos() token.Pos { return l[0].Pos() }
+func (l identList) Pos() token.Pos { return l[0].Pos() }
+func (l stmtList) Pos() token.Pos { return l[0].Pos() }
+func (l specList) Pos() token.Pos { return l[0].Pos() }
+func (l fieldList) Pos() token.Pos { return l[0].Pos() }
+
+func (l exprList) End() token.Pos { return l[len(l)-1].End() }
+func (l identList) End() token.Pos { return l[len(l)-1].End() }
+func (l stmtList) End() token.Pos { return l[len(l)-1].End() }
+func (l specList) End() token.Pos { return l[len(l)-1].End() }
+func (l fieldList) End() token.Pos { return l[len(l)-1].End() }
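Most of the matching above funnels into nodes, which compares two node lists
and backtracks so that `$*` wildcards and partial statement-list matches can
absorb any number of elements. A small sketch of the observable effect follows;
as before it is editorial, assumes the vendored internal import path, and uses
an illustrative snippet: a two-statement pattern is found as a partial match
inside a longer function body.

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"

		"github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
	)

	func main() {
		fset := token.NewFileSet()
		src := "package p\nfunc f() int {\n\tn := 1\n\tv := n * 2\n\treturn v\n}\n"
		f, err := parser.ParseFile(fset, "f.go", src, 0)
		if err != nil {
			panic(err)
		}
		// A multi-statement pattern parses into a stmtList; nodes() then
		// allows a partial match inside the longer body, reporting the
		// "v := n * 2; return v" tail with $x bound to v.
		pat, err := gogrep.Parse(fset, "$x := $y; return $x")
		if err != nil {
			panic(err)
		}
		pat.Match(f, func(m gogrep.MatchData) {
			fmt.Println("assign-then-return at", fset.Position(m.Node.Pos()))
		})
	}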
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go
new file mode 100644
index 00000000..b46e6439
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go
@@ -0,0 +1,452 @@
+// Copyright (c) 2017, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "regexp"
+ "strconv"
+ "strings"
+ "text/template"
+)
+
+func (m *matcher) transformSource(expr string) (string, []posOffset, error) {
+ toks, err := m.tokenize([]byte(expr))
+ if err != nil {
+ return "", nil, fmt.Errorf("cannot tokenize expr: %v", err)
+ }
+ var offs []posOffset
+ lbuf := lineColBuffer{line: 1, col: 1}
+ addOffset := func(length int) {
+ lbuf.offs -= length
+ offs = append(offs, posOffset{
+ atLine: lbuf.line,
+ atCol: lbuf.col,
+ offset: length,
+ })
+ }
+ if len(toks) > 0 && toks[0].tok == tokAggressive {
+ toks = toks[1:]
+ m.aggressive = true
+ }
+ lastLit := false
+ for _, t := range toks {
+ if lbuf.offs >= t.pos.Offset && lastLit && t.lit != "" {
+ lbuf.WriteString(" ")
+ }
+ for lbuf.offs < t.pos.Offset {
+ lbuf.WriteString(" ")
+ }
+ if t.lit == "" {
+ lbuf.WriteString(t.tok.String())
+ lastLit = false
+ continue
+ }
+ if isWildName(t.lit) {
+ // to correct the position offsets for the extra
+ // info attached to ident name strings
+ addOffset(len(wildPrefix) - 1)
+ }
+ lbuf.WriteString(t.lit)
+ lastLit = strings.TrimSpace(t.lit) != ""
+ }
+ // trailing newlines can cause issues with commas
+ return strings.TrimSpace(lbuf.String()), offs, nil
+}
+
+func (m *matcher) parseExpr(expr string) (ast.Node, error) {
+ exprStr, offs, err := m.transformSource(expr)
+ if err != nil {
+ return nil, err
+ }
+ node, _, err := parseDetectingNode(m.fset, exprStr)
+ if err != nil {
+ err = subPosOffsets(err, offs...)
+ return nil, fmt.Errorf("cannot parse expr: %v", err)
+ }
+ return node, nil
+}
+
+type lineColBuffer struct {
+ bytes.Buffer
+ line, col, offs int
+}
+
+func (l *lineColBuffer) WriteString(s string) (n int, err error) {
+ for _, r := range s {
+ if r == '\n' {
+ l.line++
+ l.col = 1
+ } else {
+ l.col++
+ }
+ l.offs++
+ }
+ return l.Buffer.WriteString(s)
+}
+
+var tmplDecl = template.Must(template.New("").Parse(`` +
+ `package p; {{ . }}`))
+
+var tmplExprs = template.Must(template.New("").Parse(`` +
+ `package p; var _ = []interface{}{ {{ . }}, }`))
+
+var tmplStmts = template.Must(template.New("").Parse(`` +
+ `package p; func _() { {{ . }} }`))
+
+var tmplType = template.Must(template.New("").Parse(`` +
+ `package p; var _ {{ . }}`))
+
+var tmplValSpec = template.Must(template.New("").Parse(`` +
+ `package p; var {{ . }}`))
+
+func execTmpl(tmpl *template.Template, src string) string {
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, src); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+func noBadNodes(node ast.Node) bool {
+ any := false
+ ast.Inspect(node, func(n ast.Node) bool {
+ if any {
+ return false
+ }
+ switch n.(type) {
+ case *ast.BadExpr, *ast.BadDecl:
+ any = true
+ }
+ return true
+ })
+ return !any
+}
+
+func parseType(fset *token.FileSet, src string) (ast.Expr, *ast.File, error) {
+ asType := execTmpl(tmplType, src)
+ f, err := parser.ParseFile(fset, "", asType, 0)
+ if err != nil {
+ err = subPosOffsets(err, posOffset{1, 1, 17})
+ return nil, nil, err
+ }
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ return vs.Type, f, nil
+}
+
+// parseDetectingNode tries its best to parse the ast.Node contained in src, as
+// one of: *ast.File, ast.Decl, ast.Expr, ast.Stmt, *ast.ValueSpec.
+// It also returns the *ast.File used for the parsing, so that the returned node
+// can be easily type-checked.
+func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, *ast.File, error) {
+ file := fset.AddFile("", fset.Base(), len(src))
+ scan := scanner.Scanner{}
+ scan.Init(file, []byte(src), nil, 0)
+ if _, tok, _ := scan.Scan(); tok == token.EOF {
+ return nil, nil, fmt.Errorf("empty source code")
+ }
+ var mainErr error
+
+ // first try as a whole file
+ if f, err := parser.ParseFile(fset, "", src, 0); err == nil && noBadNodes(f) {
+ return f, f, nil
+ }
+
+ // then as a single declaration, or many
+ asDecl := execTmpl(tmplDecl, src)
+ if f, err := parser.ParseFile(fset, "", asDecl, 0); err == nil && noBadNodes(f) {
+ if len(f.Decls) == 1 {
+ return f.Decls[0], f, nil
+ }
+ return f, f, nil
+ }
+
+ // then as value expressions
+ asExprs := execTmpl(tmplExprs, src)
+ if f, err := parser.ParseFile(fset, "", asExprs, 0); err == nil && noBadNodes(f) {
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ cl := vs.Values[0].(*ast.CompositeLit)
+ if len(cl.Elts) == 1 {
+ return cl.Elts[0], f, nil
+ }
+ return exprList(cl.Elts), f, nil
+ }
+
+ // then try as statements
+ asStmts := execTmpl(tmplStmts, src)
+ if f, err := parser.ParseFile(fset, "", asStmts, 0); err == nil && noBadNodes(f) {
+ bl := f.Decls[0].(*ast.FuncDecl).Body
+ if len(bl.List) == 1 {
+ return bl.List[0], f, nil
+ }
+ return stmtList(bl.List), f, nil
+ } else {
+ // Statements is what covers most cases, so it will give
+ // the best overall error message. Show positions
+ // relative to where the user's code is put in the
+ // template.
+ mainErr = subPosOffsets(err, posOffset{1, 1, 22})
+ }
+
+ // type expressions not yet picked up, for e.g. chans and interfaces
+ if typ, f, err := parseType(fset, src); err == nil && noBadNodes(f) {
+ return typ, f, nil
+ }
+
+ // value specs
+ asValSpec := execTmpl(tmplValSpec, src)
+ if f, err := parser.ParseFile(fset, "", asValSpec, 0); err == nil && noBadNodes(f) {
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ return vs, f, nil
+ }
+ return nil, nil, mainErr
+}
+
+type posOffset struct {
+ atLine, atCol int
+ offset int
+}
+
+func subPosOffsets(err error, offs ...posOffset) error {
+ list, ok := err.(scanner.ErrorList)
+ if !ok {
+ return err
+ }
+ for i, err := range list {
+ for _, off := range offs {
+ if err.Pos.Line != off.atLine {
+ continue
+ }
+ if err.Pos.Column < off.atCol {
+ continue
+ }
+ err.Pos.Column -= off.offset
+ }
+ list[i] = err
+ }
+ return list
+}
+
+const (
+ _ token.Token = -iota
+ tokAggressive
+)
+
+type fullToken struct {
+ pos token.Position
+ tok token.Token
+ lit string
+}
+
+type caseStatus uint
+
+const (
+ caseNone caseStatus = iota
+ caseNeedBlock
+ caseHere
+)
+
+func (m *matcher) tokenize(src []byte) ([]fullToken, error) {
+ var s scanner.Scanner
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+
+ var err error
+ onError := func(pos token.Position, msg string) {
+ switch msg { // allow certain extra chars
+ case `illegal character U+0024 '$'`:
+ case `illegal character U+007E '~'`:
+ default:
+ err = fmt.Errorf("%v: %s", pos, msg)
+ }
+ }
+
+ // we will modify the input source under the scanner's nose to
+ // enable some features such as regexes.
+ s.Init(file, src, onError, scanner.ScanComments)
+
+ next := func() fullToken {
+ pos, tok, lit := s.Scan()
+ return fullToken{fset.Position(pos), tok, lit}
+ }
+
+ caseStat := caseNone
+
+ var toks []fullToken
+ for t := next(); t.tok != token.EOF; t = next() {
+ switch t.lit {
+ case "$": // continues below
+ case "~":
+ toks = append(toks, fullToken{t.pos, tokAggressive, ""})
+ continue
+ case "switch", "select", "case":
+ if t.lit == "case" {
+ caseStat = caseNone
+ } else {
+ caseStat = caseNeedBlock
+ }
+ fallthrough
+ default: // regular Go code
+ if t.tok == token.LBRACE && caseStat == caseNeedBlock {
+ caseStat = caseHere
+ }
+ toks = append(toks, t)
+ continue
+ }
+ wt, err := m.wildcard(t.pos, next)
+ if err != nil {
+ return nil, err
+ }
+ if caseStat == caseHere {
+ toks = append(toks, fullToken{wt.pos, token.IDENT, "case"})
+ }
+ toks = append(toks, wt)
+ if caseStat == caseHere {
+ toks = append(toks, fullToken{wt.pos, token.COLON, ""})
+ toks = append(toks, fullToken{wt.pos, token.IDENT, "gogrep_body"})
+ }
+ }
+ return toks, err
+}
+
+func (m *matcher) wildcard(pos token.Position, next func() fullToken) (fullToken, error) {
+ wt := fullToken{pos, token.IDENT, wildPrefix}
+ t := next()
+ var info varInfo
+ if t.tok == token.MUL {
+ t = next()
+ info.any = true
+ }
+ if t.tok != token.IDENT {
+ return wt, fmt.Errorf("%v: $ must be followed by ident, got %v",
+ t.pos, t.tok)
+ }
+ id := len(m.vars)
+ wt.lit += strconv.Itoa(id)
+ info.name = t.lit
+ m.vars = append(m.vars, info)
+ return wt, nil
+}
+
+type typeCheck struct {
+ op string // "type", "asgn", "conv"
+ expr ast.Expr
+}
+
+type attribute interface{}
+
+type typProperty string
+
+type typUnderlying string
+
+func (m *matcher) parseAttrs(src string) (attribute, error) {
+ toks, err := m.tokenize([]byte(src))
+ if err != nil {
+ return nil, err
+ }
+ i := -1
+ var t fullToken
+ next := func() fullToken {
+ if i++; i < len(toks) {
+ return toks[i]
+ }
+ return fullToken{tok: token.EOF, pos: t.pos}
+ }
+ t = next()
+ op := t.lit
+ switch op { // the ones that don't take args
+ case "comp", "addr":
+ if t = next(); t.tok != token.SEMICOLON {
+ return nil, fmt.Errorf("%v: wanted EOF, got %v", t.pos, t.tok)
+ }
+ return typProperty(op), nil
+ }
+ opPos := t.pos
+ if t = next(); t.tok != token.LPAREN {
+ return nil, fmt.Errorf("%v: wanted (", t.pos)
+ }
+ var attr attribute
+ switch op {
+ case "rx":
+ t = next()
+ rxStr, err := strconv.Unquote(t.lit)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %v", t.pos, err)
+ }
+ if !strings.HasPrefix(rxStr, "^") {
+ rxStr = "^" + rxStr
+ }
+ if !strings.HasSuffix(rxStr, "$") {
+ rxStr = rxStr + "$"
+ }
+ rx, err := regexp.Compile(rxStr)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %v", t.pos, err)
+ }
+ attr = rx
+ case "type", "asgn", "conv":
+ t = next()
+ start := t.pos.Offset
+ for open := 1; open > 0; t = next() {
+ switch t.tok {
+ case token.LPAREN:
+ open++
+ case token.RPAREN:
+ open--
+ case token.EOF:
+ return nil, fmt.Errorf("%v: expected ) to close (", t.pos)
+ }
+ }
+ end := t.pos.Offset - 1
+ typeStr := strings.TrimSpace(string(src[start:end]))
+ fset := token.NewFileSet()
+ typeExpr, _, err := parseType(fset, typeStr)
+ if err != nil {
+ return nil, err
+ }
+ attr = typeCheck{op, typeExpr}
+ i -= 2 // since we went past RPAREN above
+ case "is":
+ switch t = next(); t.lit {
+ case "basic", "array", "slice", "struct", "interface",
+ "pointer", "func", "map", "chan":
+ default:
+ return nil, fmt.Errorf("%v: unknown type: %q", t.pos,
+ t.lit)
+ }
+ attr = typUnderlying(t.lit)
+ default:
+ return nil, fmt.Errorf("%v: unknown op %q", opPos, op)
+ }
+ if t = next(); t.tok != token.RPAREN {
+ return nil, fmt.Errorf("%v: wanted ), got %v", t.pos, t.tok)
+ }
+ if t = next(); t.tok != token.SEMICOLON {
+ return nil, fmt.Errorf("%v: wanted EOF, got %v", t.pos, t.tok)
+ }
+ return attr, nil
+}
+
+// using a prefix is good enough for now
+const wildPrefix = "gogrep_"
+
+func isWildName(name string) bool {
+ return strings.HasPrefix(name, wildPrefix)
+}
+
+func fromWildName(s string) int {
+ if !isWildName(s) {
+ return -1
+ }
+ n, err := strconv.Atoi(s[len(wildPrefix):])
+ if err != nil {
+ return -1
+ }
+ return n
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go
new file mode 100644
index 00000000..8870858e
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go
@@ -0,0 +1,261 @@
+// Copyright (c) 2018, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "reflect"
+)
+
+func (m *matcher) cmdSubst(cmd exprCmd, subs []submatch) []submatch {
+ for i := range subs {
+ sub := &subs[i]
+ nodeCopy, _ := m.parseExpr(cmd.src)
+ // since we'll want to set positions within the file's
+ // FileSet
+ scrubPositions(nodeCopy)
+
+ m.fillParents(nodeCopy)
+ nodeCopy = m.fillValues(nodeCopy, sub.values)
+ m.substNode(sub.node, nodeCopy)
+ sub.node = nodeCopy
+ }
+ return subs
+}
+
+type topNode struct {
+ Node ast.Node
+}
+
+func (t topNode) Pos() token.Pos { return t.Node.Pos() }
+func (t topNode) End() token.Pos { return t.Node.End() }
+
+func (m *matcher) fillValues(node ast.Node, values map[string]ast.Node) ast.Node {
+ // node might not have a parent, in which case we need to set an
+ // artificial one. Its pointer interface is a copy, so we must also
+ // return it.
+ top := &topNode{node}
+ m.setParentOf(node, top)
+
+ inspect(node, func(node ast.Node) bool {
+ id := fromWildNode(node)
+ info := m.info(id)
+ if info.name == "" {
+ return true
+ }
+ prev := values[info.name]
+ switch prev.(type) {
+ case exprList:
+ node = exprList([]ast.Expr{
+ node.(*ast.Ident),
+ })
+ case stmtList:
+ if ident, ok := node.(*ast.Ident); ok {
+ node = &ast.ExprStmt{X: ident}
+ }
+ node = stmtList([]ast.Stmt{
+ node.(*ast.ExprStmt),
+ })
+ }
+ m.substNode(node, prev)
+ return true
+ })
+ m.setParentOf(node, nil)
+ return top.Node
+}
+
+func (m *matcher) substNode(oldNode, newNode ast.Node) {
+ parent := m.parentOf(oldNode)
+ m.setParentOf(newNode, parent)
+
+ ptr := m.nodePtr(oldNode)
+ switch x := ptr.(type) {
+ case **ast.Ident:
+ *x = newNode.(*ast.Ident)
+ case *ast.Node:
+ *x = newNode
+ case *ast.Expr:
+ *x = newNode.(ast.Expr)
+ case *ast.Stmt:
+ switch y := newNode.(type) {
+ case ast.Expr:
+ stmt := &ast.ExprStmt{X: y}
+ m.setParentOf(stmt, parent)
+ *x = stmt
+ case ast.Stmt:
+ *x = y
+ default:
+ panic(fmt.Sprintf("cannot replace stmt with %T", y))
+ }
+ case *[]ast.Expr:
+ oldList := oldNode.(exprList)
+ var first, last []ast.Expr
+ for i, expr := range *x {
+ if expr == oldList[0] {
+ first = (*x)[:i]
+ last = (*x)[i+len(oldList):]
+ break
+ }
+ }
+ switch y := newNode.(type) {
+ case ast.Expr:
+ *x = append(first, y)
+ case exprList:
+ *x = append(first, y...)
+ default:
+ panic(fmt.Sprintf("cannot replace exprs with %T", y))
+ }
+ *x = append(*x, last...)
+ case *[]ast.Stmt:
+ oldList := oldNode.(stmtList)
+ var first, last []ast.Stmt
+ for i, stmt := range *x {
+ if stmt == oldList[0] {
+ first = (*x)[:i]
+ last = (*x)[i+len(oldList):]
+ break
+ }
+ }
+ switch y := newNode.(type) {
+ case ast.Expr:
+ stmt := &ast.ExprStmt{X: y}
+ m.setParentOf(stmt, parent)
+ *x = append(first, stmt)
+ case ast.Stmt:
+ *x = append(first, y)
+ case stmtList:
+ *x = append(first, y...)
+ default:
+ panic(fmt.Sprintf("cannot replace stmts with %T", y))
+ }
+ *x = append(*x, last...)
+ case nil:
+ return
+ default:
+ panic(fmt.Sprintf("unsupported substitution: %T", x))
+ }
+ // the new nodes have scrubbed positions, so try our best to use
+ // sensible ones
+ fixPositions(parent)
+}
+
+func (m *matcher) parentOf(node ast.Node) ast.Node {
+ list, ok := node.(nodeList)
+ if ok {
+ node = list.at(0)
+ }
+ return m.parents[node]
+}
+
+func (m *matcher) setParentOf(node, parent ast.Node) {
+ list, ok := node.(nodeList)
+ if ok {
+ if list.len() == 0 {
+ return
+ }
+ node = list.at(0)
+ }
+ m.parents[node] = parent
+}
+
+func (m *matcher) nodePtr(node ast.Node) interface{} {
+ list, wantSlice := node.(nodeList)
+ if wantSlice {
+ node = list.at(0)
+ }
+ parent := m.parentOf(node)
+ if parent == nil {
+ return nil
+ }
+ v := reflect.ValueOf(parent).Elem()
+ for i := 0; i < v.NumField(); i++ {
+ fld := v.Field(i)
+ switch fld.Type().Kind() {
+ case reflect.Slice:
+ for i := 0; i < fld.Len(); i++ {
+ ifld := fld.Index(i)
+ if ifld.Interface() != node {
+ continue
+ }
+ if wantSlice {
+ return fld.Addr().Interface()
+ }
+ return ifld.Addr().Interface()
+ }
+ case reflect.Interface:
+ if fld.Interface() == node {
+ return fld.Addr().Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// nodePosHash is an ast.Node that can always be used as a key in maps,
+// even for nodes that are slices like nodeList.
+type nodePosHash struct {
+ pos, end token.Pos
+}
+
+func (n nodePosHash) Pos() token.Pos { return n.pos }
+func (n nodePosHash) End() token.Pos { return n.end }
+
+func posHash(node ast.Node) nodePosHash {
+ return nodePosHash{pos: node.Pos(), end: node.End()}
+}
+
+var posType = reflect.TypeOf(token.NoPos)
+
+func scrubPositions(node ast.Node) {
+ inspect(node, func(node ast.Node) bool {
+ v := reflect.ValueOf(node)
+ if v.Kind() != reflect.Ptr {
+ return true
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return true
+ }
+ for i := 0; i < v.NumField(); i++ {
+ fld := v.Field(i)
+ if fld.Type() == posType {
+ fld.SetInt(0)
+ }
+ }
+ return true
+ })
+}
+
+// fixPositions tries to fix common syntax errors caused from syntax rewrites.
+func fixPositions(node ast.Node) {
+ if top, ok := node.(*topNode); ok {
+ node = top.Node
+ }
+ // fallback sets pos to the 'to' position if not valid.
+ fallback := func(pos *token.Pos, to token.Pos) {
+ if !pos.IsValid() {
+ *pos = to
+ }
+ }
+ ast.Inspect(node, func(node ast.Node) bool {
+ // TODO: many more node types
+ switch x := node.(type) {
+ case *ast.GoStmt:
+ fallback(&x.Go, x.Call.Pos())
+ case *ast.ReturnStmt:
+ if len(x.Results) == 0 {
+ break
+ }
+ // Ensure that there's no newline before the returned
+ // values, as otherwise we have a naked return. See
+ // https://github.com/golang/go/issues/32854.
+ if pos := x.Results[0].Pos(); pos > x.Return {
+ x.Return = pos
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go
new file mode 100644
index 00000000..b4796a89
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2018, Daniel Martí
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "go/ast"
+ "go/printer"
+ "os"
+)
+
+func (m *matcher) cmdWrite(cmd exprCmd, subs []submatch) []submatch {
+ seenRoot := make(map[nodePosHash]bool)
+ filePaths := make(map[*ast.File]string)
+ var next []submatch
+ for _, sub := range subs {
+ root := m.nodeRoot(sub.node)
+ hash := posHash(root)
+ if seenRoot[hash] {
+ continue // avoid dups
+ }
+ seenRoot[hash] = true
+ file, ok := root.(*ast.File)
+ if ok {
+ path := m.fset.Position(file.Package).Filename
+ if path != "" {
+ // write to disk
+ filePaths[file] = path
+ continue
+ }
+ }
+ // pass it on, to print to stdout
+ next = append(next, submatch{node: root})
+ }
+ for file, path := range filePaths {
+ f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
+ if err != nil {
+ // TODO: return errors instead
+ panic(err)
+ }
+ if err := printConfig.Fprint(f, m.fset, file); err != nil {
+ // TODO: return errors instead
+ panic(err)
+ }
+ }
+ return next
+}
+
+var printConfig = printer.Config{
+ Mode: printer.UseSpaces | printer.TabIndent,
+ Tabwidth: 8,
+}
+
+func (m *matcher) nodeRoot(node ast.Node) ast.Node {
+ parent := m.parentOf(node)
+ if parent == nil {
+ return node
+ }
+ if _, ok := parent.(nodeList); ok {
+ return parent
+ }
+ return m.nodeRoot(parent)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go
new file mode 100644
index 00000000..6e9550c1
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go
@@ -0,0 +1,9 @@
+package ruleguard
+
+type bool3 int
+
+const (
+ bool3unset bool3 = iota
+ bool3false
+ bool3true
+)
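
The tri-state `bool3` lets a submatch filter distinguish "no constraint" (`bool3unset`) from an explicit requirement that a property hold (`bool3true`) or not hold (`bool3false`); the runner only checks a property when the value is set. A minimal sketch of that dispatch, using a hypothetical helper name:

```go
package ruleguard

// matchesBool3 illustrates how a tri-state constraint is applied:
// bool3unset imposes no requirement, while bool3true/bool3false
// require the checked property to hold or not hold.
func matchesBool3(constraint bool3, actual bool) bool {
	switch constraint {
	case bool3true:
		return actual
	case bool3false:
		return !actual
	default: // bool3unset
		return true
	}
}
```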
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go
new file mode 100644
index 00000000..c566578d
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go
@@ -0,0 +1,40 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+
+ "github.com/quasilyte/go-ruleguard/dslgen"
+)
+
+type dslImporter struct {
+ fallback types.Importer
+}
+
+func newDSLImporter() *dslImporter {
+ return &dslImporter{fallback: importer.Default()}
+}
+
+func (i *dslImporter) Import(path string) (*types.Package, error) {
+ switch path {
+ case "github.com/quasilyte/go-ruleguard/dsl/fluent":
+ return i.importDSL(path, dslgen.Fluent)
+
+ default:
+ return i.fallback.Import(path)
+ }
+}
+
+func (i *dslImporter) importDSL(path string, src []byte) (*types.Package, error) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "dsl.go", src, 0)
+ if err != nil {
+ return nil, err
+ }
+ var typechecker types.Config
+ var info types.Info
+ return typechecker.Check(path, fset, []*ast.File{f}, &info)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go
new file mode 100644
index 00000000..1192d849
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go
@@ -0,0 +1,36 @@
+package ruleguard
+
+import (
+ "go/types"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+)
+
+type scopedGoRuleSet struct {
+ uncategorized []goRule
+ categorizedNum int
+ rulesByCategory [nodeCategoriesCount][]goRule
+}
+
+type goRule struct {
+ filename string
+ severity string
+ pat *gogrep.Pattern
+ msg string
+ location string
+ suggestion string
+ filters map[string]submatchFilter
+}
+
+type submatchFilter struct {
+ typePred func(typeQuery) bool
+ textPred func(string) bool
+ pure bool3
+ constant bool3
+ addressable bool3
+}
+
+type typeQuery struct {
+ x types.Type
+ ctx *Context
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go
new file mode 100644
index 00000000..e494930a
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go
@@ -0,0 +1,24 @@
+package ruleguard
+
+func mergeRuleSets(toMerge []*GoRuleSet) *GoRuleSet {
+ out := &GoRuleSet{
+ local: &scopedGoRuleSet{},
+ universal: &scopedGoRuleSet{},
+ }
+
+ for _, x := range toMerge {
+ out.local = appendScopedRuleSet(out.local, x.local)
+ out.universal = appendScopedRuleSet(out.universal, x.universal)
+ }
+
+ return out
+}
+
+func appendScopedRuleSet(dst, src *scopedGoRuleSet) *scopedGoRuleSet {
+ dst.uncategorized = append(dst.uncategorized, src.uncategorized...)
+ for cat, rules := range src.rulesByCategory {
+ dst.rulesByCategory[cat] = append(dst.rulesByCategory[cat], rules...)
+ dst.categorizedNum += len(rules)
+ }
+ return dst
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go
new file mode 100644
index 00000000..859ed39a
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go
@@ -0,0 +1,159 @@
+package ruleguard
+
+import (
+ "go/ast"
+)
+
+type nodeCategory int
+
+const (
+ nodeUnknown nodeCategory = iota
+
+ nodeArrayType
+ nodeAssignStmt
+ nodeBasicLit
+ nodeBinaryExpr
+ nodeBlockStmt
+ nodeBranchStmt
+ nodeCallExpr
+ nodeCaseClause
+ nodeChanType
+ nodeCommClause
+ nodeCompositeLit
+ nodeDeclStmt
+ nodeDeferStmt
+ nodeEllipsis
+ nodeEmptyStmt
+ nodeExprStmt
+ nodeForStmt
+ nodeFuncDecl
+ nodeFuncLit
+ nodeFuncType
+ nodeGenDecl
+ nodeGoStmt
+ nodeIdent
+ nodeIfStmt
+ nodeImportSpec
+ nodeIncDecStmt
+ nodeIndexExpr
+ nodeInterfaceType
+ nodeKeyValueExpr
+ nodeLabeledStmt
+ nodeMapType
+ nodeParenExpr
+ nodeRangeStmt
+ nodeReturnStmt
+ nodeSelectStmt
+ nodeSelectorExpr
+ nodeSendStmt
+ nodeSliceExpr
+ nodeStarExpr
+ nodeStructType
+ nodeSwitchStmt
+ nodeTypeAssertExpr
+ nodeTypeSpec
+ nodeTypeSwitchStmt
+ nodeUnaryExpr
+ nodeValueSpec
+
+ nodeCategoriesCount
+)
+
+func categorizeNode(n ast.Node) nodeCategory {
+ switch n.(type) {
+ case *ast.ArrayType:
+ return nodeArrayType
+ case *ast.AssignStmt:
+ return nodeAssignStmt
+ case *ast.BasicLit:
+ return nodeBasicLit
+ case *ast.BinaryExpr:
+ return nodeBinaryExpr
+ case *ast.BlockStmt:
+ return nodeBlockStmt
+ case *ast.BranchStmt:
+ return nodeBranchStmt
+ case *ast.CallExpr:
+ return nodeCallExpr
+ case *ast.CaseClause:
+ return nodeCaseClause
+ case *ast.ChanType:
+ return nodeChanType
+ case *ast.CommClause:
+ return nodeCommClause
+ case *ast.CompositeLit:
+ return nodeCompositeLit
+ case *ast.DeclStmt:
+ return nodeDeclStmt
+ case *ast.DeferStmt:
+ return nodeDeferStmt
+ case *ast.Ellipsis:
+ return nodeEllipsis
+ case *ast.EmptyStmt:
+ return nodeEmptyStmt
+ case *ast.ExprStmt:
+ return nodeExprStmt
+ case *ast.ForStmt:
+ return nodeForStmt
+ case *ast.FuncDecl:
+ return nodeFuncDecl
+ case *ast.FuncLit:
+ return nodeFuncLit
+ case *ast.FuncType:
+ return nodeFuncType
+ case *ast.GenDecl:
+ return nodeGenDecl
+ case *ast.GoStmt:
+ return nodeGoStmt
+ case *ast.Ident:
+ return nodeIdent
+ case *ast.IfStmt:
+ return nodeIfStmt
+ case *ast.ImportSpec:
+ return nodeImportSpec
+ case *ast.IncDecStmt:
+ return nodeIncDecStmt
+ case *ast.IndexExpr:
+ return nodeIndexExpr
+ case *ast.InterfaceType:
+ return nodeInterfaceType
+ case *ast.KeyValueExpr:
+ return nodeKeyValueExpr
+ case *ast.LabeledStmt:
+ return nodeLabeledStmt
+ case *ast.MapType:
+ return nodeMapType
+ case *ast.ParenExpr:
+ return nodeParenExpr
+ case *ast.RangeStmt:
+ return nodeRangeStmt
+ case *ast.ReturnStmt:
+ return nodeReturnStmt
+ case *ast.SelectStmt:
+ return nodeSelectStmt
+ case *ast.SelectorExpr:
+ return nodeSelectorExpr
+ case *ast.SendStmt:
+ return nodeSendStmt
+ case *ast.SliceExpr:
+ return nodeSliceExpr
+ case *ast.StarExpr:
+ return nodeStarExpr
+ case *ast.StructType:
+ return nodeStructType
+ case *ast.SwitchStmt:
+ return nodeSwitchStmt
+ case *ast.TypeAssertExpr:
+ return nodeTypeAssertExpr
+ case *ast.TypeSpec:
+ return nodeTypeSpec
+ case *ast.TypeSwitchStmt:
+ return nodeTypeSwitchStmt
+ case *ast.UnaryExpr:
+ return nodeUnaryExpr
+ case *ast.ValueSpec:
+ return nodeValueSpec
+ default:
+ return nodeUnknown
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go
new file mode 100644
index 00000000..98fcd20d
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go
@@ -0,0 +1,669 @@
+package ruleguard
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io"
+ "path"
+ "regexp"
+ "strconv"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+ "github.com/quasilyte/go-ruleguard/ruleguard/typematch"
+)
+
+type rulesParser struct {
+ filename string
+ fset *token.FileSet
+ res *GoRuleSet
+ types *types.Info
+
+ itab *typematch.ImportsTab
+ dslImporter types.Importer
+ stdImporter types.Importer // TODO(quasilyte): share importer with gogrep?
+ srcImporter types.Importer
+}
+
+func newRulesParser() *rulesParser {
+ var stdlib = map[string]string{
+ "adler32": "hash/adler32",
+ "aes": "crypto/aes",
+ "ascii85": "encoding/ascii85",
+ "asn1": "encoding/asn1",
+ "ast": "go/ast",
+ "atomic": "sync/atomic",
+ "base32": "encoding/base32",
+ "base64": "encoding/base64",
+ "big": "math/big",
+ "binary": "encoding/binary",
+ "bits": "math/bits",
+ "bufio": "bufio",
+ "build": "go/build",
+ "bytes": "bytes",
+ "bzip2": "compress/bzip2",
+ "cgi": "net/http/cgi",
+ "cgo": "runtime/cgo",
+ "cipher": "crypto/cipher",
+ "cmplx": "math/cmplx",
+ "color": "image/color",
+ "constant": "go/constant",
+ "context": "context",
+ "cookiejar": "net/http/cookiejar",
+ "crc32": "hash/crc32",
+ "crc64": "hash/crc64",
+ "crypto": "crypto",
+ "csv": "encoding/csv",
+ "debug": "runtime/debug",
+ "des": "crypto/des",
+ "doc": "go/doc",
+ "draw": "image/draw",
+ "driver": "database/sql/driver",
+ "dsa": "crypto/dsa",
+ "dwarf": "debug/dwarf",
+ "ecdsa": "crypto/ecdsa",
+ "ed25519": "crypto/ed25519",
+ "elf": "debug/elf",
+ "elliptic": "crypto/elliptic",
+ "encoding": "encoding",
+ "errors": "errors",
+ "exec": "os/exec",
+ "expvar": "expvar",
+ "fcgi": "net/http/fcgi",
+ "filepath": "path/filepath",
+ "flag": "flag",
+ "flate": "compress/flate",
+ "fmt": "fmt",
+ "fnv": "hash/fnv",
+ "format": "go/format",
+ "gif": "image/gif",
+ "gob": "encoding/gob",
+ "gosym": "debug/gosym",
+ "gzip": "compress/gzip",
+ "hash": "hash",
+ "heap": "container/heap",
+ "hex": "encoding/hex",
+ "hmac": "crypto/hmac",
+ "html": "html",
+ "http": "net/http",
+ "httptest": "net/http/httptest",
+ "httptrace": "net/http/httptrace",
+ "httputil": "net/http/httputil",
+ "image": "image",
+ "importer": "go/importer",
+ "io": "io",
+ "iotest": "testing/iotest",
+ "ioutil": "io/ioutil",
+ "jpeg": "image/jpeg",
+ "json": "encoding/json",
+ "jsonrpc": "net/rpc/jsonrpc",
+ "list": "container/list",
+ "log": "log",
+ "lzw": "compress/lzw",
+ "macho": "debug/macho",
+ "mail": "net/mail",
+ "math": "math",
+ "md5": "crypto/md5",
+ "mime": "mime",
+ "multipart": "mime/multipart",
+ "net": "net",
+ "os": "os",
+ "palette": "image/color/palette",
+ "parse": "text/template/parse",
+ "parser": "go/parser",
+ "path": "path",
+ "pe": "debug/pe",
+ "pem": "encoding/pem",
+ "pkix": "crypto/x509/pkix",
+ "plan9obj": "debug/plan9obj",
+ "plugin": "plugin",
+ "png": "image/png",
+ "pprof": "runtime/pprof",
+ "printer": "go/printer",
+ "quick": "testing/quick",
+ "quotedprintable": "mime/quotedprintable",
+ "race": "runtime/race",
+ "rand": "math/rand",
+ "rc4": "crypto/rc4",
+ "reflect": "reflect",
+ "regexp": "regexp",
+ "ring": "container/ring",
+ "rpc": "net/rpc",
+ "rsa": "crypto/rsa",
+ "runtime": "runtime",
+ "scanner": "text/scanner",
+ "sha1": "crypto/sha1",
+ "sha256": "crypto/sha256",
+ "sha512": "crypto/sha512",
+ "signal": "os/signal",
+ "smtp": "net/smtp",
+ "sort": "sort",
+ "sql": "database/sql",
+ "strconv": "strconv",
+ "strings": "strings",
+ "subtle": "crypto/subtle",
+ "suffixarray": "index/suffixarray",
+ "sync": "sync",
+ "syntax": "regexp/syntax",
+ "syscall": "syscall",
+ "syslog": "log/syslog",
+ "tabwriter": "text/tabwriter",
+ "tar": "archive/tar",
+ "template": "text/template",
+ "testing": "testing",
+ "textproto": "net/textproto",
+ "time": "time",
+ "tls": "crypto/tls",
+ "token": "go/token",
+ "trace": "runtime/trace",
+ "types": "go/types",
+ "unicode": "unicode",
+ "unsafe": "unsafe",
+ "url": "net/url",
+ "user": "os/user",
+ "utf16": "unicode/utf16",
+ "utf8": "unicode/utf8",
+ "x509": "crypto/x509",
+ "xml": "encoding/xml",
+ "zip": "archive/zip",
+ "zlib": "compress/zlib",
+ }
+
+ // TODO(quasilyte): do we need to pass the fileset here?
+ fset := token.NewFileSet()
+ return &rulesParser{
+ itab: typematch.NewImportsTab(stdlib),
+ stdImporter: importer.Default(),
+ srcImporter: importer.ForCompiler(fset, "source", nil),
+ dslImporter: newDSLImporter(),
+ }
+}
+
+func (p *rulesParser) ParseFile(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
+ p.filename = filename
+ p.fset = fset
+ p.res = &GoRuleSet{
+ local: &scopedGoRuleSet{},
+ universal: &scopedGoRuleSet{},
+ }
+
+ parserFlags := parser.Mode(0)
+ f, err := parser.ParseFile(fset, filename, r, parserFlags)
+ if err != nil {
+ return nil, fmt.Errorf("parser error: %v", err)
+ }
+
+ if f.Name.Name != "gorules" {
+ return nil, fmt.Errorf("expected a gorules package name, found %s", f.Name.Name)
+ }
+
+ typechecker := types.Config{Importer: p.dslImporter}
+ p.types = &types.Info{Types: map[ast.Expr]types.TypeAndValue{}}
+ _, err = typechecker.Check("gorules", fset, []*ast.File{f}, p.types)
+ if err != nil {
+ return nil, fmt.Errorf("typechecker error: %v", err)
+ }
+
+ for _, decl := range f.Decls {
+ decl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if err := p.parseRuleGroup(decl); err != nil {
+ return nil, err
+ }
+ }
+
+ return p.res, nil
+}
+
+func (p *rulesParser) parseRuleGroup(f *ast.FuncDecl) error {
+ if f.Body == nil {
+ return p.errorf(f, "unexpected empty function body")
+ }
+ if f.Type.Results != nil {
+ return p.errorf(f.Type.Results, "rule group function should not return anything")
+ }
+ params := f.Type.Params.List
+ if len(params) != 1 || len(params[0].Names) != 1 {
+ return p.errorf(f.Type.Params, "rule group function should accept exactly 1 Matcher param")
+ }
+ // TODO(quasilyte): do an actual matcher param type check?
+ matcher := params[0].Names[0].Name
+
+ p.itab.EnterScope()
+ defer p.itab.LeaveScope()
+
+ for _, stmt := range f.Body.List {
+ if _, ok := stmt.(*ast.DeclStmt); ok {
+ continue
+ }
+ stmtExpr, ok := stmt.(*ast.ExprStmt)
+ if !ok {
+ return p.errorf(stmt, "expected a %s method call, found %s", matcher, sprintNode(p.fset, stmt))
+ }
+ call, ok := stmtExpr.X.(*ast.CallExpr)
+ if !ok {
+ return p.errorf(stmt, "expected a %s method call, found %s", matcher, sprintNode(p.fset, stmt))
+ }
+ if err := p.parseCall(matcher, call); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (p *rulesParser) parseCall(matcher string, call *ast.CallExpr) error {
+ f := call.Fun.(*ast.SelectorExpr)
+ x, ok := f.X.(*ast.Ident)
+ if ok && x.Name == matcher {
+ return p.parseStmt(f.Sel, call.Args)
+ }
+
+ return p.parseRule(matcher, call)
+}
+
+func (p *rulesParser) parseStmt(fn *ast.Ident, args []ast.Expr) error {
+ switch fn.Name {
+ case "Import":
+ pkgPath, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ pkgName := path.Base(pkgPath)
+ p.itab.Load(pkgName, pkgPath)
+ return nil
+ default:
+ return p.errorf(fn, "unexpected %s method", fn.Name)
+ }
+}
+
+func (p *rulesParser) parseRule(matcher string, call *ast.CallExpr) error {
+ origCall := call
+ var (
+ matchArgs *[]ast.Expr
+ whereArgs *[]ast.Expr
+ suggestArgs *[]ast.Expr
+ reportArgs *[]ast.Expr
+ atArgs *[]ast.Expr
+ )
+ for {
+ chain, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ switch chain.Sel.Name {
+ case "Match":
+ matchArgs = &call.Args
+ case "Where":
+ whereArgs = &call.Args
+ case "Suggest":
+ suggestArgs = &call.Args
+ case "Report":
+ reportArgs = &call.Args
+ case "At":
+ atArgs = &call.Args
+ default:
+ return p.errorf(chain.Sel, "unexpected %s method", chain.Sel.Name)
+ }
+ call, ok = chain.X.(*ast.CallExpr)
+ if !ok {
+ break
+ }
+ }
+
+ dst := p.res.universal
+ filters := map[string]submatchFilter{}
+ proto := goRule{
+ filename: p.filename,
+ filters: filters,
+ }
+ var alternatives []string
+
+ if matchArgs == nil {
+ return p.errorf(origCall, "missing Match() call")
+ }
+ for _, arg := range *matchArgs {
+ alt, ok := p.toStringValue(arg)
+ if !ok {
+ return p.errorf(arg, "expected a string literal argument")
+ }
+ alternatives = append(alternatives, alt)
+ }
+
+ if whereArgs != nil {
+ if err := p.walkFilter(filters, (*whereArgs)[0], false); err != nil {
+ return err
+ }
+ }
+
+ if suggestArgs != nil {
+ s, ok := p.toStringValue((*suggestArgs)[0])
+ if !ok {
+ return p.errorf((*suggestArgs)[0], "expected string literal argument")
+ }
+ proto.suggestion = s
+ }
+
+ if reportArgs == nil {
+ if suggestArgs == nil {
+ return p.errorf(origCall, "missing Report() or Suggest() call")
+ }
+ proto.msg = "suggestion: " + proto.suggestion
+ } else {
+ message, ok := p.toStringValue((*reportArgs)[0])
+ if !ok {
+ return p.errorf((*reportArgs)[0], "expected string literal argument")
+ }
+ proto.msg = message
+ }
+
+ if atArgs != nil {
+ index, ok := (*atArgs)[0].(*ast.IndexExpr)
+ if !ok {
+ return p.errorf((*atArgs)[0], "expected %s[`varname`] expression", matcher)
+ }
+ arg, ok := p.toStringValue(index.Index)
+ if !ok {
+ return p.errorf(index.Index, "expected a string literal index")
+ }
+ proto.location = arg
+ }
+
+ for i, alt := range alternatives {
+ rule := proto
+ pat, err := gogrep.Parse(p.fset, alt)
+ if err != nil {
+ return p.errorf((*matchArgs)[i], "gogrep parse: %v", err)
+ }
+ rule.pat = pat
+ cat := categorizeNode(pat.Expr)
+ if cat == nodeUnknown {
+ dst.uncategorized = append(dst.uncategorized, rule)
+ } else {
+ dst.categorizedNum++
+ dst.rulesByCategory[cat] = append(dst.rulesByCategory[cat], rule)
+ }
+ }
+
+ return nil
+}
+
+func (p *rulesParser) walkFilter(dst map[string]submatchFilter, e ast.Expr, negate bool) error {
+ typeAnd := func(x, y func(typeQuery) bool) func(typeQuery) bool {
+ if x == nil {
+ return y
+ }
+ return func(q typeQuery) bool {
+ return x(q) && y(q)
+ }
+ }
+ textAnd := func(x, y func(string) bool) func(string) bool {
+ if x == nil {
+ return y
+ }
+ return func(s string) bool {
+ return x(s) && y(s)
+ }
+ }
+
+ switch e := e.(type) {
+ case *ast.UnaryExpr:
+ if e.Op == token.NOT {
+ return p.walkFilter(dst, e.X, !negate)
+ }
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND:
+ err := p.walkFilter(dst, e.X, negate)
+ if err != nil {
+ return err
+ }
+ return p.walkFilter(dst, e.Y, negate)
+ case token.GEQ, token.LEQ, token.LSS, token.GTR, token.EQL, token.NEQ:
+ operand := p.toFilterOperand(e.X)
+ y := p.types.Types[e.Y].Value
+ expectedResult := !negate
+ if operand.path == "Type.Size" && y != nil {
+ filter := dst[operand.varName]
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ x := constant.MakeInt64(q.ctx.Sizes.Sizeof(q.x))
+ return expectedResult == constant.Compare(x, e.Op, y)
+ })
+ dst[operand.varName] = filter
+ return nil
+ }
+ if operand.path == "Text" && y != nil {
+ filter := dst[operand.varName]
+ filter.textPred = textAnd(filter.textPred, func(s string) bool {
+ x := constant.MakeString(s)
+ return expectedResult == constant.Compare(x, e.Op, y)
+ })
+ dst[operand.varName] = filter
+ return nil
+ }
+ }
+ }
+
+ // TODO(quasilyte): refactor and extend.
+ operand := p.toFilterOperand(e)
+ args := operand.args
+ filter := dst[operand.varName]
+ switch operand.path {
+ default:
+ return p.errorf(e, "%s is not a valid filter expression", sprintNode(p.fset, e))
+ case "Pure":
+ if negate {
+ filter.pure = bool3false
+ } else {
+ filter.pure = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Const":
+ if negate {
+ filter.constant = bool3false
+ } else {
+ filter.constant = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Addressable":
+ if negate {
+ filter.addressable = bool3false
+ } else {
+ filter.addressable = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Text.Matches":
+ patternString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ re, err := regexp.Compile(patternString)
+ if err != nil {
+ return p.errorf(args[0], "parse regexp: %v", err)
+ }
+ wantMatched := !negate
+ filter.textPred = textAnd(filter.textPred, func(s string) bool {
+ return wantMatched == re.MatchString(s)
+ })
+ dst[operand.varName] = filter
+ case "Type.Is":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ ctx := typematch.Context{Itab: p.itab}
+ pat, err := typematch.Parse(&ctx, typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ wantIdentical := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantIdentical == pat.MatchIdentical(q.x)
+ })
+ dst[operand.varName] = filter
+ case "Type.ConvertibleTo":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ y, err := typeFromString(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ if y == nil {
+ return p.errorf(args[0], "can't convert %s into a type constraint yet", typeString)
+ }
+ wantConvertible := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantConvertible == types.ConvertibleTo(q.x, y)
+ })
+ dst[operand.varName] = filter
+ case "Type.AssignableTo":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ y, err := typeFromString(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ if y == nil {
+ return p.errorf(args[0], "can't convert %s into a type constraint yet", typeString)
+ }
+ wantAssignable := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantAssignable == types.AssignableTo(q.x, y)
+ })
+ dst[operand.varName] = filter
+ case "Type.Implements":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ n, err := parser.ParseExpr(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ e, ok := n.(*ast.SelectorExpr)
+ if !ok {
+ return p.errorf(args[0], "only qualified names are supported")
+ }
+ pkgName, ok := e.X.(*ast.Ident)
+ if !ok {
+ return p.errorf(e.X, "invalid package name")
+ }
+ pkgPath, ok := p.itab.Lookup(pkgName.Name)
+ if !ok {
+ return p.errorf(e.X, "package %s is not imported", pkgName.Name)
+ }
+ pkg, err := p.stdImporter.Import(pkgPath)
+ if err != nil {
+ pkg, err = p.srcImporter.Import(pkgPath)
+ if err != nil {
+ return p.errorf(e, "can't load %s: %v", pkgPath, err)
+ }
+ }
+ obj := pkg.Scope().Lookup(e.Sel.Name)
+ if obj == nil {
+ return p.errorf(e, "%s is not found in %s", e.Sel.Name, pkgPath)
+ }
+ iface, ok := obj.Type().Underlying().(*types.Interface)
+ if !ok {
+ return p.errorf(e, "%s is not an interface type", e.Sel.Name)
+ }
+ wantImplemented := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantImplemented == types.Implements(q.x, iface)
+ })
+ dst[operand.varName] = filter
+ }
+
+ return nil
+}
+
+func (p *rulesParser) toIntValue(x ast.Node) (int64, bool) {
+ lit, ok := x.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return 0, false
+ }
+ v, err := strconv.ParseInt(lit.Value, 10, 64)
+ return v, err == nil
+}
+
+func (p *rulesParser) toStringValue(x ast.Node) (string, bool) {
+ switch x := x.(type) {
+ case *ast.BasicLit:
+ if x.Kind != token.STRING {
+ return "", false
+ }
+ return unquoteNode(x), true
+ case ast.Expr:
+ typ, ok := p.types.Types[x]
+ if !ok || typ.Type.String() != "string" {
+ return "", false
+ }
+ str := typ.Value.ExactString()
+ str = str[1 : len(str)-1] // remove quotes
+ return str, true
+ }
+ return "", false
+}
+
+func (p *rulesParser) toFilterOperand(e ast.Expr) filterOperand {
+ var o filterOperand
+
+ if call, ok := e.(*ast.CallExpr); ok {
+ o.args = call.Args
+ e = call.Fun
+ }
+ var path string
+ for {
+ selector, ok := e.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ if path == "" {
+ path = selector.Sel.Name
+ } else {
+ path = selector.Sel.Name + "." + path
+ }
+ e = selector.X
+ }
+ indexing, ok := e.(*ast.IndexExpr)
+ if !ok {
+ return o
+ }
+ mapIdent, ok := indexing.X.(*ast.Ident)
+ if !ok {
+ return o
+ }
+ indexString, ok := p.toStringValue(indexing.Index)
+ if !ok {
+ return o
+ }
+
+ o.mapName = mapIdent.Name
+ o.varName = indexString
+ o.path = path
+ return o
+}
+
+func (p *rulesParser) errorf(n ast.Node, format string, args ...interface{}) error {
+ loc := p.fset.Position(n.Pos())
+ return fmt.Errorf("%s:%d: %s",
+ loc.Filename, loc.Line, fmt.Sprintf(format, args...))
+}
+
+type filterOperand struct {
+ mapName string
+ varName string
+ path string
+ args []ast.Expr
+}
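
For context, the parser above accepts rule files laid out as a `gorules` package whose functions take a single matcher parameter and chain `Match`/`Where`/`Suggest`/`Report`/`At` calls. A hedged sketch of such a file follows; the concrete rule is illustrative, and the `fluent.Matcher` type name is assumed from the `dsl/fluent` import path used by the DSL importer above:

```go
package gorules

import "github.com/quasilyte/go-ruleguard/dsl/fluent"

// emptyStringCheck is an illustrative rule group: it matches len($s) == 0
// on string-typed expressions and suggests comparing against "" instead.
func emptyStringCheck(m fluent.Matcher) {
	m.Match(`len($s) == 0`).
		Where(m["s"].Type.Is(`string`)).
		Report(`maybe use $s == "" instead`)
}
```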
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go
new file mode 100644
index 00000000..f6032c86
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go
@@ -0,0 +1,45 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+)
+
+type Context struct {
+ Types *types.Info
+ Sizes types.Sizes
+ Fset *token.FileSet
+ Report func(rule GoRuleInfo, n ast.Node, msg string, s *Suggestion)
+ Pkg *types.Package
+}
+
+type Suggestion struct {
+ From token.Pos
+ To token.Pos
+ Replacement []byte
+}
+
+func ParseRules(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
+ p := newRulesParser()
+ return p.ParseFile(filename, fset, r)
+}
+
+func RunRules(ctx *Context, f *ast.File, rules *GoRuleSet) error {
+ return newRulesRunner(ctx, rules).run(f)
+}
+
+type GoRuleInfo struct {
+ // Filename is a file that defined this rule.
+ Filename string
+}
+
+type GoRuleSet struct {
+ universal *scopedGoRuleSet
+ local *scopedGoRuleSet
+}
+
+func MergeRuleSets(toMerge []*GoRuleSet) *GoRuleSet {
+ return mergeRuleSets(toMerge)
+}
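
The exported surface above is intentionally small: parse the rules once with `ParseRules`, then run them per file with `RunRules` and a `Context` that carries type information and a `Report` callback. A minimal sketch of the wiring, assuming the file set, parsed file, type info, sizes, and package come from the caller's loader (the helper name is hypothetical):

```go
package example

import (
	"fmt"
	"go/ast"
	"go/token"
	"go/types"
	"io"

	"github.com/quasilyte/go-ruleguard/ruleguard"
)

// runOnFile applies a rules file to a single parsed and type-checked
// Go file, printing every reported match.
func runOnFile(rulesSrc io.Reader, fset *token.FileSet, f *ast.File,
	info *types.Info, sizes types.Sizes, pkg *types.Package) error {

	rules, err := ruleguard.ParseRules("rules.go", fset, rulesSrc)
	if err != nil {
		return err
	}
	ctx := &ruleguard.Context{
		Types: info,
		Sizes: sizes,
		Fset:  fset,
		Pkg:   pkg,
		Report: func(ri ruleguard.GoRuleInfo, n ast.Node, msg string, _ *ruleguard.Suggestion) {
			fmt.Printf("%s: %s (rule defined in %s)\n", fset.Position(n.Pos()), msg, ri.Filename)
		},
	}
	return ruleguard.RunRules(ctx, f, rules)
}
```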
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go
new file mode 100644
index 00000000..971e92ae
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go
@@ -0,0 +1,194 @@
+package ruleguard
+
+import (
+ "bytes"
+ "go/ast"
+ "go/printer"
+ "io/ioutil"
+ "strings"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+)
+
+type rulesRunner struct {
+ ctx *Context
+ rules *GoRuleSet
+
+ filename string
+ src []byte
+}
+
+func newRulesRunner(ctx *Context, rules *GoRuleSet) *rulesRunner {
+ return &rulesRunner{
+ ctx: ctx,
+ rules: rules,
+ }
+}
+
+func (rr *rulesRunner) nodeText(n ast.Node) []byte {
+ from := rr.ctx.Fset.Position(n.Pos()).Offset
+ to := rr.ctx.Fset.Position(n.End()).Offset
+ src := rr.fileBytes()
+ if (from >= 0 && int(from) < len(src)) && (to >= 0 && int(to) < len(src)) {
+ return src[from:to]
+ }
+ // Fallback to the printer.
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, rr.ctx.Fset, n); err != nil {
+ panic(err)
+ }
+ return buf.Bytes()
+}
+
+func (rr *rulesRunner) fileBytes() []byte {
+ if rr.src != nil {
+ return rr.src
+ }
+
+ // TODO(quasilyte): re-use src slice?
+ src, err := ioutil.ReadFile(rr.filename)
+ if err != nil || src == nil {
+ // Assign a zero-length slice so rr.src
+ // is never nil during the second fileBytes call.
+ rr.src = make([]byte, 0)
+ } else {
+ rr.src = src
+ }
+ return rr.src
+}
+
+func (rr *rulesRunner) run(f *ast.File) error {
+ // TODO(quasilyte): run local rules as well.
+
+ rr.filename = rr.ctx.Fset.Position(f.Pos()).Filename
+
+ for _, rule := range rr.rules.universal.uncategorized {
+ rule.pat.Match(f, func(m gogrep.MatchData) {
+ rr.handleMatch(rule, m)
+ })
+ }
+
+ if rr.rules.universal.categorizedNum != 0 {
+ ast.Inspect(f, func(n ast.Node) bool {
+ cat := categorizeNode(n)
+ for _, rule := range rr.rules.universal.rulesByCategory[cat] {
+ matched := false
+ rule.pat.MatchNode(n, func(m gogrep.MatchData) {
+ matched = rr.handleMatch(rule, m)
+ })
+ if matched {
+ break
+ }
+ }
+ return true
+ })
+ }
+
+ return nil
+}
+
+func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool {
+ for name, node := range m.Values {
+ expr, ok := node.(ast.Expr)
+ if !ok {
+ continue
+ }
+ filter, ok := rule.filters[name]
+ if !ok {
+ continue
+ }
+ if filter.typePred != nil {
+ typ := rr.ctx.Types.TypeOf(expr)
+ q := typeQuery{x: typ, ctx: rr.ctx}
+ if !filter.typePred(q) {
+ return false
+ }
+ }
+ if filter.textPred != nil {
+ if !filter.textPred(string(rr.nodeText(expr))) {
+ return false
+ }
+ }
+ switch filter.addressable {
+ case bool3true:
+ if !isAddressable(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isAddressable(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ switch filter.pure {
+ case bool3true:
+ if !isPure(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isPure(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ switch filter.constant {
+ case bool3true:
+ if !isConstant(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isConstant(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ }
+
+ prefix := ""
+ if rule.severity != "" {
+ prefix = rule.severity + ": "
+ }
+ message := prefix + rr.renderMessage(rule.msg, m.Node, m.Values, true)
+ node := m.Node
+ if rule.location != "" {
+ node = m.Values[rule.location]
+ }
+ var suggestion *Suggestion
+ if rule.suggestion != "" {
+ suggestion = &Suggestion{
+ Replacement: []byte(rr.renderMessage(rule.suggestion, m.Node, m.Values, false)),
+ From: node.Pos(),
+ To: node.End(),
+ }
+ }
+ info := GoRuleInfo{
+ Filename: rule.filename,
+ }
+ rr.ctx.Report(info, node, message, suggestion)
+ return true
+}
+
+func (rr *rulesRunner) renderMessage(msg string, n ast.Node, nodes map[string]ast.Node, truncate bool) string {
+ var buf strings.Builder
+ if strings.Contains(msg, "$$") {
+ buf.Write(rr.nodeText(n))
+ msg = strings.ReplaceAll(msg, "$$", buf.String())
+ }
+ if len(nodes) == 0 {
+ return msg
+ }
+ for name, n := range nodes {
+ key := "$" + name
+ if !strings.Contains(msg, key) {
+ continue
+ }
+ buf.Reset()
+ buf.Write(rr.nodeText(n))
+ // Don't interpolate strings that are too long.
+ var replacement string
+ if truncate && buf.Len() > 60 {
+ replacement = key
+ } else {
+ replacement = buf.String()
+ }
+ msg = strings.ReplaceAll(msg, key, replacement)
+ }
+ return msg
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go
new file mode 100644
index 00000000..5e14880c
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go
@@ -0,0 +1,340 @@
+package typematch
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+type patternOp int
+
+const (
+ opBuiltinType patternOp = iota
+ opPointer
+ opVar
+ opSlice
+ opArray
+ opMap
+ opChan
+ opNamed
+)
+
+type Pattern struct {
+ typeMatches map[string]types.Type
+ int64Matches map[string]int64
+
+ root *pattern
+}
+
+type pattern struct {
+ value interface{}
+ op patternOp
+ subs []*pattern
+}
+
+type ImportsTab struct {
+ imports []map[string]string
+}
+
+func NewImportsTab(initial map[string]string) *ImportsTab {
+ return &ImportsTab{imports: []map[string]string{initial}}
+}
+
+func (itab *ImportsTab) Lookup(pkgName string) (string, bool) {
+ for i := len(itab.imports) - 1; i >= 0; i-- {
+ pkgPath, ok := itab.imports[i][pkgName]
+ if ok {
+ return pkgPath, true
+ }
+ }
+ return "", false
+}
+
+func (itab *ImportsTab) Load(pkgName, pkgPath string) {
+ itab.imports[len(itab.imports)-1][pkgName] = pkgPath
+}
+
+func (itab *ImportsTab) EnterScope() {
+ itab.imports = append(itab.imports, map[string]string{})
+}
+
+func (itab *ImportsTab) LeaveScope() {
+ itab.imports = itab.imports[:len(itab.imports)-1]
+}
+
+type Context struct {
+ Itab *ImportsTab
+}
+
+func Parse(ctx *Context, s string) (*Pattern, error) {
+ noDollars := strings.ReplaceAll(s, "$", "__")
+ n, err := parser.ParseExpr(noDollars)
+ if err != nil {
+ return nil, err
+ }
+ root := parseExpr(ctx, n)
+ if root == nil {
+ return nil, fmt.Errorf("can't convert %s type expression", s)
+ }
+ p := &Pattern{
+ typeMatches: map[string]types.Type{},
+ int64Matches: map[string]int64{},
+ root: root,
+ }
+ return p, nil
+}
+
+var (
+ builtinTypeByName = map[string]types.Type{
+ "bool": types.Typ[types.Bool],
+ "int": types.Typ[types.Int],
+ "int8": types.Typ[types.Int8],
+ "int16": types.Typ[types.Int16],
+ "int32": types.Typ[types.Int32],
+ "int64": types.Typ[types.Int64],
+ "uint": types.Typ[types.Uint],
+ "uint8": types.Typ[types.Uint8],
+ "uint16": types.Typ[types.Uint16],
+ "uint32": types.Typ[types.Uint32],
+ "uint64": types.Typ[types.Uint64],
+ "uintptr": types.Typ[types.Uintptr],
+ "float32": types.Typ[types.Float32],
+ "float64": types.Typ[types.Float64],
+ "complex64": types.Typ[types.Complex64],
+ "complex128": types.Typ[types.Complex128],
+ "string": types.Typ[types.String],
+
+ "error": types.Universe.Lookup("error").Type(),
+
+ // Aliases.
+ "byte": types.Typ[types.Uint8],
+ "rune": types.Typ[types.Int32],
+ }
+
+ efaceType = types.NewInterfaceType(nil, nil)
+)
+
+func parseExpr(ctx *Context, e ast.Expr) *pattern {
+ switch e := e.(type) {
+ case *ast.Ident:
+ basic, ok := builtinTypeByName[e.Name]
+ if ok {
+ return &pattern{op: opBuiltinType, value: basic}
+ }
+ if strings.HasPrefix(e.Name, "__") {
+ name := strings.TrimPrefix(e.Name, "__")
+ return &pattern{op: opVar, value: name}
+ }
+
+ case *ast.SelectorExpr:
+ pkg, ok := e.X.(*ast.Ident)
+ if !ok {
+ return nil
+ }
+ pkgPath, ok := ctx.Itab.Lookup(pkg.Name)
+ if !ok {
+ return nil
+ }
+ return &pattern{op: opNamed, value: [2]string{pkgPath, e.Sel.Name}}
+
+ case *ast.StarExpr:
+ elem := parseExpr(ctx, e.X)
+ if elem == nil {
+ return nil
+ }
+ return &pattern{op: opPointer, subs: []*pattern{elem}}
+
+ case *ast.ArrayType:
+ elem := parseExpr(ctx, e.Elt)
+ if elem == nil {
+ return nil
+ }
+ if e.Len == nil {
+ return &pattern{
+ op: opSlice,
+ subs: []*pattern{elem},
+ }
+ }
+ if id, ok := e.Len.(*ast.Ident); ok && strings.HasPrefix(id.Name, "__") {
+ name := strings.TrimPrefix(id.Name, "__")
+ return &pattern{
+ op: opArray,
+ value: name,
+ subs: []*pattern{elem},
+ }
+ }
+ lit, ok := e.Len.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return nil
+ }
+ length, err := strconv.ParseInt(lit.Value, 10, 64)
+ if err != nil {
+ return nil
+ }
+ return &pattern{
+ op: opArray,
+ value: length,
+ subs: []*pattern{elem},
+ }
+
+ case *ast.MapType:
+ keyType := parseExpr(ctx, e.Key)
+ if keyType == nil {
+ return nil
+ }
+ valType := parseExpr(ctx, e.Value)
+ if valType == nil {
+ return nil
+ }
+ return &pattern{
+ op: opMap,
+ subs: []*pattern{keyType, valType},
+ }
+
+ case *ast.ChanType:
+ valType := parseExpr(ctx, e.Value)
+ if valType == nil {
+ return nil
+ }
+ var dir types.ChanDir
+ switch {
+ case e.Dir&ast.SEND != 0 && e.Dir&ast.RECV != 0:
+ dir = types.SendRecv
+ case e.Dir&ast.SEND != 0:
+ dir = types.SendOnly
+ case e.Dir&ast.RECV != 0:
+ dir = types.RecvOnly
+ default:
+ return nil
+ }
+ return &pattern{
+ op: opChan,
+ value: dir,
+ subs: []*pattern{valType},
+ }
+
+ case *ast.ParenExpr:
+ return parseExpr(ctx, e.X)
+
+ case *ast.InterfaceType:
+ if len(e.Methods.List) == 0 {
+ return &pattern{op: opBuiltinType, value: efaceType}
+ }
+ }
+
+ return nil
+}
+
+func (p *Pattern) MatchIdentical(typ types.Type) bool {
+ p.reset()
+ return p.matchIdentical(p.root, typ)
+}
+
+func (p *Pattern) reset() {
+ if len(p.int64Matches) != 0 {
+ p.int64Matches = map[string]int64{}
+ }
+ if len(p.typeMatches) != 0 {
+ p.typeMatches = map[string]types.Type{}
+ }
+}
+
+func (p *Pattern) matchIdentical(sub *pattern, typ types.Type) bool {
+ switch sub.op {
+ case opVar:
+ name := sub.value.(string)
+ if name == "_" {
+ return true
+ }
+ y, ok := p.typeMatches[name]
+ if !ok {
+ p.typeMatches[name] = typ
+ return true
+ }
+ if y == nil {
+ return typ == nil
+ }
+ return types.Identical(typ, y)
+
+ case opBuiltinType:
+ return types.Identical(typ, sub.value.(types.Type))
+
+ case opPointer:
+ typ, ok := typ.(*types.Pointer)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opSlice:
+ typ, ok := typ.(*types.Slice)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opArray:
+ typ, ok := typ.(*types.Array)
+ if !ok {
+ return false
+ }
+ var wantLen int64
+ switch v := sub.value.(type) {
+ case string:
+ if v == "_" {
+ wantLen = typ.Len()
+ break
+ }
+ length, ok := p.int64Matches[v]
+ if ok {
+ wantLen = length
+ } else {
+ p.int64Matches[v] = typ.Len()
+ wantLen = typ.Len()
+ }
+ case int64:
+ wantLen = v
+ }
+ return wantLen == typ.Len() && p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opMap:
+ typ, ok := typ.(*types.Map)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Key()) &&
+ p.matchIdentical(sub.subs[1], typ.Elem())
+
+ case opChan:
+ typ, ok := typ.(*types.Chan)
+ if !ok {
+ return false
+ }
+ dir := sub.value.(types.ChanDir)
+ return dir == typ.Dir() && p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opNamed:
+ typ, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ obj := typ.Obj()
+ pkg := obj.Pkg()
+ // pkg can be nil for builtin named types.
+ // There is no point in checking anything else as we never
+ // generate the opNamed for such types.
+ if pkg == nil {
+ return false
+ }
+ pkgPath := sub.value.([2]string)[0]
+ typeName := sub.value.([2]string)[1]
+ return obj.Pkg().Path() == pkgPath && typeName == obj.Name()
+
+ default:
+ return false
+ }
+}
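
A short sketch of the typematch API defined above: build an imports table, parse a type pattern, and test it against a `go/types` type constructed by hand:

```go
package main

import (
	"fmt"
	"go/types"

	"github.com/quasilyte/go-ruleguard/ruleguard/typematch"
)

func main() {
	itab := typematch.NewImportsTab(map[string]string{"io": "io"})
	ctx := typematch.Context{Itab: itab}

	pat, err := typematch.Parse(&ctx, `map[string]interface{}`)
	if err != nil {
		panic(err)
	}

	// A map[string]interface{} built via go/types.
	typ := types.NewMap(
		types.Typ[types.String],
		types.NewInterfaceType(nil, nil),
	)
	fmt.Println(pat.MatchIdentical(typ)) // true: identical to the pattern
}
```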
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go
new file mode 100644
index 00000000..c17dc243
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go
@@ -0,0 +1,205 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+func unquoteNode(lit *ast.BasicLit) string {
+ return lit.Value[1 : len(lit.Value)-1]
+}
+
+func sprintNode(fset *token.FileSet, n ast.Node) string {
+ if fset == nil {
+ fset = token.NewFileSet()
+ }
+ var buf strings.Builder
+ if err := printer.Fprint(&buf, fset, n); err != nil {
+ return ""
+ }
+ return buf.String()
+}
+
+var basicTypeByName = map[string]types.Type{
+ "bool": types.Typ[types.Bool],
+ "int": types.Typ[types.Int],
+ "int8": types.Typ[types.Int8],
+ "int16": types.Typ[types.Int16],
+ "int32": types.Typ[types.Int32],
+ "int64": types.Typ[types.Int64],
+ "uint": types.Typ[types.Uint],
+ "uint8": types.Typ[types.Uint8],
+ "uint16": types.Typ[types.Uint16],
+ "uint32": types.Typ[types.Uint32],
+ "uint64": types.Typ[types.Uint64],
+ "uintptr": types.Typ[types.Uintptr],
+ "float32": types.Typ[types.Float32],
+ "float64": types.Typ[types.Float64],
+ "complex64": types.Typ[types.Complex64],
+ "complex128": types.Typ[types.Complex128],
+ "string": types.Typ[types.String],
+}
+
+func typeFromString(s string) (types.Type, error) {
+ s = strings.ReplaceAll(s, "?", "__any")
+
+ n, err := parser.ParseExpr(s)
+ if err != nil {
+ return nil, err
+ }
+ return typeFromNode(n), nil
+}
+
+func typeFromNode(e ast.Expr) types.Type {
+ switch e := e.(type) {
+ case *ast.Ident:
+ basic, ok := basicTypeByName[e.Name]
+ if ok {
+ return basic
+ }
+
+ case *ast.ArrayType:
+ elem := typeFromNode(e.Elt)
+ if elem == nil {
+ return nil
+ }
+ if e.Len == nil {
+ return types.NewSlice(elem)
+ }
+ lit, ok := e.Len.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return nil
+ }
+ length, err := strconv.Atoi(lit.Value)
+ if err != nil {
+ return nil
+ }
+ return types.NewArray(elem, int64(length))
+
+ case *ast.MapType:
+ keyType := typeFromNode(e.Key)
+ if keyType == nil {
+ return nil
+ }
+ valType := typeFromNode(e.Value)
+ if valType == nil {
+ return nil
+ }
+ return types.NewMap(keyType, valType)
+
+ case *ast.StarExpr:
+ typ := typeFromNode(e.X)
+ if typ != nil {
+ return types.NewPointer(typ)
+ }
+
+ case *ast.ParenExpr:
+ return typeFromNode(e.X)
+
+ case *ast.InterfaceType:
+ if len(e.Methods.List) == 0 {
+ return types.NewInterfaceType(nil, nil)
+ }
+ }
+
+ return nil
+}
+
+// isPure reports whether expr is a softly safe expression that contains
+// no significant side effects. As opposed to strictly safe expressions,
+// softly safe expressions permit some forms of side effects, such as
+// a possible panic during indexing or a nil pointer dereference.
+//
+// It uses type info to recognize type conversion expressions, which
+// are the only permitted kind of call expression.
+// Note that it does not check whether a called function really
+// has any side effects. The analysis is very conservative.
+func isPure(info *types.Info, expr ast.Expr) bool {
+ // This switch is not comprehensive and uses a
+ // whitelist to stay on the conservative side.
+ // It can be extended as needed.
+
+ switch expr := expr.(type) {
+ case *ast.StarExpr:
+ return isPure(info, expr.X)
+ case *ast.BinaryExpr:
+ return isPure(info, expr.X) &&
+ isPure(info, expr.Y)
+ case *ast.UnaryExpr:
+ return expr.Op != token.ARROW &&
+ isPure(info, expr.X)
+ case *ast.BasicLit, *ast.Ident:
+ return true
+ case *ast.IndexExpr:
+ return isPure(info, expr.X) &&
+ isPure(info, expr.Index)
+ case *ast.SelectorExpr:
+ return isPure(info, expr.X)
+ case *ast.ParenExpr:
+ return isPure(info, expr.X)
+ case *ast.CompositeLit:
+ return isPureList(info, expr.Elts)
+ case *ast.CallExpr:
+ return isTypeExpr(info, expr.Fun) && isPureList(info, expr.Args)
+
+ default:
+ return false
+ }
+}
+
+// isPureList reports whether every expr in list is safe.
+//
+// See isPure.
+func isPureList(info *types.Info, list []ast.Expr) bool {
+ for _, expr := range list {
+ if !isPure(info, expr) {
+ return false
+ }
+ }
+ return true
+}
+
+func isAddressable(info *types.Info, expr ast.Expr) bool {
+ tv, ok := info.Types[expr]
+ return ok && tv.Addressable()
+}
+
+func isConstant(info *types.Info, expr ast.Expr) bool {
+ tv, ok := info.Types[expr]
+ return ok && tv.Value != nil
+}
+
+// isTypeExpr reports whether x represents a type expression.
+//
+// A type expression does not evaluate to any run-time value,
+// but rather describes a type that is used inside a Go expression.
+//
+// For example, (*T)(v) is a CallExpr that "calls" (*T).
+// (*T) is a type expression that tells the Go compiler which type v should be converted to.
+func isTypeExpr(info *types.Info, x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.StarExpr:
+ return isTypeExpr(info, x.X)
+ case *ast.ParenExpr:
+ return isTypeExpr(info, x.X)
+ case *ast.SelectorExpr:
+ return isTypeExpr(info, x.Sel)
+
+ case *ast.Ident:
+ // An identifier may be a type expression if the object
+ // it refers to is a type name.
+ _, ok := info.ObjectOf(x).(*types.TypeName)
+ return ok
+
+ case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType, *ast.ChanType:
+ return true
+
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/LICENSE b/vendor/github.com/quasilyte/regex/syntax/LICENSE
new file mode 100644
index 00000000..f0c81282
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Iskander (Alex) Sharipov / quasilyte
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/quasilyte/regex/syntax/README.md b/vendor/github.com/quasilyte/regex/syntax/README.md
new file mode 100644
index 00000000..b70e25ad
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/README.md
@@ -0,0 +1,29 @@
+# Package `regex/syntax`
+
+Package `syntax` provides a regular expression parser as well as AST definitions.
+
+## Rationale
+
+The advantages of this package over stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/):
+
+1. Does no transformations/optimizations during parsing.
+ The produced parse tree is lossless.
+
+2. Simpler AST representation.
+
+3. Can parse most PCRE operations in addition to [re2](https://github.com/google/re2/wiki/Syntax) syntax.
+ It can also handle PHP/Perl style patterns with delimiters.
+
+4. This package is easier to extend than something from the standard library.
+
+This package makes almost no assumptions about how the generated AST is going to be used,
+so it preserves as much syntax information as possible.
+
+It's easy to write another intermediate representation on top of it. The main
+function of this package is to convert a textual regexp pattern into a more
+structured form that can be processed more easily.
+
+## Users
+
+* [go-critic](https://github.com/go-critic/go-critic) - Go static analyzer
+* [NoVerify](https://github.com/VKCOM/noverify) - PHP static analyzer
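
As a rough illustration of the "structured form" the README describes, walking the parse tree needs nothing beyond the `Expr` fields defined in `ast.go` (`Op`, `Args`, `Value`); the `walk` helper below is hypothetical and not part of the package:

```go
package example

import (
	"fmt"

	"github.com/quasilyte/regex/syntax"
)

// walk visits e and all of its sub-expressions in depth-first order.
func walk(e syntax.Expr, visit func(syntax.Expr)) {
	visit(e)
	for _, arg := range e.Args {
		walk(arg, visit)
	}
}

// printOps dumps every operation and its textual value from a parsed regexp.
func printOps(re *syntax.Regexp) {
	walk(re.Expr, func(e syntax.Expr) {
		fmt.Println(e.Op, e.Value)
	})
}
```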
diff --git a/vendor/github.com/quasilyte/regex/syntax/ast.go b/vendor/github.com/quasilyte/regex/syntax/ast.go
new file mode 100644
index 00000000..4d21a943
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/ast.go
@@ -0,0 +1,64 @@
+package syntax
+
+import (
+ "strings"
+)
+
+type Regexp struct {
+ Pattern string
+ Expr Expr
+}
+
+type RegexpPCRE struct {
+ Pattern string
+ Expr Expr
+
+ Source string
+ Modifiers string
+ Delim [2]byte
+}
+
+func (re *RegexpPCRE) HasModifier(mod byte) bool {
+ return strings.IndexByte(re.Modifiers, mod) >= 0
+}
+
+type Expr struct {
+ // The operation that this expression performs. See `operation.go`.
+ Op Operation
+
+ Form Form
+
+ _ [2]byte // Reserved
+
+ // Pos describes a source location inside regexp pattern.
+ Pos Position
+
+ // Args is a list of sub-expressions of this expression.
+ //
+ // See Operation constants documentation to learn how to
+ // interpret the particular expression args.
+ Args []Expr
+
+ // Value holds expression textual value.
+ //
+ // Usually, that value is identical to src[Begin():End()],
+ // but this is not true for programmatically generated objects.
+ Value string
+}
+
+// Begin returns expression leftmost offset.
+func (e Expr) Begin() uint16 { return e.Pos.Begin }
+
+// End returns expression rightmost offset.
+func (e Expr) End() uint16 { return e.Pos.End }
+
+// LastArg returns expression last argument.
+//
+// Should not be called on expressions that may have 0 arguments.
+func (e Expr) LastArg() Expr {
+ return e.Args[len(e.Args)-1]
+}
+
+type Operation byte
+
+type Form byte
diff --git a/vendor/github.com/quasilyte/regex/syntax/errors.go b/vendor/github.com/quasilyte/regex/syntax/errors.go
new file mode 100644
index 00000000..beefba5f
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/errors.go
@@ -0,0 +1,27 @@
+package syntax
+
+type ParseError struct {
+ Pos Position
+ Message string
+}
+
+func (e ParseError) Error() string { return e.Message }
+
+func throw(pos Position, message string) {
+ panic(ParseError{Pos: pos, Message: message})
+}
+
+func throwExpectedFound(pos Position, expected, found string) {
+ throw(pos, "expected '"+expected+"', found '"+found+"'")
+}
+
+func throwUnexpectedToken(pos Position, token string) {
+ throw(pos, "unexpected token: "+token)
+}
+
+func newPos(begin, end int) Position {
+ return Position{
+ Begin: uint16(begin),
+ End: uint16(end),
+ }
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/go.mod b/vendor/github.com/quasilyte/regex/syntax/go.mod
new file mode 100644
index 00000000..2a4e1f33
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/go.mod
@@ -0,0 +1,3 @@
+module github.com/quasilyte/regex/syntax
+
+go 1.14
diff --git a/vendor/github.com/quasilyte/regex/syntax/lexer.go b/vendor/github.com/quasilyte/regex/syntax/lexer.go
new file mode 100644
index 00000000..aae146c2
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/lexer.go
@@ -0,0 +1,454 @@
+package syntax
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ kind tokenKind
+ pos Position
+}
+
+func (tok token) String() string {
+ return tok.kind.String()
+}
+
+type tokenKind byte
+
+//go:generate stringer -type=tokenKind -trimprefix=tok -linecomment=true
+const (
+ tokNone tokenKind = iota
+
+ tokChar
+ tokGroupFlags
+ tokPosixClass
+ tokConcat
+ tokRepeat
+ tokEscapeChar
+ tokEscapeMeta
+ tokEscapeOctal
+ tokEscapeUni
+ tokEscapeUniFull
+ tokEscapeHex
+ tokEscapeHexFull
+ tokComment
+
+ tokQ // \Q
+ tokMinus // -
+ tokLbracket // [
+ tokLbracketCaret // [^
+ tokRbracket // ]
+ tokDollar // $
+ tokCaret // ^
+ tokQuestion // ?
+ tokDot // .
+ tokPlus // +
+ tokStar // *
+ tokPipe // |
+ tokLparen // (
+ tokLparenName // (?P<name>
+ tokLparenNameAngle // (?<name>
+ tokLparenNameQuote // (?'name'
+ tokLparenFlags // (?flags
+ tokLparenAtomic // (?>
+ tokLparenPositiveLookahead // (?=
+ tokLparenPositiveLookbehind // (?<=
+ tokLparenNegativeLookahead // (?!
+ tokLparenNegativeLookbehind // (?<!
+
+ tokRparen // )
+)
+
+type lexer struct {
+ tokens []token
+ pos int
+ input string
+}
+
+func (l *lexer) scan() {
+ for l.pos < len(l.input) {
+ ch := l.input[l.pos]
+ if ch >= utf8.RuneSelf {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pushTok(tokChar, size)
+ l.maybeInsertConcat()
+ continue
+ }
+ switch ch {
+ case '\\':
+ l.scanEscape(false)
+ case '.':
+ l.pushTok(tokDot, 1)
+ case '+':
+ l.pushTok(tokPlus, 1)
+ case '*':
+ l.pushTok(tokStar, 1)
+ case '^':
+ l.pushTok(tokCaret, 1)
+ case '$':
+ l.pushTok(tokDollar, 1)
+ case '?':
+ l.pushTok(tokQuestion, 1)
+ case ')':
+ l.pushTok(tokRparen, 1)
+ case '|':
+ l.pushTok(tokPipe, 1)
+ case '[':
+ if l.byteAt(l.pos+1) == '^' {
+ l.pushTok(tokLbracketCaret, 2)
+ } else {
+ l.pushTok(tokLbracket, 1)
+ }
+ l.scanCharClass()
+ case '(':
+ if l.byteAt(l.pos+1) == '?' {
+ switch {
+ case l.byteAt(l.pos+2) == '>':
+ l.pushTok(tokLparenAtomic, len("(?>"))
+ case l.byteAt(l.pos+2) == '=':
+ l.pushTok(tokLparenPositiveLookahead, len("(?="))
+ case l.byteAt(l.pos+2) == '!':
+ l.pushTok(tokLparenNegativeLookahead, len("(?!"))
+ case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '=':
+ l.pushTok(tokLparenPositiveLookbehind, len("(?<="))
+ case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '!':
+ l.pushTok(tokLparenNegativeLookbehind, len("(?<!"))
+ default:
+ // Could be a named group, group flags or an inline comment.
+ if !l.tryScanGroupName(l.pos+2) && !l.tryScanGroupFlags(l.pos+2) && !l.tryScanComment(l.pos+2) {
+ throw(newPos(l.pos, l.pos+2), "invalid group token")
+ }
+ }
+ } else {
+ l.pushTok(tokLparen, 1)
+ }
+ case '{':
+ if j := l.repeatWidth(l.pos + 1); j >= 0 {
+ l.pushTok(tokRepeat, len("{")+j)
+ } else {
+ l.pushTok(tokChar, 1)
+ }
+ default:
+ l.pushTok(tokChar, 1)
+ }
+ l.maybeInsertConcat()
+ }
+}
+
+func (l *lexer) scanCharClass() {
+ l.maybeInsertConcat()
+
+ // We need to handle the first `]` in a special way. See #3.
+ if l.byteAt(l.pos) == ']' {
+ l.pushTok(tokChar, 1)
+ }
+
+ for l.pos < len(l.input) {
+ ch := l.input[l.pos]
+ if ch >= utf8.RuneSelf {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pushTok(tokChar, size)
+ continue
+ }
+ switch ch {
+ case '\\':
+ l.scanEscape(true)
+ case '[':
+ isPosixClass := false
+ if l.byteAt(l.pos+1) == ':' {
+ j := l.stringIndex(l.pos+2, ":]")
+ if j >= 0 {
+ isPosixClass = true
+ l.pushTok(tokPosixClass, j+len("[::]"))
+ }
+ }
+ if !isPosixClass {
+ l.pushTok(tokChar, 1)
+ }
+ case '-':
+ l.pushTok(tokMinus, 1)
+ case ']':
+ l.pushTok(tokRbracket, 1)
+ return // Stop scanning in the char context
+ default:
+ l.pushTok(tokChar, 1)
+ }
+ }
+}
+
+func (l *lexer) scanEscape(insideCharClass bool) {
+ s := l.input
+ if l.pos+1 >= len(s) {
+ throw(newPos(l.pos, l.pos+1), `unexpected end of pattern: trailing '\'`)
+ }
+ switch {
+ case s[l.pos+1] == 'p' || s[l.pos+1] == 'P':
+ if l.pos+2 >= len(s) {
+ throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected uni-class-short or '{'")
+ }
+ if s[l.pos+2] == '{' {
+ j := strings.IndexByte(s[l.pos+2:], '}')
+ if j < 0 {
+ throw(newPos(l.pos, l.pos+2), "can't find closing '}'")
+ }
+ l.pushTok(tokEscapeUniFull, len(`\p{`)+j)
+ } else {
+ l.pushTok(tokEscapeUni, len(`\pL`))
+ }
+ case s[l.pos+1] == 'x':
+ if l.pos+2 >= len(s) {
+ throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected hex-digit or '{'")
+ }
+ if s[l.pos+2] == '{' {
+ j := strings.IndexByte(s[l.pos+2:], '}')
+ if j < 0 {
+ throw(newPos(l.pos, l.pos+2), "can't find closing '}'")
+ }
+ l.pushTok(tokEscapeHexFull, len(`\x{`)+j)
+ } else {
+ if isHexDigit(l.byteAt(l.pos + 3)) {
+ l.pushTok(tokEscapeHex, len(`\xFF`))
+ } else {
+ l.pushTok(tokEscapeHex, len(`\xF`))
+ }
+ }
+ case isOctalDigit(s[l.pos+1]):
+ digits := 1
+ if isOctalDigit(l.byteAt(l.pos + 2)) {
+ if isOctalDigit(l.byteAt(l.pos + 3)) {
+ digits = 3
+ } else {
+ digits = 2
+ }
+ }
+ l.pushTok(tokEscapeOctal, len(`\`)+digits)
+ case s[l.pos+1] == 'Q':
+ size := len(s) - l.pos // Until the pattern ends
+ j := l.stringIndex(l.pos+2, `\E`)
+ if j >= 0 {
+ size = j + len(`\Q\E`)
+ }
+ l.pushTok(tokQ, size)
+
+ default:
+ ch := l.byteAt(l.pos + 1)
+ if ch >= utf8.RuneSelf {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos+1:])
+ l.pushTok(tokEscapeChar, len(`\`)+size)
+ return
+ }
+ kind := tokEscapeChar
+ if insideCharClass {
+ if charClassMetachar[ch] {
+ kind = tokEscapeMeta
+ }
+ } else {
+ if reMetachar[ch] {
+ kind = tokEscapeMeta
+ }
+ }
+ l.pushTok(kind, 2)
+ }
+}
+
+func (l *lexer) maybeInsertConcat() {
+ if l.isConcatPos() {
+ last := len(l.tokens) - 1
+ tok := l.tokens[last]
+ l.tokens[last].kind = tokConcat
+ l.tokens = append(l.tokens, tok)
+ }
+}
+
+func (l *lexer) Init(s string) {
+ l.pos = 0
+ l.tokens = l.tokens[:0]
+ l.input = s
+
+ l.scan()
+
+ l.pos = 0
+}
+
+func (l *lexer) tryScanGroupName(pos int) bool {
+ tok := tokLparenName
+ endCh := byte('>')
+ offset := 1
+ switch l.byteAt(pos) {
+ case '\'':
+ endCh = '\''
+ tok = tokLparenNameQuote
+ case '<':
+ tok = tokLparenNameAngle
+ case 'P':
+ offset = 2
+ default:
+ return false
+ }
+ if pos+offset >= len(l.input) {
+ return false
+ }
+ end := strings.IndexByte(l.input[pos+offset:], endCh)
+ if end < 0 {
+ return false
+ }
+ l.pushTok(tok, len("(?")+offset+end+1)
+ return true
+}
+
+func (l *lexer) tryScanGroupFlags(pos int) bool {
+ colonPos := strings.IndexByte(l.input[pos:], ':')
+ parenPos := strings.IndexByte(l.input[pos:], ')')
+ if parenPos < 0 {
+ return false
+ }
+ end := parenPos
+ if colonPos >= 0 && colonPos < parenPos {
+ end = colonPos + len(":")
+ }
+ l.pushTok(tokLparenFlags, len("(?")+end)
+ return true
+}
+
+func (l *lexer) tryScanComment(pos int) bool {
+ if l.byteAt(pos) != '#' {
+ return false
+ }
+ parenPos := strings.IndexByte(l.input[pos:], ')')
+ if parenPos < 0 {
+ return false
+ }
+ l.pushTok(tokComment, len("(?")+parenPos+len(")"))
+ return true
+}
+
+func (l *lexer) repeatWidth(pos int) int {
+ j := pos
+ for isDigit(l.byteAt(j)) {
+ j++
+ }
+ if j == pos {
+ return -1
+ }
+ if l.byteAt(j) == '}' {
+ return (j + len("}")) - pos // {min}
+ }
+ if l.byteAt(j) != ',' {
+ return -1
+ }
+ j += len(",")
+ for isDigit(l.byteAt(j)) {
+ j++
+ }
+ if l.byteAt(j) == '}' {
+ return (j + len("}")) - pos // {min,} or {min,max}
+ }
+ return -1
+}
+
+func (l *lexer) stringIndex(offset int, s string) int {
+ if offset < len(l.input) {
+ return strings.Index(l.input[offset:], s)
+ }
+ return -1
+}
+
+func (l *lexer) byteAt(pos int) byte {
+ if pos >= 0 && pos < len(l.input) {
+ return l.input[pos]
+ }
+ return 0
+}
+
+func (l *lexer) pushTok(kind tokenKind, size int) {
+ l.tokens = append(l.tokens, token{
+ kind: kind,
+ pos: Position{Begin: uint16(l.pos), End: uint16(l.pos + size)},
+ })
+ l.pos += size
+}
+
+func (l *lexer) isConcatPos() bool {
+ if len(l.tokens) < 2 {
+ return false
+ }
+ x := l.tokens[len(l.tokens)-2].kind
+ if concatTable[x]&concatX != 0 {
+ return false
+ }
+ y := l.tokens[len(l.tokens)-1].kind
+ return concatTable[y]&concatY == 0
+}
+
+const (
+ concatX byte = 1 << iota
+ concatY
+)
+
+var concatTable = [256]byte{
+ tokPipe: concatX | concatY,
+
+ tokLparen: concatX,
+ tokLparenFlags: concatX,
+ tokLparenName: concatX,
+ tokLparenNameAngle: concatX,
+ tokLparenNameQuote: concatX,
+ tokLparenAtomic: concatX,
+ tokLbracket: concatX,
+ tokLbracketCaret: concatX,
+ tokLparenPositiveLookahead: concatX,
+ tokLparenPositiveLookbehind: concatX,
+ tokLparenNegativeLookahead: concatX,
+ tokLparenNegativeLookbehind: concatX,
+
+ tokRparen: concatY,
+ tokRbracket: concatY,
+ tokPlus: concatY,
+ tokStar: concatY,
+ tokQuestion: concatY,
+ tokRepeat: concatY,
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/operation.go b/vendor/github.com/quasilyte/regex/syntax/operation.go
new file mode 100644
index 00000000..0fc8fc52
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/operation.go
@@ -0,0 +1,195 @@
+package syntax
+
+//go:generate stringer -type=Operation -trimprefix=Op
+const (
+ OpNone Operation = iota
+
+ // OpConcat is a concatenation of ops.
+ // Examples: `xy` `abc\d` ``
+ // Args - concatenated ops
+ //
+ // As a special case, OpConcat with 0 Args is used for "empty"
+ // set of operations.
+ OpConcat
+
+ // OpDot is a '.' wildcard.
+ OpDot
+
+ // OpAlt is x|y alternation of ops.
+ // Examples: `a|bc` `x(.*?)|y(.*?)`
+ // Args - union-connected regexp branches
+ OpAlt
+
+ // OpStar is a shorthand for {0,} repetition.
+ // Examples: `x*`
+ // Args[0] - repeated expression
+ OpStar
+
+ // OpPlus is a shorthand for {1,} repetition.
+ // Examples: `x+`
+ // Args[0] - repeated expression
+ OpPlus
+
+ // OpQuestion is a shorthand for {0,1} repetition.
+ // Examples: `x?`
+ // Args[0] - repeated expression
+ OpQuestion
+
+ // OpNonGreedy makes its operand quantifier non-greedy.
+ // Examples: `x??` `x*?` `x+?`
+ // Args[0] - quantified expression
+ OpNonGreedy
+
+ // OpPossessive makes its operand quantifier possessive.
+ // Examples: `x?+` `x*+` `x++`
+ // Args[0] - quantified expression
+ OpPossessive
+
+ // OpCaret is ^ anchor.
+ OpCaret
+
+ // OpDollar is $ anchor.
+ OpDollar
+
+ // OpLiteral is a collection of consecutive chars.
+ // Examples: `ab` `10x`
+ // Args - enclosed characters (OpChar)
+ OpLiteral
+
+ // OpChar is a single literal pattern character.
+ // Examples: `a` `6` `ф`
+ OpChar
+
+ // OpString is an artificial element that is used in other expressions.
+ OpString
+
+ // OpQuote is a \Q...\E enclosed literal.
+ // Examples: `\Q.?\E` `\Q?q[]=1`
+ // FormQuoteUnclosed: `\Qabc`
+ // Args[0] - literal value (OpString)
+ OpQuote
+
+ // OpEscapeChar is a single char escape.
+ // Examples: `\d` `\a` `\n`
+ // Args[0] - escaped value (OpString)
+ OpEscapeChar
+
+ // OpEscapeMeta is an escaped meta char.
+ // Examples: `\(` `\[` `\+`
+ // Args[0] - escaped value (OpString)
+ OpEscapeMeta
+
+ // OpEscapeOctal is an octal char code escape (up to 3 digits).
+ // Examples: `\123` `\12`
+ // Args[0] - escaped value (OpString)
+ OpEscapeOctal
+
+ // OpEscapeHex is a hex char code escape.
+ // Examples: `\x7F` `\xF7`
+ // FormEscapeHexFull examples: `\x{10FFFF}` `\x{F}`.
+ // Args[0] - escaped value (OpString)
+ OpEscapeHex
+
+ // OpEscapeUni is a Unicode char class escape.
+ // Examples: `\pS` `\pL` `\PL`
+ // FormEscapeUniFull examples: `\p{Greek}` `\p{Symbol}` `\p{^L}`
+ // Args[0] - escaped value (OpString)
+ OpEscapeUni
+
+ // OpCharClass is a char class enclosed in [].
+ // Examples: `[abc]` `[a-z0-9\]]`
+ // Args - char class elements (can include OpCharRange and OpPosixClass)
+ OpCharClass
+
+ // OpNegCharClass is a negated char class enclosed in [].
+ // Examples: `[^abc]` `[^a-z0-9\]]`
+ // Args - char class elements (can include OpCharRange and OpPosixClass)
+ OpNegCharClass
+
+ // OpCharRange is an inclusive char range inside a char class.
+ // Examples: `0-9` `A-Z`
+ // Args[0] - range lower bound
+ // Args[1] - range upper bound
+ OpCharRange
+
+ // OpPosixClass is a named ASCII char set inside a char class.
+ // Examples: `[:alpha:]` `[:blank:]`
+ OpPosixClass
+
+ // OpRepeat is a {min,max} repetition quantifier.
+ // Examples: `x{5}` `x{min,max}` `x{min,}`
+ // Args[0] - repeated expression
+ // Args[1] - repeat count (OpString)
+ OpRepeat
+
+ // OpCapture is `(re)` capturing group.
+ // Examples: `(abc)` `(x|y)`
+ // Args[0] - enclosed expression
+ OpCapture
+
+ // OpNamedCapture is `(?P<name>re)` capturing group.
+ // Examples: `(?P<foo>abc)` `(?P<name>x|y)`
+ // FormNamedCaptureAngle examples: `(?<foo>abc)` `(?<name>x|y)`
+ // FormNamedCaptureQuote examples: `(?'foo'abc)` `(?'name'x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ // Args[1] - group name (OpString)
+ OpNamedCapture
+
+ // OpGroup is `(?:re)` non-capturing group.
+ // Examples: `(?:abc)` `(?:x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpGroup
+
+ // OpGroupWithFlags is `(?flags:re)` non-capturing group.
+ // Examples: `(?i:abc)` `(?i:x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ // Args[1] - flags (OpString)
+ OpGroupWithFlags
+
+ // OpAtomicGroup is `(?>re)` non-capturing group without backtracking.
+ // Examples: `(?>foo)` `(?>)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpAtomicGroup
+
+ // OpPositiveLookahead is `(?=re)` asserts that following text matches re.
+ // Examples: `(?=foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpPositiveLookahead
+
+ // OpNegativeLookahead is `(?!re)` asserts that following text doesn't match re.
+ // Examples: `(?!foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpNegativeLookahead
+
+ // OpPositiveLookbehind is `(?<=re)` asserts that preceding text matches re.
+ // Examples: `(?<=foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpPositiveLookbehind
+
+ // OpNegativeLookbehind is `(?<!re)` asserts that preceding text doesn't match re.
+ // Examples: `(?<!foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpNegativeLookbehind
+
+func (i Operation) String() string {
+ if i < 0 || i >= Operation(len(_Operation_index)-1) {
+ return "Operation(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/parser.go b/vendor/github.com/quasilyte/regex/syntax/parser.go
new file mode 100644
index 00000000..c540ac59
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/parser.go
@@ -0,0 +1,503 @@
+package syntax
+
+import (
+ "errors"
+ "strings"
+)
+
+type ParserOptions struct {
+ // NoLiterals disables OpChar merging into OpLiteral.
+ NoLiterals bool
+}
+
+func NewParser(opts *ParserOptions) *Parser {
+ return newParser(opts)
+}
+
+type Parser struct {
+ out Regexp
+ lexer lexer
+ exprPool []Expr
+
+ prefixParselets [256]prefixParselet
+ infixParselets [256]infixParselet
+
+ charClass []Expr
+ allocated uint
+
+ opts ParserOptions
+}
+
+// ParsePCRE parses a PHP-style pattern with delimiters.
+// An example of such a pattern is `/foo/i`.
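+//
+// A minimal usage sketch (all names below are defined in this package):
+//
+//  p := NewParser(nil)
+//  pcre, err := p.ParsePCRE(`/fo+o/i`)
+//  // err == nil; pcre.Pattern == "fo+o", pcre.Modifiers == "i", pcre.Expr holds the parsed tree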
+func (p *Parser) ParsePCRE(pattern string) (*RegexpPCRE, error) {
+ pcre, err := p.newPCRE(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if pcre.HasModifier('x') {
+ return nil, errors.New("'x' modifier is not supported")
+ }
+ re, err := p.Parse(pcre.Pattern)
+ if re != nil {
+ pcre.Expr = re.Expr
+ }
+ return pcre, err
+}
+
+func (p *Parser) Parse(pattern string) (result *Regexp, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+ if err2, ok := r.(ParseError); ok {
+ err = err2
+ return
+ }
+ panic(r)
+ }()
+
+ p.lexer.Init(pattern)
+ p.allocated = 0
+ p.out.Pattern = pattern
+ if pattern == "" {
+ p.out.Expr = *p.newExpr(OpConcat, Position{})
+ } else {
+ p.out.Expr = *p.parseExpr(0)
+ }
+
+ if !p.opts.NoLiterals {
+ p.mergeChars(&p.out.Expr)
+ }
+ p.setValues(&p.out.Expr)
+
+ return &p.out, nil
+}
+
+type prefixParselet func(token) *Expr
+
+type infixParselet func(*Expr, token) *Expr
+
+func newParser(opts *ParserOptions) *Parser {
+ var p Parser
+
+ if opts != nil {
+ p.opts = *opts
+ }
+ p.exprPool = make([]Expr, 256)
+
+ for tok, op := range tok2op {
+ if op != 0 {
+ p.prefixParselets[tokenKind(tok)] = p.parsePrefixElementary
+ }
+ }
+
+ p.prefixParselets[tokQ] = func(tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(`\Q`))
+ form := FormQuoteUnclosed
+ if strings.HasSuffix(p.tokenValue(tok), `\E`) {
+ litPos.End -= uint16(len(`\E`))
+ form = FormDefault
+ }
+ lit := p.newExpr(OpString, litPos)
+ return p.newExprForm(OpQuote, form, tok.pos, lit)
+ }
+
+ p.prefixParselets[tokEscapeHexFull] = func(tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(`\x{`))
+ litPos.End -= uint16(len(`}`))
+ lit := p.newExpr(OpString, litPos)
+ return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos, lit)
+ }
+ p.prefixParselets[tokEscapeUniFull] = func(tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(`\p{`))
+ litPos.End -= uint16(len(`}`))
+ lit := p.newExpr(OpString, litPos)
+ return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos, lit)
+ }
+
+ p.prefixParselets[tokEscapeHex] = func(tok token) *Expr { return p.parseEscape(OpEscapeHex, `\x`, tok) }
+ p.prefixParselets[tokEscapeOctal] = func(tok token) *Expr { return p.parseEscape(OpEscapeOctal, `\`, tok) }
+ p.prefixParselets[tokEscapeChar] = func(tok token) *Expr { return p.parseEscape(OpEscapeChar, `\`, tok) }
+ p.prefixParselets[tokEscapeMeta] = func(tok token) *Expr { return p.parseEscape(OpEscapeMeta, `\`, tok) }
+ p.prefixParselets[tokEscapeUni] = func(tok token) *Expr { return p.parseEscape(OpEscapeUni, `\p`, tok) }
+
+ p.prefixParselets[tokLparen] = func(tok token) *Expr { return p.parseGroup(OpCapture, tok) }
+ p.prefixParselets[tokLparenAtomic] = func(tok token) *Expr { return p.parseGroup(OpAtomicGroup, tok) }
+ p.prefixParselets[tokLparenPositiveLookahead] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookahead, tok) }
+ p.prefixParselets[tokLparenNegativeLookahead] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookahead, tok) }
+ p.prefixParselets[tokLparenPositiveLookbehind] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookbehind, tok) }
+ p.prefixParselets[tokLparenNegativeLookbehind] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookbehind, tok) }
+
+ p.prefixParselets[tokLparenName] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormDefault, tok)
+ }
+ p.prefixParselets[tokLparenNameAngle] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormNamedCaptureAngle, tok)
+ }
+ p.prefixParselets[tokLparenNameQuote] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormNamedCaptureQuote, tok)
+ }
+
+ p.prefixParselets[tokLparenFlags] = p.parseGroupWithFlags
+
+ p.prefixParselets[tokPipe] = func(tok token) *Expr {
+ // We need a prefix pipe parselet to handle `(|x)` syntax.
+ right := p.parseExpr(1)
+ return p.newExpr(OpAlt, tok.pos, p.newEmpty(tok.pos), right)
+ }
+ p.prefixParselets[tokLbracket] = func(tok token) *Expr {
+ return p.parseCharClass(OpCharClass, tok)
+ }
+ p.prefixParselets[tokLbracketCaret] = func(tok token) *Expr {
+ return p.parseCharClass(OpNegCharClass, tok)
+ }
+
+ p.infixParselets[tokRepeat] = func(left *Expr, tok token) *Expr {
+ repeatLit := p.newExpr(OpString, tok.pos)
+ return p.newExpr(OpRepeat, combinePos(left.Pos, tok.pos), left, repeatLit)
+ }
+ p.infixParselets[tokStar] = func(left *Expr, tok token) *Expr {
+ return p.newExpr(OpStar, combinePos(left.Pos, tok.pos), left)
+ }
+ p.infixParselets[tokConcat] = func(left *Expr, tok token) *Expr {
+ right := p.parseExpr(2)
+ if left.Op == OpConcat {
+ left.Args = append(left.Args, *right)
+ left.Pos.End = right.End()
+ return left
+ }
+ return p.newExpr(OpConcat, combinePos(left.Pos, right.Pos), left, right)
+ }
+ p.infixParselets[tokPipe] = p.parseAlt
+ p.infixParselets[tokMinus] = p.parseMinus
+ p.infixParselets[tokPlus] = p.parsePlus
+ p.infixParselets[tokQuestion] = p.parseQuestion
+
+ return &p
+}
+
+func (p *Parser) setValues(e *Expr) {
+ for i := range e.Args {
+ p.setValues(&e.Args[i])
+ }
+ e.Value = p.exprValue(e)
+}
+
+func (p *Parser) tokenValue(tok token) string {
+ return p.out.Pattern[tok.pos.Begin:tok.pos.End]
+}
+
+func (p *Parser) exprValue(e *Expr) string {
+ return p.out.Pattern[e.Begin():e.End()]
+}
+
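+// mergeChars collapses runs of two or more adjacent OpChar args inside an
+// OpConcat into a single OpLiteral, so `abc` becomes one literal rather than
+// three separate chars; Parse skips this step when ParserOptions.NoLiterals is set.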
+func (p *Parser) mergeChars(e *Expr) {
+ for i := range e.Args {
+ p.mergeChars(&e.Args[i])
+ }
+ if e.Op != OpConcat || len(e.Args) < 2 {
+ return
+ }
+
+ args := e.Args[:0]
+ i := 0
+ for i < len(e.Args) {
+ first := i
+ chars := 0
+ for j := i; j < len(e.Args) && e.Args[j].Op == OpChar; j++ {
+ chars++
+ }
+ if chars > 1 {
+ c1 := e.Args[first]
+ c2 := e.Args[first+chars-1]
+ lit := p.newExpr(OpLiteral, combinePos(c1.Pos, c2.Pos))
+ for j := 0; j < chars; j++ {
+ lit.Args = append(lit.Args, e.Args[first+j])
+ }
+ args = append(args, *lit)
+ i += chars
+ } else {
+ args = append(args, e.Args[i])
+ i++
+ }
+ }
+ if len(args) == 1 {
+ *e = args[0] // Turn OpConcat into OpLiteral
+ } else {
+ e.Args = args
+ }
+}
+
+func (p *Parser) newEmpty(pos Position) *Expr {
+ return p.newExpr(OpConcat, pos)
+}
+
+func (p *Parser) newExprForm(op Operation, form Form, pos Position, args ...*Expr) *Expr {
+ e := p.newExpr(op, pos, args...)
+ e.Form = form
+ return e
+}
+
+func (p *Parser) newExpr(op Operation, pos Position, args ...*Expr) *Expr {
+ e := p.allocExpr()
+ *e = Expr{
+ Op: op,
+ Pos: pos,
+ Args: e.Args[:0],
+ }
+ for _, arg := range args {
+ e.Args = append(e.Args, *arg)
+ }
+ return e
+}
+
+func (p *Parser) allocExpr() *Expr {
+ i := p.allocated
+ if i < uint(len(p.exprPool)) {
+ p.allocated++
+ return &p.exprPool[i]
+ }
+ return &Expr{}
+}
+
+func (p *Parser) expect(kind tokenKind) Position {
+ tok := p.lexer.NextToken()
+ if tok.kind != kind {
+ throwExpectedFound(tok.pos, kind.String(), tok.kind.String())
+ }
+ return tok.pos
+}
+
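+// parseExpr is the Pratt-parser core: it applies the prefix parselet for the
+// next token, then keeps folding infix parselets for as long as their
+// precedence (alternation 1, concatenation 2, quantifiers 3) exceeds the
+// caller's precedence argument.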
+func (p *Parser) parseExpr(precedence int) *Expr {
+ tok := p.lexer.NextToken()
+ prefix := p.prefixParselets[tok.kind]
+ if prefix == nil {
+ throwUnexpectedToken(tok.pos, tok.String())
+ }
+ left := prefix(tok)
+
+ for precedence < p.precedenceOf(p.lexer.Peek()) {
+ tok := p.lexer.NextToken()
+ infix := p.infixParselets[tok.kind]
+ left = infix(left, tok)
+ }
+
+ return left
+}
+
+func (p *Parser) parsePrefixElementary(tok token) *Expr {
+ return p.newExpr(tok2op[tok.kind], tok.pos)
+}
+
+func (p *Parser) parseCharClass(op Operation, tok token) *Expr {
+ var endPos Position
+ p.charClass = p.charClass[:0]
+ for {
+ p.charClass = append(p.charClass, *p.parseExpr(0))
+ next := p.lexer.Peek()
+ if next.kind == tokRbracket {
+ endPos = next.pos
+ p.lexer.NextToken()
+ break
+ }
+ if next.kind == tokNone {
+ throw(tok.pos, "unterminated '['")
+ }
+ }
+
+ result := p.newExpr(op, combinePos(tok.pos, endPos))
+ result.Args = append(result.Args, p.charClass...)
+ return result
+}
+
+func (p *Parser) parseMinus(left *Expr, tok token) *Expr {
+ if p.isValidCharRangeOperand(left) {
+ if p.lexer.Peek().kind != tokRbracket {
+ right := p.parseExpr(2)
+ return p.newExpr(OpCharRange, combinePos(left.Pos, right.Pos), left, right)
+ }
+ }
+ p.charClass = append(p.charClass, *left)
+ return p.newExpr(OpChar, tok.pos)
+}
+
+func (p *Parser) isValidCharRangeOperand(e *Expr) bool {
+ switch e.Op {
+ case OpEscapeHex, OpEscapeOctal, OpEscapeMeta, OpChar:
+ return true
+ case OpEscapeChar:
+ switch p.exprValue(e) {
+ case `\\`, `\|`, `\*`, `\+`, `\?`, `\.`, `\[`, `\^`, `\$`, `\(`, `\)`:
+ return true
+ }
+ }
+ return false
+}
+
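+// parsePlus handles both plain `x+` (OpPlus) and the possessive forms: when the
+// left operand is already a quantifier, as in `x++`, `x*+`, `x?+` or `x{n}+`,
+// the trailing '+' marks that quantifier possessive instead.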
+func (p *Parser) parsePlus(left *Expr, tok token) *Expr {
+ op := OpPlus
+ switch left.Op {
+ case OpPlus, OpStar, OpQuestion, OpRepeat:
+ op = OpPossessive
+ }
+ return p.newExpr(op, combinePos(left.Pos, tok.pos), left)
+}
+
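+// parseQuestion handles plain `x?` (OpQuestion) and the non-greedy forms: when
+// the left operand is already a quantifier, as in `x??`, `x*?`, `x+?` or `x{n}?`,
+// the trailing '?' makes that quantifier non-greedy.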
+func (p *Parser) parseQuestion(left *Expr, tok token) *Expr {
+ op := OpQuestion
+ switch left.Op {
+ case OpPlus, OpStar, OpQuestion, OpRepeat:
+ op = OpNonGreedy
+ }
+ return p.newExpr(op, combinePos(left.Pos, tok.pos), left)
+}
+
+func (p *Parser) parseAlt(left *Expr, tok token) *Expr {
+ var right *Expr
+ switch p.lexer.Peek().kind {
+ case tokRparen, tokNone:
+ // This is needed to handle `(x|)` syntax.
+ right = p.newEmpty(tok.pos)
+ default:
+ right = p.parseExpr(1)
+ }
+ if left.Op == OpAlt {
+ left.Args = append(left.Args, *right)
+ left.Pos.End = right.End()
+ return left
+ }
+ return p.newExpr(OpAlt, combinePos(left.Pos, right.Pos), left, right)
+}
+
+func (p *Parser) parseGroupItem(tok token) *Expr {
+ if p.lexer.Peek().kind == tokRparen {
+ // This is needed to handle `()` syntax.
+ return p.newEmpty(tok.pos)
+ }
+ return p.parseExpr(0)
+}
+
+func (p *Parser) parseGroup(op Operation, tok token) *Expr {
+ x := p.parseGroupItem(tok)
+ result := p.newExpr(op, tok.pos, x)
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseNamedCapture(form Form, tok token) *Expr {
+ prefixLen := len("(?<")
+ if form == FormDefault {
+ prefixLen = len("(?P<")
+ }
+ name := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(prefixLen),
+ End: tok.pos.End - uint16(len(">")),
+ })
+ x := p.parseGroupItem(tok)
+ result := p.newExprForm(OpNamedCapture, form, tok.pos, x, name)
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
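+// parseGroupWithFlags distinguishes the three "(?..." flag forms by the token
+// text: a flag-only group such as `(?i)` (OpFlagOnlyGroup), a plain
+// non-capturing group `(?:re)` (OpGroup), and a flagged non-capturing group
+// such as `(?i:re)` (OpGroupWithFlags).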
+func (p *Parser) parseGroupWithFlags(tok token) *Expr {
+ var result *Expr
+ val := p.out.Pattern[tok.pos.Begin+1 : tok.pos.End]
+ switch {
+ case !strings.HasSuffix(val, ":"):
+ flags := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(len("(?")),
+ End: tok.pos.End,
+ })
+ result = p.newExpr(OpFlagOnlyGroup, tok.pos, flags)
+ case val == "?:":
+ x := p.parseGroupItem(tok)
+ result = p.newExpr(OpGroup, tok.pos, x)
+ default:
+ flags := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(len("(?")),
+ End: tok.pos.End - uint16(len(":")),
+ })
+ x := p.parseGroupItem(tok)
+ result = p.newExpr(OpGroupWithFlags, tok.pos, x, flags)
+ }
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseEscape(op Operation, prefix string, tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(prefix))
+ lit := p.newExpr(OpString, litPos)
+ return p.newExpr(op, tok.pos, lit)
+}
+
+func (p *Parser) precedenceOf(tok token) int {
+ switch tok.kind {
+ case tokPipe:
+ return 1
+ case tokConcat, tokMinus:
+ return 2
+ case tokPlus, tokStar, tokQuestion, tokRepeat:
+ return 3
+ default:
+ return 0
+ }
+}
+
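+// newPCRE splits a delimited source such as `/foo/i`, `{foo}x` or `~foo~` into
+// pattern, delimiter pair and trailing modifiers. Bracket-style opening
+// delimiters pair with their matching closers, while whitespace, alphanumeric
+// and '\' delimiters are rejected.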
+func (p *Parser) newPCRE(source string) (*RegexpPCRE, error) {
+ if source == "" {
+ return nil, errors.New("empty pattern: can't find delimiters")
+ }
+
+ delim := source[0]
+ endDelim := delim
+ switch delim {
+ case '(':
+ endDelim = ')'
+ case '{':
+ endDelim = '}'
+ case '[':
+ endDelim = ']'
+ case '<':
+ endDelim = '>'
+ case '\\':
+ return nil, errors.New("'\\' is not a valid delimiter")
+ default:
+ if isSpace(delim) {
+ return nil, errors.New("whitespace is not a valid delimiter")
+ }
+ if isAlphanumeric(delim) {
+ return nil, errors.New("'" + string(delim) + "' is not a valid delimiter")
+ }
+ }
+
+ const delimLen = 1
+ j := strings.LastIndexByte(source[delimLen:], endDelim)
+ if j == -1 {
+ return nil, errors.New("can't find '" + string(endDelim) + "' ending delimiter")
+ }
+ j += delimLen
+
+ pcre := &RegexpPCRE{
+ Pattern: source[1:j],
+ Source: source,
+ Delim: [2]byte{delim, endDelim},
+ Modifiers: source[j+1:],
+ }
+ return pcre, nil
+}
+
+var tok2op = [256]Operation{
+ tokDollar: OpDollar,
+ tokCaret: OpCaret,
+ tokDot: OpDot,
+ tokChar: OpChar,
+ tokMinus: OpChar,
+ tokPosixClass: OpPosixClass,
+ tokComment: OpComment,
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/pos.go b/vendor/github.com/quasilyte/regex/syntax/pos.go
new file mode 100644
index 00000000..51bdbf87
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/pos.go
@@ -0,0 +1,10 @@
+package syntax
+
+type Position struct {
+ Begin uint16
+ End uint16
+}
+
+func combinePos(begin, end Position) Position {
+ return Position{Begin: begin.Begin, End: end.End}
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go
new file mode 100644
index 00000000..8800436b
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go
@@ -0,0 +1,59 @@
+// Code generated by "stringer -type=tokenKind -trimprefix=tok -linecomment=true"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[tokNone-0]
+ _ = x[tokChar-1]
+ _ = x[tokGroupFlags-2]
+ _ = x[tokPosixClass-3]
+ _ = x[tokConcat-4]
+ _ = x[tokRepeat-5]
+ _ = x[tokEscapeChar-6]
+ _ = x[tokEscapeMeta-7]
+ _ = x[tokEscapeOctal-8]
+ _ = x[tokEscapeUni-9]
+ _ = x[tokEscapeUniFull-10]
+ _ = x[tokEscapeHex-11]
+ _ = x[tokEscapeHexFull-12]
+ _ = x[tokComment-13]
+ _ = x[tokQ-14]
+ _ = x[tokMinus-15]
+ _ = x[tokLbracket-16]
+ _ = x[tokLbracketCaret-17]
+ _ = x[tokRbracket-18]
+ _ = x[tokDollar-19]
+ _ = x[tokCaret-20]
+ _ = x[tokQuestion-21]
+ _ = x[tokDot-22]
+ _ = x[tokPlus-23]
+ _ = x[tokStar-24]
+ _ = x[tokPipe-25]
+ _ = x[tokLparen-26]
+ _ = x[tokLparenName-27]
+ _ = x[tokLparenNameAngle-28]
+ _ = x[tokLparenNameQuote-29]
+ _ = x[tokLparenFlags-30]
+ _ = x[tokLparenAtomic-31]
+ _ = x[tokLparenPositiveLookahead-32]
+ _ = x[tokLparenPositiveLookbehind-33]
+ _ = x[tokLparenNegativeLookahead-34]
+ _ = x[tokLparenNegativeLookbehind-35]
+ _ = x[tokRparen-36]
+}
+
+const _tokenKind_name = "NoneCharGroupFlagsPosixClassConcatRepeatEscapeCharEscapeMetaEscapeOctalEscapeUniEscapeUniFullEscapeHexEscapeHexFullComment\\Q-[[^]$^?.+*|((?P<name>(?<name>(?'name'(?flags(?>(?=(?<=(?!(?<!)"
+
+func (i tokenKind) String() string {
+ if i < 0 || i >= tokenKind(len(_tokenKind_index)-1) {
+ return "tokenKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _tokenKind_name[_tokenKind_index[i]:_tokenKind_index[i+1]]
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/utils.go b/vendor/github.com/quasilyte/regex/syntax/utils.go
new file mode 100644
index 00000000..e5b65482
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/utils.go
@@ -0,0 +1,30 @@
+package syntax
+
+func isSpace(ch byte) bool {
+ switch ch {
+ case '\r', '\n', '\t', '\f', '\v', ' ':
+ return true
+ default:
+ return false
+ }
+}
+
+func isAlphanumeric(ch byte) bool {
+ return (ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9')
+}
+
+func isDigit(ch byte) bool {
+ return ch >= '0' && ch <= '9'
+}
+
+func isOctalDigit(ch byte) bool {
+ return ch >= '0' && ch <= '7'
+}
+
+func isHexDigit(ch byte) bool {
+ return (ch >= '0' && ch <= '9') ||
+ (ch >= 'a' && ch <= 'f') ||
+ (ch >= 'A' && ch <= 'F')
+}
diff --git a/vendor/github.com/ryancurrah/gomodguard/.gitignore b/vendor/github.com/ryancurrah/gomodguard/.gitignore
index 5131b46d..030056d4 100644
--- a/vendor/github.com/ryancurrah/gomodguard/.gitignore
+++ b/vendor/github.com/ryancurrah/gomodguard/.gitignore
@@ -19,3 +19,5 @@
*.xml
dist/
+
+coverage.*
\ No newline at end of file
diff --git a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml
new file mode 100644
index 00000000..9c19e63a
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml
@@ -0,0 +1,6 @@
+linters:
+ enable-all: true
+ disable:
+ - funlen
+ - gochecknoglobals
+ - lll
diff --git a/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml b/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml
index c0f061f5..38a2f0be 100644
--- a/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml
+++ b/vendor/github.com/ryancurrah/gomodguard/.gomodguard.yaml
@@ -2,7 +2,7 @@ allowed:
modules: # List of allowed modules
- gopkg.in/yaml.v2
- github.com/go-xmlfmt/xmlfmt
- - github.com/phayes/checkstyle
+ - github.com/Masterminds/semver
domains: # List of allowed module domains
- golang.org
@@ -15,4 +15,13 @@ blocked:
- github.com/mitchellh/go-homedir:
recommendations:
- github.com/ryancurrah/gomodguard
- reason: "testing if the linted module is not blocked when it is recommended"
+ reason: "testing if the current/linted module is not blocked when it is recommended"
+ - github.com/phayes/checkstyle:
+ recommendations:
+ - github.com/someother/module
+ reason: "testing if module is blocked with recommendation"
+
+ versions:
+ - github.com/mitchellh/go-homedir:
+ version: "<= 1.1.0"
+ reason: "testing if blocked version constraint works."
diff --git a/vendor/github.com/ryancurrah/gomodguard/Makefile b/vendor/github.com/ryancurrah/gomodguard/Makefile
index d765f52d..9af2f76e 100644
--- a/vendor/github.com/ryancurrah/gomodguard/Makefile
+++ b/vendor/github.com/ryancurrah/gomodguard/Makefile
@@ -3,7 +3,7 @@ version = $(shell printf '%s' $$(cat VERSION))
.PHONEY: lint
lint:
- golangci-lint run -v --enable-all --disable funlen,gochecknoglobals,lll ./...
+ golangci-lint run ./...
.PHONEY: build
build:
@@ -17,6 +17,14 @@ dockerbuild:
run: build
./gomodguard
+.PHONEY: test
+test:
+ go test -v -coverprofile coverage.out
+
+.PHONEY: cover
+cover:
+ gocover-cobertura < coverage.out > coverage.xml
+
.PHONEY: dockerrun
dockerrun: dockerbuild
docker run -v "${current_dir}/.gomodguard.yaml:/.gomodguard.yaml" ryancurrah/gomodguard:latest
@@ -30,8 +38,12 @@ release:
.PHONEY: clean
clean:
rm -rf dist/
- rm -f gomodguard
+ rm -f gomodguard coverage.xml coverage.out
.PHONEY: install-tools-mac
install-tools-mac:
brew install goreleaser/tap/goreleaser
+
+.PHONEY: install-go-tools
+install-go-tools:
+ go get github.com/t-yuki/gocover-cobertura
diff --git a/vendor/github.com/ryancurrah/gomodguard/README.md b/vendor/github.com/ryancurrah/gomodguard/README.md
index 89a2398b..f09b5e1f 100644
--- a/vendor/github.com/ryancurrah/gomodguard/README.md
+++ b/vendor/github.com/ryancurrah/gomodguard/README.md
@@ -1,4 +1,6 @@
# gomodguard
+![Codecov](https://img.shields.io/codecov/c/gh/ryancurrah/gomodguard?style=flat-square)
+![GitHub Workflow Status](https://img.shields.io/github/workflow/status/ryancurrah/gomodguard/Go?logo=Go&style=flat-square)
@@ -18,6 +20,8 @@ Alternative modules can be optionally recommended in the blocked modules list.
If the linted module imports a blocked module but the linted module is in the recommended modules list the blocked module is ignored. Usually, this means the linted module wraps that blocked module for use by other modules, therefore the import of the blocked module should not be blocked.
+Version constraints can also be specified for modules, letting you block new, old, or specific versions of a module.
+
Results are printed to `stdout`.
Logging statements are printed to `stderr`.
@@ -42,6 +46,10 @@ blocked:
recommendations: # Recommended modules that should be used instead (Optional)
- golang.org/x/mod
reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional)
+ versions: # List of blocked module version constraints.
+ - github.com/mitchellh/go-homedir: # Blocked module with version constraint.
+ version: "<= 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons.
+ reason: "testing if blocked version constraint works." # Reason why the version constraint exists.
```
## Usage
@@ -52,17 +60,22 @@ Usage: gomodguard [files...]
Also supports package syntax but will use it in relative path, i.e. ./pkg/...
Flags:
-f string
- Report results to the specified file. A report type must also be specified
+ Report results to the specified file. A report type must also be specified
-file string
- -h Show this help text
+ -h Show this help text
-help
- -n Don't lint test files
+ -i int
+ Exit code when issues were found (default 2)
+ -issues-exit-code int
+ (default 2)
+
+ -n Don't lint test files
-no-test
-r string
- Report results to one of the following formats: checkstyle. A report file destination must also be specified
+ Report results to one of the following formats: checkstyle. A report file destination must also be specified
-report string
```
diff --git a/vendor/github.com/ryancurrah/gomodguard/cmd.go b/vendor/github.com/ryancurrah/gomodguard/cmd.go
new file mode 100644
index 00000000..652e61f8
--- /dev/null
+++ b/vendor/github.com/ryancurrah/gomodguard/cmd.go
@@ -0,0 +1,239 @@
+package gomodguard
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-xmlfmt/xmlfmt"
+ "github.com/mitchellh/go-homedir"
+ "github.com/phayes/checkstyle"
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ errFindingHomedir = "unable to find home directory, %w"
+ errReadingConfigFile = "could not read config file: %w"
+ errParsingConfigFile = "could not parse config file: %w"
+)
+
+var (
+ configFile = ".gomodguard.yaml"
+ logger = log.New(os.Stderr, "", 0)
+ errFindingConfigFile = fmt.Errorf("could not find config file")
+)
+
+// Run the gomodguard linter. Returns the exit code to use.
+func Run() int {
+ var (
+ args []string
+ help bool
+ noTest bool
+ report string
+ reportFile string
+ issuesExitCode int
+ cwd, _ = os.Getwd()
+ )
+
+ flag.BoolVar(&help, "h", false, "Show this help text")
+ flag.BoolVar(&help, "help", false, "")
+ flag.BoolVar(&noTest, "n", false, "Don't lint test files")
+ flag.BoolVar(&noTest, "no-test", false, "")
+ flag.StringVar(&report, "r", "", "Report results to one of the following formats: checkstyle. A report file destination must also be specified")
+ flag.StringVar(&report, "report", "", "")
+ flag.StringVar(&reportFile, "f", "", "Report results to the specified file. A report type must also be specified")
+ flag.StringVar(&reportFile, "file", "", "")
+ flag.IntVar(&issuesExitCode, "i", 2, "Exit code when issues were found")
+ flag.IntVar(&issuesExitCode, "issues-exit-code", 2, "")
+ flag.Parse()
+
+ report = strings.TrimSpace(strings.ToLower(report))
+
+ if help {
+ showHelp()
+ return 0
+ }
+
+ if report != "" && report != "checkstyle" {
+ logger.Fatalf("error: invalid report type '%s'", report)
+ }
+
+ if report != "" && reportFile == "" {
+ logger.Fatalf("error: a report file must be specified when a report is enabled")
+ }
+
+ if report == "" && reportFile != "" {
+ logger.Fatalf("error: a report type must be specified when a report file is enabled")
+ }
+
+ args = flag.Args()
+ if len(args) == 0 {
+ args = []string{"./..."}
+ }
+
+ config, err := GetConfig(configFile)
+ if err != nil {
+ logger.Fatalf("error: %s", err)
+ }
+
+ filteredFiles := GetFilteredFiles(cwd, noTest, args)
+
+ processor, err := NewProcessor(*config, logger)
+ if err != nil {
+ logger.Fatalf("error: %s", err)
+ }
+
+ results := processor.ProcessFiles(filteredFiles)
+
+ if report == "checkstyle" {
+ err := WriteCheckstyle(reportFile, results)
+ if err != nil {
+ logger.Fatalf("error: %s", err)
+ }
+ }
+
+ for _, r := range results {
+ fmt.Println(r.String())
+ }
+
+ if len(results) > 0 {
+ return issuesExitCode
+ }
+
+ return 0
+}
+
+// GetConfig from YAML file.
+func GetConfig(configFile string) (*Configuration, error) {
+ config := Configuration{}
+
+ home, err := homedir.Dir()
+ if err != nil {
+ return nil, fmt.Errorf(errFindingHomedir, err)
+ }
+
+ cfgFile := ""
+ homeDirCfgFile := filepath.Join(home, configFile)
+
+ switch {
+ case fileExists(configFile):
+ cfgFile = configFile
+ case fileExists(homeDirCfgFile):
+ cfgFile = homeDirCfgFile
+ default:
+ return nil, fmt.Errorf("%w: %s %s", errFindingConfigFile, configFile, homeDirCfgFile)
+ }
+
+ data, err := ioutil.ReadFile(cfgFile)
+ if err != nil {
+ return nil, fmt.Errorf(errReadingConfigFile, err)
+ }
+
+ err = yaml.Unmarshal(data, &config)
+ if err != nil {
+ return nil, fmt.Errorf(errParsingConfigFile, err)
+ }
+
+ return &config, nil
+}
+
+// GetFilteredFiles returns files based on search string arguments and filters.
+func GetFilteredFiles(cwd string, skipTests bool, args []string) []string {
+ var (
+ foundFiles = []string{}
+ filteredFiles = []string{}
+ )
+
+ for _, f := range args {
+ if strings.HasSuffix(f, "/...") {
+ dir, _ := filepath.Split(f)
+
+ foundFiles = append(foundFiles, expandGoWildcard(dir)...)
+
+ continue
+ }
+
+ if _, err := os.Stat(f); err == nil {
+ foundFiles = append(foundFiles, f)
+ }
+ }
+
+ // Use relative paths to print shorter names; filter out test files if requested.
+ for _, f := range foundFiles {
+ if skipTests {
+ if strings.HasSuffix(f, "_test.go") {
+ continue
+ }
+ }
+
+ if relativePath, err := filepath.Rel(cwd, f); err == nil {
+ filteredFiles = append(filteredFiles, relativePath)
+
+ continue
+ }
+
+ filteredFiles = append(filteredFiles, f)
+ }
+
+ return filteredFiles
+}
+
+// showHelp prints the command usage text and the default values of all flags.
+func showHelp() {
+ helpText := `Usage: gomodguard [files...]
+Also supports package syntax but will use it in relative path, i.e. ./pkg/...
+Flags:`
+ fmt.Println(helpText)
+ flag.PrintDefaults()
+}
+
+// WriteCheckstyle takes the results and writes them to a checkstyle formatted file.
+func WriteCheckstyle(checkstyleFilePath string, results []Result) error {
+ check := checkstyle.New()
+
+ for i := range results {
+ file := check.EnsureFile(results[i].FileName)
+ file.AddError(checkstyle.NewError(results[i].LineNumber, 1, checkstyle.SeverityError, results[i].Reason, "gomodguard"))
+ }
+
+ checkstyleXML := fmt.Sprintf("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n%s", check.String())
+
+ err := ioutil.WriteFile(checkstyleFilePath, []byte(xmlfmt.FormatXML(checkstyleXML, "", " ")), 0644) // nolint:gosec
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// fileExists returns true if the file path provided exists.
+func fileExists(filename string) bool {
+ info, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+ return false
+ }
+
+ return !info.IsDir()
+}
+
+// expandGoWildcard walks the provided root and returns every file whose name ends in ".go".
+func expandGoWildcard(root string) []string {
+ foundFiles := []string{}
+
+ _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ // Only append go foundFiles.
+ if !strings.HasSuffix(info.Name(), ".go") {
+ return nil
+ }
+
+ foundFiles = append(foundFiles, path)
+
+ return nil
+ })
+
+ return foundFiles
+}
diff --git a/vendor/github.com/ryancurrah/gomodguard/go.mod b/vendor/github.com/ryancurrah/gomodguard/go.mod
index 0f0e92e4..15231c98 100644
--- a/vendor/github.com/ryancurrah/gomodguard/go.mod
+++ b/vendor/github.com/ryancurrah/gomodguard/go.mod
@@ -3,9 +3,11 @@ module github.com/ryancurrah/gomodguard
go 1.14
require (
+ github.com/Masterminds/semver v1.5.0
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b
github.com/mitchellh/go-homedir v1.1.0
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d
+ github.com/pkg/errors v0.9.1
golang.org/x/mod v0.2.0
gopkg.in/yaml.v2 v2.2.8
)
diff --git a/vendor/github.com/ryancurrah/gomodguard/go.sum b/vendor/github.com/ryancurrah/gomodguard/go.sum
index 0f4bf323..55ae4e57 100644
--- a/vendor/github.com/ryancurrah/gomodguard/go.sum
+++ b/vendor/github.com/ryancurrah/gomodguard/go.sum
@@ -1,9 +1,13 @@
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
diff --git a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go
index cd4f7d66..16467734 100644
--- a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go
+++ b/vendor/github.com/ryancurrah/gomodguard/gomodguard.go
@@ -12,25 +12,104 @@ import (
"os/exec"
"strings"
+ "github.com/Masterminds/semver"
+
"golang.org/x/mod/modfile"
)
-var (
- blockedReasonNotInAllowedList = "import of package `%s` is blocked because the module is not in the allowed modules list."
- blockedReasonInBlockedList = "import of package `%s` is blocked because the module is in the blocked modules list."
- goModFilename = "go.mod"
+const (
+ goModFilename = "go.mod"
+ errReadingGoModFile = "unable to read go mod file %s: %w"
+ errParsingGoModFile = "unable to parse go mod file %s: %w"
)
-// Recommendations are alternative modules to use and a reason why.
-type Recommendations struct {
- Recommendations []string `yaml:"recommendations"`
- Reason string `yaml:"reason"`
+var (
+ blockReasonNotInAllowedList = "import of package `%s` is blocked because the module is not in the allowed modules list."
+ blockReasonInBlockedList = "import of package `%s` is blocked because the module is in the blocked modules list."
+)
+
+// BlockedVersion has a version constraint and a reason why the module version is blocked.
+type BlockedVersion struct {
+ Version string `yaml:"version"`
+ Reason string `yaml:"reason"`
+ lintedModuleVersion string `yaml:"-"`
}
-// IsRecommended returns true if the package provided is in the Recommendations list
-func (r *Recommendations) IsRecommended(pkg string) bool {
+// Set required values for performing checks. This must be run before anything else.
+func (r *BlockedVersion) Set(lintedModuleVersion string) {
+ r.lintedModuleVersion = lintedModuleVersion
+}
+
+// IsAllowed returns true if the blocked module is allowed. You must Set() values first.
+func (r *BlockedVersion) IsAllowed() bool {
+ return !r.isLintedModuleVersionBlocked()
+}
+
+// isLintedModuleVersionBlocked returns true if a version constraint is
+// specified and the linted module version satisfies it.
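+// For example, with Version "<= 1.1.0" a linted version of v1.1.0 is blocked
+// while v1.2.0 is not; constraints or versions that fail to parse are treated
+// as not blocked.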
+func (r *BlockedVersion) isLintedModuleVersionBlocked() bool {
+ if r.Version == "" {
+ return false
+ }
+
+ constraint, err := semver.NewConstraint(r.Version)
+ if err != nil {
+ return false
+ }
+
+ version, err := semver.NewVersion(strings.TrimLeft(r.lintedModuleVersion, "v"))
+ if err != nil {
+ return false
+ }
+
+ return constraint.Check(version)
+}
+
+// Message returns the reason why the module version is blocked.
+func (r *BlockedVersion) Message() string {
+ msg := ""
+
+ // Add version constraint to message
+ msg += fmt.Sprintf("version `%s` is blocked because it does not meet the version constraint `%s`.", r.lintedModuleVersion, r.Version)
+
+ if r.Reason == "" {
+ return msg
+ }
+
+ // Add reason to message
+ msg += fmt.Sprintf(" %s.", strings.TrimRight(r.Reason, "."))
+
+ return msg
+}
+
+// BlockedModule has alternative modules to use and a reason why the module is blocked.
+type BlockedModule struct {
+ Recommendations []string `yaml:"recommendations"`
+ Reason string `yaml:"reason"`
+ currentModuleName string `yaml:"-"`
+}
+
+// Set required values for performing checks. This must be run before anything else.
+func (r *BlockedModule) Set(currentModuleName string) {
+ r.currentModuleName = currentModuleName
+}
+
+// IsAllowed returns true if the blocked module is allowed. You must Set() values first.
+func (r *BlockedModule) IsAllowed() bool {
+ // If the current go.mod file being linted is a recommended module of a
+ // blocked module and it imports that blocked module, do not set as blocked.
+ // This could mean that the linted module is a wrapper for that blocked module.
+ return r.isCurrentModuleARecommendation()
+}
+
+// isCurrentModuleARecommendation returns true if the current module is in the Recommendations list.
+func (r *BlockedModule) isCurrentModuleARecommendation() bool {
+ if r == nil {
+ return false
+ }
+
for n := range r.Recommendations {
- if strings.TrimSpace(pkg) == strings.TrimSpace(r.Recommendations[n]) {
+ if strings.TrimSpace(r.currentModuleName) == strings.TrimSpace(r.Recommendations[n]) {
return true
}
}
@@ -38,14 +117,11 @@ func (r *Recommendations) IsRecommended(pkg string) bool {
return false
}
-// String returns the recommended modules and reason message.
-func (r *Recommendations) String() string {
+// Message returns the reason why the module is blocked and a list of recommended modules if provided.
+func (r *BlockedModule) Message() string {
msg := ""
- if r == nil {
- return msg
- }
-
+ // Add recommendations to message
for i := range r.Recommendations {
switch {
case len(r.Recommendations) == 1:
@@ -59,8 +135,15 @@ func (r *Recommendations) String() string {
}
}
- if r.Reason != "" {
- msg += fmt.Sprintf(" %s", r.Reason)
+ if r.Reason == "" {
+ return msg
+ }
+
+ // Add reason to message
+ if msg == "" {
+ msg = fmt.Sprintf("%s.", strings.TrimRight(r.Reason, "."))
+ } else {
+ msg += fmt.Sprintf(" %s.", strings.TrimRight(r.Reason, "."))
}
return msg
@@ -68,25 +151,24 @@ func (r *Recommendations) String() string {
// HasRecommendations returns true if the blocked package has
// recommended modules.
-func (r *Recommendations) HasRecommendations() bool {
+func (r *BlockedModule) HasRecommendations() bool {
+ if r == nil {
+ return false
+ }
+
return len(r.Recommendations) > 0
}
-// BlockedModule is a blocked module name and
-// optionally a list of recommended modules
-// and a reason message.
-type BlockedModule map[string]Recommendations
+// BlockedVersions is a list of modules blocked by a version constraint.
+type BlockedVersions []map[string]BlockedVersion
-// BlockedModules a list of blocked modules.
-type BlockedModules []BlockedModule
-
-// Get returns the modules that are blocked.
-func (b BlockedModules) Get() []string {
+// Get returns the module names that are blocked.
+func (b BlockedVersions) Get() []string {
modules := make([]string, len(b))
- for i := range b {
- for module := range b[i] {
- modules[i] = module
+ for n := range b {
+ for module := range b[n] {
+ modules[n] = module
break
}
}
@@ -94,46 +176,49 @@ func (b BlockedModules) Get() []string {
return modules
}
-// RecommendedModules will return a list of recommended modules for the
-// package provided. If there is no recommendation nil will be returned.
-func (b BlockedModules) RecommendedModules(pkg string) *Recommendations {
- for i := range b {
- for blockedModule, recommendations := range b[i] {
- if strings.HasPrefix(strings.ToLower(pkg), strings.ToLower(blockedModule)) && recommendations.HasRecommendations() {
- return &recommendations
+// GetBlockReason returns a block version if one is set for the provided linted module name.
+func (b BlockedVersions) GetBlockReason(lintedModuleName, lintedModuleVersion string) *BlockedVersion {
+ for _, blockedModule := range b {
+ for blockedModuleName, blockedVersion := range blockedModule {
+ if strings.EqualFold(strings.TrimSpace(lintedModuleName), strings.TrimSpace(blockedModuleName)) {
+ blockedVersion.Set(lintedModuleVersion)
+ return &blockedVersion
}
-
- break
}
}
return nil
}
-// IsBlockedPackage returns true if the package name is in
-// the blocked modules list.
-func (b BlockedModules) IsBlockedPackage(pkg string) bool {
- blockedModules := b.Get()
- for i := range blockedModules {
- if strings.HasPrefix(strings.ToLower(pkg), strings.ToLower(blockedModules[i])) {
- return true
+// BlockedModules is a list of blocked modules.
+type BlockedModules []map[string]BlockedModule
+
+// Get returns the module names that are blocked.
+func (b BlockedModules) Get() []string {
+ modules := make([]string, len(b))
+
+ for n := range b {
+ for module := range b[n] {
+ modules[n] = module
+ break
}
}
- return false
+ return modules
}
-// IsBlockedModule returns true if the given module name is in the
-// blocked modules list.
-func (b BlockedModules) IsBlockedModule(module string) bool {
- blockedModules := b.Get()
- for i := range blockedModules {
- if strings.EqualFold(module, strings.TrimSpace(blockedModules[i])) {
- return true
+// GetBlockReason returns a block module if one is set for the provided linted module name.
+func (b BlockedModules) GetBlockReason(currentModuleName, lintedModuleName string) *BlockedModule {
+ for _, blockedModule := range b {
+ for blockedModuleName, blockedModule := range blockedModule {
+ if strings.EqualFold(strings.TrimSpace(lintedModuleName), strings.TrimSpace(blockedModuleName)) {
+ blockedModule.Set(currentModuleName)
+ return &blockedModule
+ }
}
}
- return false
+ return nil
}
// Allowed is a list of modules and module
@@ -145,10 +230,11 @@ type Allowed struct {
// IsAllowedModule returns true if the given module
// name is in the allowed modules list.
-func (a *Allowed) IsAllowedModule(module string) bool {
+func (a *Allowed) IsAllowedModule(moduleName string) bool {
allowedModules := a.Modules
+
for i := range allowedModules {
- if strings.EqualFold(module, strings.TrimSpace(allowedModules[i])) {
+ if strings.EqualFold(strings.TrimSpace(moduleName), strings.TrimSpace(allowedModules[i])) {
return true
}
}
@@ -158,10 +244,11 @@ func (a *Allowed) IsAllowedModule(module string) bool {
// IsAllowedModuleDomain returns true if the given modules domain is
// in the allowed module domains list.
-func (a *Allowed) IsAllowedModuleDomain(module string) bool {
+func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool {
allowedDomains := a.Domains
+
for i := range allowedDomains {
- if strings.HasPrefix(strings.ToLower(module), strings.TrimSpace(strings.ToLower(allowedDomains[i]))) {
+ if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), strings.TrimSpace(strings.ToLower(allowedDomains[i]))) {
return true
}
}
@@ -172,7 +259,8 @@ func (a *Allowed) IsAllowedModuleDomain(module string) bool {
// Blocked is a list of modules that are
// blocked and not to be used.
type Blocked struct {
- Modules BlockedModules `yaml:"modules"`
+ Modules BlockedModules `yaml:"modules"`
+ Versions BlockedVersions `yaml:"versions"`
}
// Configuration of gomodguard allow and block lists.
@@ -192,46 +280,43 @@ type Result struct {
// String returns the filename, line
// number and reason of a Result.
func (r *Result) String() string {
- return fmt.Sprintf("%s:%d: %s", r.FileName, r.LineNumber, r.Reason)
+ return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason)
}
// Processor processes Go files.
type Processor struct {
- config Configuration
- logger *log.Logger
- modfile *modfile.File
- blockedModulesFromModFile []string
- result []Result
+ Config Configuration
+ Logger *log.Logger
+ Modfile *modfile.File
+ blockedModulesFromModFile map[string][]string
+ Result []Result
}
// NewProcessor will create a Processor to lint blocked packages.
func NewProcessor(config Configuration, logger *log.Logger) (*Processor, error) {
goModFileBytes, err := loadGoModFile()
if err != nil {
- errMsg := fmt.Sprintf("unable to read %s file: %s", goModFilename, err)
-
- return nil, fmt.Errorf(errMsg)
+ return nil, fmt.Errorf(errReadingGoModFile, goModFilename, err)
}
mfile, err := modfile.Parse(goModFilename, goModFileBytes, nil)
if err != nil {
- errMsg := fmt.Sprintf("unable to parse %s file: %s", goModFilename, err)
-
- return nil, fmt.Errorf(errMsg)
+ return nil, fmt.Errorf(errParsingGoModFile, goModFilename, err)
}
logger.Printf("info: allowed modules, %+v", config.Allowed.Modules)
logger.Printf("info: allowed module domains, %+v", config.Allowed.Domains)
logger.Printf("info: blocked modules, %+v", config.Blocked.Modules.Get())
+ logger.Printf("info: blocked modules with version constraints, %+v", config.Blocked.Versions.Get())
p := &Processor{
- config: config,
- logger: logger,
- modfile: mfile,
- result: []Result{},
+ Config: config,
+ Logger: logger,
+ Modfile: mfile,
+ Result: []Result{},
}
- p.setBlockedModulesFromModFile()
+ p.SetBlockedModulesFromModFile()
return p, nil
}
@@ -244,13 +329,18 @@ func (p *Processor) ProcessFiles(filenames []string) []Result {
pluralModuleMsg = ""
}
- p.logger.Printf("info: found `%d` blocked module%s in the %s file, %+v",
- len(p.blockedModulesFromModFile), pluralModuleMsg, goModFilename, p.blockedModulesFromModFile)
+ blockedModules := make([]string, 0, len(p.blockedModulesFromModFile))
+ for blockedModuleName := range p.blockedModulesFromModFile {
+ blockedModules = append(blockedModules, blockedModuleName)
+ }
+
+ p.Logger.Printf("info: found %d blocked module%s in %s: %+v",
+ len(p.blockedModulesFromModFile), pluralModuleMsg, goModFilename, blockedModules)
for _, filename := range filenames {
data, err := ioutil.ReadFile(filename)
if err != nil {
- p.result = append(p.result, Result{
+ p.Result = append(p.Result, Result{
FileName: filename,
LineNumber: 0,
Reason: fmt.Sprintf("unable to read file, file cannot be linted (%s)", err.Error()),
@@ -260,7 +350,7 @@ func (p *Processor) ProcessFiles(filenames []string) []Result {
p.process(filename, data)
}
- return p.result
+ return p.Result
}
// process file imports and add lint error if blocked package is imported.
@@ -269,7 +359,7 @@ func (p *Processor) process(filename string, data []byte) {
file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments)
if err != nil {
- p.result = append(p.result, Result{
+ p.Result = append(p.Result, Result{
FileName: filename,
LineNumber: 0,
Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()),
@@ -279,23 +369,16 @@ func (p *Processor) process(filename string, data []byte) {
}
imports := file.Imports
- for i := range imports {
- importedPkg := strings.TrimSpace(strings.Trim(imports[i].Path.Value, "\""))
- if p.isBlockedPackageFromModFile(importedPkg) {
- reason := ""
+ for n := range imports {
+ importedPkg := strings.TrimSpace(strings.Trim(imports[n].Path.Value, "\""))
- if p.config.Blocked.Modules.IsBlockedPackage(importedPkg) {
- reason = fmt.Sprintf(blockedReasonInBlockedList, importedPkg)
- } else {
- reason = fmt.Sprintf(blockedReasonNotInAllowedList, importedPkg)
- }
+ blockReasons := p.isBlockedPackageFromModFile(importedPkg)
+ if blockReasons == nil {
+ continue
+ }
- recommendedModules := p.config.Blocked.Modules.RecommendedModules(importedPkg)
- if recommendedModules != nil {
- reason += fmt.Sprintf(" %s", recommendedModules.String())
- }
-
- p.addError(fileSet, imports[i].Pos(), reason)
+ for _, blockReason := range blockReasons {
+ p.addError(fileSet, imports[n].Pos(), blockReason)
}
}
}
@@ -305,7 +388,7 @@ func (p *Processor) process(filename string, data []byte) {
func (p *Processor) addError(fileset *token.FileSet, pos token.Pos, reason string) {
position := fileset.Position(pos)
- p.result = append(p.result, Result{
+ p.Result = append(p.Result, Result{
FileName: position.Filename,
LineNumber: position.Line,
Position: position,
@@ -313,64 +396,69 @@ func (p *Processor) addError(fileset *token.FileSet, pos token.Pos, reason strin
})
}
-// setBlockedModules determines which modules are blocked by reading
+// SetBlockedModulesFromModFile determines which modules are blocked by reading
// the go.mod file and comparing the require modules to the allowed modules.
-func (p *Processor) setBlockedModulesFromModFile() {
- blockedModules := make([]string, 0, len(p.modfile.Require))
- requiredModules := p.modfile.Require
- lintedModule := p.modfile.Module.Mod.Path
+func (p *Processor) SetBlockedModulesFromModFile() {
+ blockedModules := make(map[string][]string, len(p.Modfile.Require))
+ currentModuleName := p.Modfile.Module.Mod.Path
+ lintedModules := p.Modfile.Require
- for i := range requiredModules {
- if !requiredModules[i].Indirect {
- requiredModule := strings.TrimSpace(requiredModules[i].Mod.Path)
+ for i := range lintedModules {
+ if lintedModules[i].Indirect {
+ continue
+ }
- if p.config.Allowed.IsAllowedModuleDomain(requiredModule) {
- continue
- }
+ lintedModuleName := strings.TrimSpace(lintedModules[i].Mod.Path)
+ lintedModuleVersion := strings.TrimSpace(lintedModules[i].Mod.Version)
- if p.config.Allowed.IsAllowedModule(requiredModule) {
- continue
- }
+ var isAllowed bool
- requiredModuleIsBlocked := p.config.Blocked.Modules.IsBlockedModule(requiredModule)
+ switch {
+ case len(p.Config.Allowed.Modules) == 0 && len(p.Config.Allowed.Domains) == 0:
+ isAllowed = true
+ case p.Config.Allowed.IsAllowedModuleDomain(lintedModuleName):
+ isAllowed = true
+ case p.Config.Allowed.IsAllowedModule(lintedModuleName):
+ isAllowed = true
+ default:
+ isAllowed = false
+ }
- if len(p.config.Allowed.Modules) == 0 &&
- len(p.config.Allowed.Domains) == 0 &&
- !requiredModuleIsBlocked {
- continue
- }
+ blockModuleReason := p.Config.Blocked.Modules.GetBlockReason(currentModuleName, lintedModuleName)
+ blockVersionReason := p.Config.Blocked.Versions.GetBlockReason(lintedModuleName, lintedModuleVersion)
- // If the go.mod file being linted is a recommended module of a blocked module
- // and it imports that blocked module, do not set as a blocked. This means
- // that the linted module wraps that blocked module
- if requiredModuleIsBlocked {
- recommendedModules := p.config.Blocked.Modules.RecommendedModules(requiredModule)
+ if !isAllowed && blockModuleReason == nil && blockVersionReason == nil {
+ blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], blockReasonNotInAllowedList)
+ continue
+ }
- if recommendedModules.IsRecommended(lintedModule) {
- continue
- }
- }
+ if blockModuleReason != nil && !blockModuleReason.IsAllowed() {
+ blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], fmt.Sprintf("%s %s", blockReasonInBlockedList, blockModuleReason.Message()))
+ }
- blockedModules = append(blockedModules, requiredModule)
+ if blockVersionReason != nil && !blockVersionReason.IsAllowed() {
+ blockedModules[lintedModuleName] = append(blockedModules[lintedModuleName], fmt.Sprintf("%s %s", blockReasonInBlockedList, blockVersionReason.Message()))
}
}
- if len(blockedModules) > 0 {
- p.blockedModulesFromModFile = blockedModules
- }
+ p.blockedModulesFromModFile = blockedModules
}
-// isBlockedPackageFromModFile returns true if the imported packages
-// module is in the go.mod file and was blocked.
-func (p *Processor) isBlockedPackageFromModFile(pkg string) bool {
- blockedModulesFromModFile := p.blockedModulesFromModFile
- for i := range blockedModulesFromModFile {
- if strings.HasPrefix(strings.ToLower(pkg), strings.ToLower(blockedModulesFromModFile[i])) {
- return true
+// isBlockedPackageFromModFile returns the block reason if the package is blocked.
+func (p *Processor) isBlockedPackageFromModFile(packageName string) []string {
+ for blockedModuleName, blockReasons := range p.blockedModulesFromModFile {
+ if strings.HasPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) {
+ formattedReasons := make([]string, 0, len(blockReasons))
+
+ for _, blockReason := range blockReasons {
+ formattedReasons = append(formattedReasons, fmt.Sprintf(blockReason, packageName))
+ }
+
+ return formattedReasons
}
}
- return false
+ return nil
}
func loadGoModFile() ([]byte, error) {
@@ -386,6 +474,7 @@ func loadGoModFile() ([]byte, error) {
_, _ = buf.ReadFrom(stdout)
goEnv := make(map[string]string)
+
err := json.Unmarshal(buf.Bytes(), &goEnv)
if err != nil {
return ioutil.ReadFile(goModFilename)
diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE b/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE
new file mode 100644
index 00000000..77b261d7
--- /dev/null
+++ b/vendor/github.com/ryanrolds/sqlclosecheck/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2020 Ryan R. Olds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go
new file mode 100644
index 00000000..bc42dfb3
--- /dev/null
+++ b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go
@@ -0,0 +1,311 @@
+package analyzer
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+)
+
+const (
+ rowsName = "Rows"
+ stmtName = "Stmt"
+ closeMethod = "Close"
+)
+
+var (
+ sqlPackages = []string{
+ "database/sql",
+ "github.com/jmoiron/sqlx",
+ }
+)
+
+func NewAnalyzer() *analysis.Analyzer {
+ return &analysis.Analyzer{
+ Name: "sqlclosecheck",
+ Doc: "Checks that sql.Rows and sql.Stmt are closed.",
+ Run: run,
+ Requires: []*analysis.Analyzer{
+ buildssa.Analyzer,
+ },
+ }
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+
+ // Build list of types we are looking for
+ targetTypes := getTargetTypes(pssa, sqlPackages)
+
+ // If none of the types are found, skip
+ if len(targetTypes) == 0 {
+ return nil, nil
+ }
+
+ funcs := pssa.SrcFuncs
+ for _, f := range funcs {
+ for _, b := range f.Blocks {
+ for i := range b.Instrs {
+ // Check if instruction is call that returns a target type
+ targetValues := getTargetTypesValues(b, i, targetTypes)
+ if len(targetValues) == 0 {
+ continue
+ }
+
+ // log.Printf("%s", f.Name())
+
+ // For each target found, check whether it is closed and deferred
+ for _, targetValue := range targetValues {
+ refs := (*targetValue.value).Referrers()
+ isClosed := checkClosed(refs, targetTypes)
+ if !isClosed {
+ pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt was not closed")
+ }
+
+ checkDeferred(pass, refs, targetTypes, false)
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointer {
+ targets := []*types.Pointer{}
+
+ for _, sqlPkg := range targetPackages {
+ pkg := pssa.Pkg.Prog.ImportedPackage(sqlPkg)
+ if pkg == nil {
+ // the SQL package being checked isn't imported
+ return targets
+ }
+
+ rowsType := getTypePointerFromName(pkg, rowsName)
+ if rowsType != nil {
+ targets = append(targets, rowsType)
+ }
+
+ stmtType := getTypePointerFromName(pkg, stmtName)
+ if stmtType != nil {
+ targets = append(targets, stmtType)
+ }
+ }
+
+ return targets
+}
+
+func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer {
+ pkgType := pkg.Type(name)
+ if pkgType == nil {
+ // the package does not define this type
+ return nil
+ }
+
+ obj := pkgType.Object()
+ named, ok := obj.Type().(*types.Named)
+ if !ok {
+ return nil
+ }
+
+ return types.NewPointer(named)
+}
+
+type targetValue struct {
+ value *ssa.Value
+ instr ssa.Instruction
+}
+
+func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer) []targetValue {
+ targetValues := []targetValue{}
+
+ instr := b.Instrs[i]
+ call, ok := instr.(*ssa.Call)
+ if !ok {
+ return targetValues
+ }
+
+ signature := call.Call.Signature()
+ results := signature.Results()
+ for i := 0; i < results.Len(); i++ {
+ v := results.At(i)
+ varType := v.Type()
+
+ for _, targetType := range targetTypes {
+ if !types.Identical(varType, targetType) {
+ continue
+ }
+
+ for _, cRef := range *call.Referrers() {
+ switch instr := cRef.(type) {
+ case *ssa.Call:
+ if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), targetType) {
+ targetValues = append(targetValues, targetValue{
+ value: &instr.Call.Args[0],
+ instr: call,
+ })
+ }
+ case ssa.Value:
+ if types.Identical(instr.Type(), targetType) {
+ targetValues = append(targetValues, targetValue{
+ value: &instr,
+ instr: call,
+ })
+ }
+ }
+ }
+ }
+ }
+
+ return targetValues
+}
+
+func checkClosed(refs *[]ssa.Instruction, targetTypes []*types.Pointer) bool {
+ numInstrs := len(*refs)
+ for idx, ref := range *refs {
+ // log.Printf("%T - %s", ref, ref)
+
+ action := getAction(ref, targetTypes)
+ switch action {
+ case "closed":
+ return true
+ case "passed":
+ // Passed and not used after
+ if numInstrs == idx+1 {
+ return true
+ }
+ case "returned":
+ return true
+ case "handled":
+ return true
+ default:
+ // log.Printf(action)
+ }
+ }
+
+ return false
+}
+
+func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string {
+ switch instr := instr.(type) {
+ case *ssa.Defer:
+ if instr.Call.Value == nil {
+ return "unvalued defer"
+ }
+
+ name := instr.Call.Value.Name()
+ if name == closeMethod {
+ return "closed"
+ }
+ case *ssa.Call:
+ if instr.Call.Value == nil {
+ return "unvalued call"
+ }
+
+ isTarget := false
+ receiver := instr.Call.StaticCallee().Signature.Recv()
+ if receiver != nil {
+ isTarget = isTargetType(receiver.Type(), targetTypes)
+ }
+
+ name := instr.Call.Value.Name()
+ if isTarget && name == closeMethod {
+ return "closed"
+ }
+
+ if !isTarget {
+ return "passed"
+ }
+ case *ssa.Phi:
+ return "passed"
+ case *ssa.MakeInterface:
+ return "passed"
+ case *ssa.Store:
+ if len(*instr.Addr.Referrers()) == 0 {
+ return "noop"
+ }
+
+ for _, aRef := range *instr.Addr.Referrers() {
+ if c, ok := aRef.(*ssa.MakeClosure); ok {
+ f := c.Fn.(*ssa.Function)
+ for _, b := range f.Blocks {
+ if checkClosed(&b.Instrs, targetTypes) {
+ return "handled"
+ }
+ }
+ }
+ }
+ case *ssa.UnOp:
+ instrType := instr.Type()
+ for _, targetType := range targetTypes {
+ if types.Identical(instrType, targetType) {
+ if checkClosed(instr.Referrers(), targetTypes) {
+ return "handled"
+ }
+ }
+ }
+ case *ssa.FieldAddr:
+ if checkClosed(instr.Referrers(), targetTypes) {
+ return "handled"
+ }
+ case *ssa.Return:
+ return "returned"
+ default:
+ // log.Printf("%s", instr)
+ }
+
+ return "unhandled"
+}
+
+func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []*types.Pointer, inDefer bool) {
+ for _, instr := range *instrs {
+ switch instr := instr.(type) {
+ case *ssa.Defer:
+ if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod {
+ return
+ }
+ case *ssa.Call:
+ if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod {
+ if !inDefer {
+ pass.Reportf(instr.Pos(), "Close should use defer")
+ }
+
+ return
+ }
+ case *ssa.Store:
+ if len(*instr.Addr.Referrers()) == 0 {
+ return
+ }
+
+ for _, aRef := range *instr.Addr.Referrers() {
+ if c, ok := aRef.(*ssa.MakeClosure); ok {
+ f := c.Fn.(*ssa.Function)
+
+ for _, b := range f.Blocks {
+ checkDeferred(pass, &b.Instrs, targetTypes, true)
+ }
+ }
+ }
+ case *ssa.UnOp:
+ instrType := instr.Type()
+ for _, targetType := range targetTypes {
+ if types.Identical(instrType, targetType) {
+ checkDeferred(pass, instr.Referrers(), targetTypes, inDefer)
+ }
+ }
+ case *ssa.FieldAddr:
+ checkDeferred(pass, instr.Referrers(), targetTypes, inDefer)
+ }
+ }
+}
+
+func isTargetType(t types.Type, targetTypes []*types.Pointer) bool {
+ for _, targetType := range targetTypes {
+ if types.Identical(t, targetType) {
+ return true
+ }
+ }
+
+ return false
+}
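
As a point of reference for the analyzer above, here is a hedged sketch (not part of the vendored code; the query and table names are made up) of the kind of code sqlclosecheck targets: `*sql.Rows` values that are never closed, and `Close` calls that are not deferred.

```go
package example

import "database/sql"

// Flagged: rows is never closed, so the analyzer reports "Rows/Stmt was not closed".
func listNamesLeaky(db *sql.DB) error {
	rows, err := db.Query("SELECT name FROM users")
	if err != nil {
		return err
	}
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return err
		}
	}
	return rows.Err()
}

// Clean: the deferred Close satisfies both the "not closed" and the "Close should use defer" checks.
func listNames(db *sql.DB) error {
	rows, err := db.Query("SELECT name FROM users")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return err
		}
	}
	return rows.Err()
}
```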
diff --git a/vendor/github.com/securego/gosec/v2/Dockerfile b/vendor/github.com/securego/gosec/v2/Dockerfile
index a874697e..c937d525 100644
--- a/vendor/github.com/securego/gosec/v2/Dockerfile
+++ b/vendor/github.com/securego/gosec/v2/Dockerfile
@@ -8,7 +8,8 @@ RUN go mod download
RUN make build-linux
FROM golang:${GO_VERSION}-alpine
-RUN apk add --update --no-cache ca-certificates git gcc libc-dev
+RUN apk add --update --no-cache ca-certificates bash git gcc libc-dev
ENV GO111MODULE on
COPY --from=builder /build/gosec /bin/gosec
-ENTRYPOINT ["/bin/gosec"]
+COPY entrypoint.sh /bin/entrypoint.sh
+ENTRYPOINT ["/bin/entrypoint.sh"]
diff --git a/vendor/github.com/securego/gosec/v2/README.md b/vendor/github.com/securego/gosec/v2/README.md
index 52be7342..8fd10f16 100644
--- a/vendor/github.com/securego/gosec/v2/README.md
+++ b/vendor/github.com/securego/gosec/v2/README.md
@@ -41,7 +41,7 @@ wget -O - -q https://raw.githubusercontent.com/securego/gosec/master/install.sh
# then you will have to download a tar.gz file for your operating system instead of a binary file
wget https://github.com/securego/gosec/releases/download/vX.Y.Z/gosec_vX.Y.Z_OS.tar.gz
-# The file will be in the current folder where you run the command
+# The file will be in the current folder where you run the command
# and you can check the checksum like this
echo " gosec_vX.Y.Z_OS.tar.gz" | sha256sum -c -
@@ -66,7 +66,7 @@ jobs:
env:
GO111MODULE: on
steps:
- - name: Checkout Source
+ - name: Checkout Source
uses: actions/checkout@v2
- name: Run Gosec Security Scanner
uses: securego/gosec@master
@@ -114,11 +114,11 @@ directory you can supply `./...` as the input argument.
- G402: Look for bad TLS connection settings
- G403: Ensure minimum RSA key length of 2048 bits
- G404: Insecure random number source (rand)
-- G501: Import blacklist: crypto/md5
-- G502: Import blacklist: crypto/des
-- G503: Import blacklist: crypto/rc4
-- G504: Import blacklist: net/http/cgi
-- G505: Import blacklist: crypto/sha1
+- G501: Import blocklist: crypto/md5
+- G502: Import blocklist: crypto/des
+- G503: Import blocklist: crypto/rc4
+- G504: Import blocklist: net/http/cgi
+- G505: Import blocklist: crypto/sha1
- G601: Implicit memory aliasing of items from a range statement
### Retired rules
@@ -139,7 +139,7 @@ $ gosec -exclude=G303 ./...
```
### CWE Mapping
-Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/53be8dd8644ee48802114178cff6eb7e29757414/issue.go#L49).
+Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/master/issue.go#L49).
### Configuration
@@ -161,7 +161,7 @@ A number of global settings can be provided in a configuration file as follows:
# Run with a global configuration file
$ gosec -conf config.json .
```
-Also some rules accept configuration. For instance on rule `G104`, it is possible to define packages along with a list
+Also some rules accept configuration. For instance on rule `G104`, it is possible to define packages along with a list
of functions which will be skipped when auditing the not checked errors:
```JSON
@@ -186,14 +186,14 @@ You can also configure the hard-coded credentials rule `G101` with additional pa
}
```
-### Dependencies
+### Dependencies
gosec will fetch automatically the dependencies of the code which is being analyzed when go module is turned on (e.g.` GO111MODULE=on`). If this is not the case,
the dependencies need to be explicitly downloaded by running the `go get -d` command before the scan.
### Excluding test files and folders
-gosec will ignore test files across all packages and any dependencies in your vendor directory.
+gosec will ignore test files across all packages and any dependencies in your vendor directory.
The scanning of test files can be enabled with the following flag:
@@ -233,7 +233,7 @@ func main(){
```
When a specific false positive has been identified and verified as safe, you may wish to suppress only that single rule (or a specific set of rules)
-within a section of code, while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within
+within a section of code, while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within
the `#nosec` annotation, e.g: `/* #nosec G401 */` or `// #nosec G201 G202 G203`
In some cases you may also want to revisit places where `#nosec` annotations
@@ -300,7 +300,7 @@ You can also build locally the docker image by using the command:
make image
```
-You can run the `gosec` tool in a container against your local Go project. You only have to mount the project
+You can run the `gosec` tool in a container against your local Go project. You only have to mount the project
into a volume as follows:
```bash
@@ -324,3 +324,7 @@ go generate ./...
```
This will generate the `rules/tls_config.go` file which will contain the current ciphers recommendation from Mozilla.
+
+## Who is using gosec?
+
+This is a [list](USERS.md) of some of gosec's users.
diff --git a/vendor/github.com/securego/gosec/v2/USERS.md b/vendor/github.com/securego/gosec/v2/USERS.md
new file mode 100644
index 00000000..eac13d03
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/USERS.md
@@ -0,0 +1,26 @@
+# Users
+
+This is a list of gosec's users. Please send a pull request with your organisation or project name if you are using gosec.
+
+## Companies
+
+1. [Gitlab](https://docs.gitlab.com/ee/user/application_security/sast/)
+2. [CloudBees](https://cloudbees.com)
+3. [VMware](https://www.vmware.com)
+4. [Codacy](https://support.codacy.com/hc/en-us/articles/213632009-Engines)
+5. [Coinbase](https://github.com/coinbase/watchdog/blob/master/Makefile#L12)
+6. [RedHat/OpenShift](https://github.com/openshift/openshift-azure)
+7. [Guardalis](https://www.guardrails.io/)
+8. [1Password](https://github.com/1Password/srp)
+9. [PingCAP/tidb](https://github.com/pingcap/tidb)
+
+## Projects
+
+1. [golangci-lint](https://github.com/golangci/golangci-lint)
+2. [Kubernetes](https://github.com/kubernetes/kubernetes) (via golangci)
+3. [caddy](https://github.com/caddyserver/caddy) (via golangci)
+4. [Jenkins X](https://github.com/jenkins-x/jx/blob/bdc51840a41b75776159c1c7b7faa1cf477be473/hack/linter.sh#L25)
+5. [HuskyCI](https://huskyci.opensource.globo.com/)
+6. [GolangCI](https://golangci.com/)
+7. [semgrep.live](https://semgrep.live/)
+8. [gofiber](https://github.com/gofiber/fiber)
diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go
index ca4440c2..d4aae3ad 100644
--- a/vendor/github.com/securego/gosec/v2/analyzer.go
+++ b/vendor/github.com/securego/gosec/v2/analyzer.go
@@ -125,7 +125,12 @@ func (gosec *Analyzer) LoadRules(ruleDefinitions map[string]RuleBuilder) {
// Process kicks off the analysis process for a given package
func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error {
- config := gosec.pkgConfig(buildTags)
+ config := &packages.Config{
+ Mode: LoadMode,
+ BuildFlags: buildTags,
+ Tests: gosec.tests,
+ }
+
for _, pkgPath := range packagePaths {
pkgs, err := gosec.load(pkgPath, config)
if err != nil {
@@ -145,19 +150,6 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error
return nil
}
-func (gosec *Analyzer) pkgConfig(buildTags []string) *packages.Config {
- flags := []string{}
- if len(buildTags) > 0 {
- tagsFlag := "-tags=" + strings.Join(buildTags, " ")
- flags = append(flags, tagsFlag)
- }
- return &packages.Config{
- Mode: LoadMode,
- BuildFlags: flags,
- Tests: gosec.tests,
- }
-}
-
func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages.Package, error) {
abspath, err := GetPkgAbsPath(pkgPath)
if err != nil {
@@ -166,7 +158,11 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages.
}
gosec.logger.Println("Import directory:", abspath)
- basePackage, err := build.Default.ImportDir(pkgPath, build.ImportComment)
+ // step 1/3: create the build context.
+ buildD := build.Default
+ // step 2/3: add build tags so environment-dependent files are included in basePackage.
+ buildD.BuildTags = conf.BuildFlags
+ basePackage, err := buildD.ImportDir(pkgPath, build.ImportComment)
if err != nil {
return []*packages.Package{}, fmt.Errorf("importing dir %q: %v", pkgPath, err)
}
@@ -188,6 +184,8 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages.
}
}
+ // step 3/3: remove the build tags from conf so the subsequent package load proceeds correctly.
+ conf.BuildFlags = nil
pkgs, err := packages.Load(conf, packageFiles...)
if err != nil {
return []*packages.Package{}, fmt.Errorf("loading files from package %q: %v", pkgPath, err)
diff --git a/vendor/github.com/securego/gosec/v2/entrypoint.sh b/vendor/github.com/securego/gosec/v2/entrypoint.sh
new file mode 100644
index 00000000..4dc04672
--- /dev/null
+++ b/vendor/github.com/securego/gosec/v2/entrypoint.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+# Expand the arguments into an array of strings. This is required because the GitHub action
+# provides all arguments concatenated as a single string.
+ARGS=("$@")
+
+/bin/gosec ${ARGS[*]}
diff --git a/vendor/github.com/securego/gosec/v2/go.mod b/vendor/github.com/securego/gosec/v2/go.mod
index edfa3434..e2654af1 100644
--- a/vendor/github.com/securego/gosec/v2/go.mod
+++ b/vendor/github.com/securego/gosec/v2/go.mod
@@ -2,18 +2,17 @@ module github.com/securego/gosec/v2
require (
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/golang/protobuf v1.3.2 // indirect
- github.com/gookit/color v1.2.4
+ github.com/gookit/color v1.2.5
github.com/kr/pretty v0.1.0 // indirect
github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d
- github.com/onsi/ginkgo v1.12.0
- github.com/onsi/gomega v1.9.0
+ github.com/onsi/ginkgo v1.13.0
+ github.com/onsi/gomega v1.10.1
github.com/stretchr/testify v1.4.0 // indirect
golang.org/x/text v0.3.2 // indirect
- golang.org/x/tools v0.0.0-20200331202046-9d5940d49312
+ golang.org/x/tools v0.0.0-20200701041122-1837592efa10
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
- gopkg.in/yaml.v2 v2.2.8
+ gopkg.in/yaml.v2 v2.3.0
)
go 1.14
diff --git a/vendor/github.com/securego/gosec/v2/go.sum b/vendor/github.com/securego/gosec/v2/go.sum
index fff56a30..7f56b937 100644
--- a/vendor/github.com/securego/gosec/v2/go.sum
+++ b/vendor/github.com/securego/gosec/v2/go.sum
@@ -3,12 +3,24 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/gookit/color v1.2.4 h1:xOYBan3Fwlrqj1M1UN2TlHOCRiek3bGzWf/vPnJ1roE=
github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
+github.com/gookit/color v1.2.5 h1:s1gzb/fg3HhkSLKyWVUsZcVBUo+R1TwEYTmmxH8gGFg=
+github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -20,15 +32,21 @@ github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee h1:1xJ+Xi9
github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.12.3/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
@@ -45,6 +63,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -52,8 +71,12 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -62,11 +85,21 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200331202046-9d5940d49312 h1:2PHG+Ia3gK1K2kjxZnSylizb//eyaMG8gDFbOG7wLV8=
golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200601175630-2caf76543d99 h1:deddXmhOJb/bvD/4M/j2AUMrhHeh6GkqykJSCWyTNVk=
+golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200701041122-1837592efa10 h1:/dVa/Kj8QBudsXg83xokTMENAVrcMqZdhECHe1y2LJ0=
+golang.org/x/tools v0.0.0-20200701041122-1837592efa10/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -80,3 +113,5 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/securego/gosec/v2/helpers.go b/vendor/github.com/securego/gosec/v2/helpers.go
index 40dc8e9c..83dfa293 100644
--- a/vendor/github.com/securego/gosec/v2/helpers.go
+++ b/vendor/github.com/securego/gosec/v2/helpers.go
@@ -37,7 +37,6 @@ import (
// node, matched := MatchCallByPackage(n, ctx, "math/rand", "Read")
//
func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*ast.CallExpr, bool) {
-
importedName, found := GetImportedName(pkg, c)
if !found {
return nil, false
@@ -226,6 +225,27 @@ func GetIdentStringValues(ident *ast.Ident) []string {
return values
}
+// GetBinaryExprOperands returns all operands of a binary expression by traversing
+// the expression tree
+func GetBinaryExprOperands(be *ast.BinaryExpr) []ast.Node {
+ var traverse func(be *ast.BinaryExpr)
+ result := []ast.Node{}
+ traverse = func(be *ast.BinaryExpr) {
+ if lhs, ok := be.X.(*ast.BinaryExpr); ok {
+ traverse(lhs)
+ } else {
+ result = append(result, be.X)
+ }
+ if rhs, ok := be.Y.(*ast.BinaryExpr); ok {
+ traverse(rhs)
+ } else {
+ result = append(result, be.Y)
+ }
+ }
+ traverse(be)
+ return result
+}
+
// GetImportedName returns the name used for the package within the
// code. It will resolve aliases and ignores initialization only imports.
func GetImportedName(path string, ctx *Context) (string, bool) {
diff --git a/vendor/github.com/securego/gosec/v2/issue.go b/vendor/github.com/securego/gosec/v2/issue.go
index 28ad726b..aa58c343 100644
--- a/vendor/github.com/securego/gosec/v2/issue.go
+++ b/vendor/github.com/securego/gosec/v2/issue.go
@@ -15,9 +15,12 @@
package gosec
import (
+ "bufio"
+ "bytes"
"encoding/json"
"fmt"
"go/ast"
+ "go/token"
"os"
"strconv"
)
@@ -34,6 +37,10 @@ const (
High
)
+// SnippetOffset defines the number of lines captured before
+// the beginning and after the end of a code snippet
+const SnippetOffset = 1
+
// Cwe id and url
type Cwe struct {
ID string
@@ -53,6 +60,7 @@ var IssueToCWE = map[string]Cwe{
"G104": GetCwe("703"),
"G106": GetCwe("322"),
"G107": GetCwe("88"),
+ "G108": GetCwe("200"),
"G109": GetCwe("190"),
"G110": GetCwe("409"),
"G201": GetCwe("89"),
@@ -64,6 +72,8 @@ var IssueToCWE = map[string]Cwe{
"G303": GetCwe("377"),
"G304": GetCwe("22"),
"G305": GetCwe("22"),
+ "G306": GetCwe("276"),
+ "G307": GetCwe("703"),
"G401": GetCwe("326"),
"G402": GetCwe("295"),
"G403": GetCwe("310"),
@@ -73,6 +83,7 @@ var IssueToCWE = map[string]Cwe{
"G503": GetCwe("327"),
"G504": GetCwe("327"),
"G505": GetCwe("327"),
+ "G601": GetCwe("118"),
}
// Issue is returned by a gosec rule if it discovers an issue with the scanned code.
@@ -120,43 +131,56 @@ func (c Score) String() string {
return "UNDEFINED"
}
+// codeSnippet extracts a code snippet based on the ast reference
func codeSnippet(file *os.File, start int64, end int64, n ast.Node) (string, error) {
if n == nil {
- return "", fmt.Errorf("Invalid AST node provided")
+ return "", fmt.Errorf("invalid AST node provided")
}
+ var pos int64
+ var buf bytes.Buffer
+ scanner := bufio.NewScanner(file)
+ scanner.Split(bufio.ScanLines)
+ for scanner.Scan() {
+ pos++
+ if pos > end {
+ break
+ } else if pos >= start && pos <= end {
+ code := fmt.Sprintf("%d: %s\n", pos, scanner.Text())
+ buf.WriteString(code)
+ }
+ }
+ return buf.String(), nil
+}
- size := (int)(end - start) // Go bug, os.File.Read should return int64 ...
- _, err := file.Seek(start, 0) // #nosec
- if err != nil {
- return "", fmt.Errorf("move to the beginning of file: %v", err)
+func codeSnippetStartLine(node ast.Node, fobj *token.File) int64 {
+ s := (int64)(fobj.Line(node.Pos()))
+ if s-SnippetOffset > 0 {
+ return s - SnippetOffset
}
+ return s
+}
- buf := make([]byte, size)
- if nread, err := file.Read(buf); err != nil || nread != size {
- return "", fmt.Errorf("Unable to read code")
- }
- return string(buf), nil
+func codeSnippetEndLine(node ast.Node, fobj *token.File) int64 {
+ e := (int64)(fobj.Line(node.End()))
+ return e + SnippetOffset
}
// NewIssue creates a new Issue
func NewIssue(ctx *Context, node ast.Node, ruleID, desc string, severity Score, confidence Score) *Issue {
- var code string
fobj := ctx.FileSet.File(node.Pos())
name := fobj.Name()
-
start, end := fobj.Line(node.Pos()), fobj.Line(node.End())
line := strconv.Itoa(start)
if start != end {
line = fmt.Sprintf("%d-%d", start, end)
}
-
col := strconv.Itoa(fobj.Position(node.Pos()).Column)
- // #nosec
+ var code string
if file, err := os.Open(fobj.Name()); err == nil {
- defer file.Close()
- s := (int64)(fobj.Position(node.Pos()).Offset) // Go bug, should be int64
- e := (int64)(fobj.Position(node.End()).Offset) // Go bug, should be int64
+ defer file.Close() // #nosec
+ s := codeSnippetStartLine(node, fobj)
+ e := codeSnippetEndLine(node, fobj)
code, err = codeSnippet(file, s, e, node)
if err != nil {
code = err.Error()
diff --git a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go b/vendor/github.com/securego/gosec/v2/rules/bad_defer.go
index 3c358806..b33a0477 100644
--- a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go
+++ b/vendor/github.com/securego/gosec/v2/rules/bad_defer.go
@@ -40,7 +40,7 @@ func (r *badDefer) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
for _, deferTyp := range r.types {
if typ, method, err := gosec.GetCallInfo(deferStmt.Call, c); err == nil {
if normalize(typ) == deferTyp.typ && contains(deferTyp.methods, method) {
- return gosec.NewIssue(c, n, r.ID(), fmt.Sprintf(r.What, typ, method), r.Severity, r.Confidence), nil
+ return gosec.NewIssue(c, n, r.ID(), fmt.Sprintf(r.What, method, typ), r.Severity, r.Confidence), nil
}
}
}
diff --git a/vendor/github.com/securego/gosec/v2/rules/blacklist.go b/vendor/github.com/securego/gosec/v2/rules/blocklist.go
similarity index 50%
rename from vendor/github.com/securego/gosec/v2/rules/blacklist.go
rename to vendor/github.com/securego/gosec/v2/rules/blocklist.go
index 9bb73381..afd4ee56 100644
--- a/vendor/github.com/securego/gosec/v2/rules/blacklist.go
+++ b/vendor/github.com/securego/gosec/v2/rules/blocklist.go
@@ -21,9 +21,9 @@ import (
"github.com/securego/gosec/v2"
)
-type blacklistedImport struct {
+type blocklistedImport struct {
gosec.MetaData
- Blacklisted map[string]string
+ Blocklisted map[string]string
}
func unquote(original string) string {
@@ -32,63 +32,63 @@ func unquote(original string) string {
return strings.TrimRight(copy, `"`)
}
-func (r *blacklistedImport) ID() string {
+func (r *blocklistedImport) ID() string {
return r.MetaData.ID
}
-func (r *blacklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
if node, ok := n.(*ast.ImportSpec); ok {
- if description, ok := r.Blacklisted[unquote(node.Path.Value)]; ok {
+ if description, ok := r.Blocklisted[unquote(node.Path.Value)]; ok {
return gosec.NewIssue(c, node, r.ID(), description, r.Severity, r.Confidence), nil
}
}
return nil, nil
}
-// NewBlacklistedImports reports when a blacklisted import is being used.
+// NewBlocklistedImports reports when a blocklisted import is being used.
// Typically when a deprecated technology is being used.
-func NewBlacklistedImports(id string, conf gosec.Config, blacklist map[string]string) (gosec.Rule, []ast.Node) {
- return &blacklistedImport{
+func NewBlocklistedImports(id string, conf gosec.Config, blocklist map[string]string) (gosec.Rule, []ast.Node) {
+ return &blocklistedImport{
MetaData: gosec.MetaData{
ID: id,
Severity: gosec.Medium,
Confidence: gosec.High,
},
- Blacklisted: blacklist,
+ Blocklisted: blocklist,
}, []ast.Node{(*ast.ImportSpec)(nil)}
}
-// NewBlacklistedImportMD5 fails if MD5 is imported
-func NewBlacklistedImportMD5(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
- return NewBlacklistedImports(id, conf, map[string]string{
- "crypto/md5": "Blacklisted import crypto/md5: weak cryptographic primitive",
+// NewBlocklistedImportMD5 fails if MD5 is imported
+func NewBlocklistedImportMD5(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlocklistedImports(id, conf, map[string]string{
+ "crypto/md5": "Blocklisted import crypto/md5: weak cryptographic primitive",
})
}
-// NewBlacklistedImportDES fails if DES is imported
-func NewBlacklistedImportDES(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
- return NewBlacklistedImports(id, conf, map[string]string{
- "crypto/des": "Blacklisted import crypto/des: weak cryptographic primitive",
+// NewBlocklistedImportDES fails if DES is imported
+func NewBlocklistedImportDES(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlocklistedImports(id, conf, map[string]string{
+ "crypto/des": "Blocklisted import crypto/des: weak cryptographic primitive",
})
}
-// NewBlacklistedImportRC4 fails if DES is imported
-func NewBlacklistedImportRC4(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
- return NewBlacklistedImports(id, conf, map[string]string{
- "crypto/rc4": "Blacklisted import crypto/rc4: weak cryptographic primitive",
+// NewBlocklistedImportRC4 fails if RC4 is imported
+func NewBlocklistedImportRC4(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlocklistedImports(id, conf, map[string]string{
+ "crypto/rc4": "Blocklisted import crypto/rc4: weak cryptographic primitive",
})
}
-// NewBlacklistedImportCGI fails if CGI is imported
-func NewBlacklistedImportCGI(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
- return NewBlacklistedImports(id, conf, map[string]string{
- "net/http/cgi": "Blacklisted import net/http/cgi: Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386)",
+// NewBlocklistedImportCGI fails if CGI is imported
+func NewBlocklistedImportCGI(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlocklistedImports(id, conf, map[string]string{
+ "net/http/cgi": "Blocklisted import net/http/cgi: Go versions < 1.6.3 are vulnerable to Httpoxy attack: (CVE-2016-5386)",
})
}
-// NewBlacklistedImportSHA1 fails if SHA1 is imported
-func NewBlacklistedImportSHA1(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
- return NewBlacklistedImports(id, conf, map[string]string{
- "crypto/sha1": "Blacklisted import crypto/sha1: weak cryptographic primitive",
+// NewBlocklistedImportSHA1 fails if SHA1 is imported
+func NewBlocklistedImportSHA1(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
+ return NewBlocklistedImports(id, conf, map[string]string{
+ "crypto/sha1": "Blocklisted import crypto/sha1: weak cryptographic primitive",
})
}
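
For illustration (not part of this diff), the renamed blocklist rules fire on the import declaration itself; a file such as the following would be reported as G501 with the message defined above:

```go
package example

import (
	"crypto/md5" // G501: Blocklisted import crypto/md5: weak cryptographic primitive
	"fmt"
)

// fingerprint exists only so the import is used; the finding is attached to the import spec.
func fingerprint(data []byte) string {
	return fmt.Sprintf("%x", md5.Sum(data))
}
```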
diff --git a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go
index 65c7ae36..b2668dec 100644
--- a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go
+++ b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go
@@ -1,10 +1,10 @@
package rules
import (
- "fmt"
- "github.com/securego/gosec/v2"
"go/ast"
"go/token"
+
+ "github.com/securego/gosec/v2"
)
type implicitAliasing struct {
@@ -33,20 +33,23 @@ func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, er
// When presented with a range statement, get the underlying Object bound to
// by assignment and add it to our set (r.aliases) of objects to check for.
if key, ok := node.Value.(*ast.Ident); ok {
- if assignment, ok := key.Obj.Decl.(*ast.AssignStmt); ok {
- if len(assignment.Lhs) < 2 {
- return nil, nil
- }
+ if key.Obj != nil {
+ if assignment, ok := key.Obj.Decl.(*ast.AssignStmt); ok {
+ if len(assignment.Lhs) < 2 {
+ return nil, nil
+ }
- if object, ok := assignment.Lhs[1].(*ast.Ident); ok {
- r.aliases[object.Obj] = struct{}{}
+ if object, ok := assignment.Lhs[1].(*ast.Ident); ok {
+ r.aliases[object.Obj] = struct{}{}
- if r.rightBrace < node.Body.Rbrace {
- r.rightBrace = node.Body.Rbrace
+ if r.rightBrace < node.Body.Rbrace {
+ r.rightBrace = node.Body.Rbrace
+ }
}
}
}
}
+
case *ast.UnaryExpr:
// If this unary expression is outside of the last range statement we were looking at
// then clear the list of objects we're concerned about because they're no longer in
@@ -95,7 +98,7 @@ func NewImplicitAliasing(id string, conf gosec.Config) (gosec.Rule, []ast.Node)
ID: id,
Severity: gosec.Medium,
Confidence: gosec.Medium,
- What: fmt.Sprintf("Implicit memory aliasing in for loop."),
+ What: "Implicit memory aliasing in for loop.",
},
}, []ast.Node{(*ast.RangeStmt)(nil), (*ast.UnaryExpr)(nil), (*ast.ReturnStmt)(nil)}
}
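
A minimal sketch of what G601 targets (illustrative only): taking the address of the range variable aliases the same storage on every iteration, so on Go versions prior to 1.22 every stored pointer ends up referring to the final element.

```go
package example

// Flagged: &v aliases the loop variable, so every entry in out points at the same variable.
func collectPointersAliased(values []int) []*int {
	out := make([]*int, 0, len(values))
	for _, v := range values {
		out = append(out, &v)
	}
	return out
}

// The inner copy is a distinct object, so the aliasing concern no longer applies.
func collectPointers(values []int) []*int {
	out := make([]*int, 0, len(values))
	for _, v := range values {
		v := v
		out = append(out, &v)
	}
	return out
}
```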
diff --git a/vendor/github.com/securego/gosec/v2/rules/rand.go b/vendor/github.com/securego/gosec/v2/rules/rand.go
index 08c28fca..bf86b762 100644
--- a/vendor/github.com/securego/gosec/v2/rules/rand.go
+++ b/vendor/github.com/securego/gosec/v2/rules/rand.go
@@ -43,7 +43,8 @@ func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
// NewWeakRandCheck detects the use of random number generator that isn't cryptographically secure
func NewWeakRandCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
return &weakRand{
- funcNames: []string{"Read", "Int"},
+ funcNames: []string{"New", "Read", "Float32", "Float64", "Int", "Int31",
+ "Int31n", "Int63", "Int63n", "Intn", "NormalFloat64", "Uint32", "Uint64"},
packagePath: "math/rand",
MetaData: gosec.MetaData{
ID: id,
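
A hedged example of what the expanded G404 function list covers (the function bodies below are illustrative): any of the listed math/rand functions used for a security-sensitive value is reported, while crypto/rand is the accepted source.

```go
package example

import (
	cryptorand "crypto/rand"
	"math/rand"
)

// Flagged by G404: math/rand.Intn is in the expanded function list and is not
// cryptographically secure.
func weakOTP() int {
	return rand.Intn(1000000)
}

// Not flagged: crypto/rand reads from the operating system's CSPRNG.
func strongToken() ([]byte, error) {
	b := make([]byte, 16)
	if _, err := cryptorand.Read(b); err != nil {
		return nil, err
	}
	return b, nil
}
```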
diff --git a/vendor/github.com/securego/gosec/v2/rules/readfile.go b/vendor/github.com/securego/gosec/v2/rules/readfile.go
index a52f7425..459b4ad2 100644
--- a/vendor/github.com/securego/gosec/v2/rules/readfile.go
+++ b/vendor/github.com/securego/gosec/v2/rules/readfile.go
@@ -102,5 +102,6 @@ func NewReadFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
rule.pathJoin.Add("path", "Join")
rule.Add("io/ioutil", "ReadFile")
rule.Add("os", "Open")
+ rule.Add("os", "OpenFile")
return rule, []ast.Node{(*ast.CallExpr)(nil)}
}
diff --git a/vendor/github.com/securego/gosec/v2/rules/rulelist.go b/vendor/github.com/securego/gosec/v2/rules/rulelist.go
index 06e1dfb9..a3d9ca2f 100644
--- a/vendor/github.com/securego/gosec/v2/rules/rulelist.go
+++ b/vendor/github.com/securego/gosec/v2/rules/rulelist.go
@@ -90,12 +90,12 @@ func Generate(filters ...RuleFilter) RuleList {
{"G403", "Ensure minimum RSA key length of 2048 bits", NewWeakKeyStrength},
{"G404", "Insecure random number source (rand)", NewWeakRandCheck},
- // blacklist
- {"G501", "Import blacklist: crypto/md5", NewBlacklistedImportMD5},
- {"G502", "Import blacklist: crypto/des", NewBlacklistedImportDES},
- {"G503", "Import blacklist: crypto/rc4", NewBlacklistedImportRC4},
- {"G504", "Import blacklist: net/http/cgi", NewBlacklistedImportCGI},
- {"G505", "Import blacklist: crypto/sha1", NewBlacklistedImportSHA1},
+ // blocklist
+ {"G501", "Import blocklist: crypto/md5", NewBlocklistedImportMD5},
+ {"G502", "Import blocklist: crypto/des", NewBlocklistedImportDES},
+ {"G503", "Import blocklist: crypto/rc4", NewBlocklistedImportRC4},
+ {"G504", "Import blocklist: net/http/cgi", NewBlocklistedImportCGI},
+ {"G505", "Import blocklist: crypto/sha1", NewBlocklistedImportSHA1},
// memory safety
{"G601", "Implicit memory aliasing in RangeStmt", NewImplicitAliasing},
diff --git a/vendor/github.com/securego/gosec/v2/rules/sql.go b/vendor/github.com/securego/gosec/v2/rules/sql.go
index 3279a340..127dec50 100644
--- a/vendor/github.com/securego/gosec/v2/rules/sql.go
+++ b/vendor/github.com/securego/gosec/v2/rules/sql.go
@@ -17,12 +17,14 @@ package rules
import (
"go/ast"
"regexp"
+ "strings"
"github.com/securego/gosec/v2"
)
type sqlStatement struct {
gosec.MetaData
+ gosec.CallList
// Contains a list of patterns which must all match for the rule to match.
patterns []*regexp.Regexp
@@ -65,22 +67,54 @@ func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool {
return false
}
-// Look for "SELECT * FROM table WHERE " + " ' OR 1=1"
-func (s *sqlStrConcat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
- if node, ok := n.(*ast.BinaryExpr); ok {
- if start, ok := node.X.(*ast.BasicLit); ok {
+// checkQuery verifies whether the query argument is built by string concatenation
+func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) {
+ _, fnName, err := gosec.GetCallInfo(call, ctx)
+ if err != nil {
+ return nil, err
+ }
+ var query ast.Node
+ if strings.HasSuffix(fnName, "Context") {
+ query = call.Args[1]
+ } else {
+ query = call.Args[0]
+ }
+
+ if be, ok := query.(*ast.BinaryExpr); ok {
+ operands := gosec.GetBinaryExprOperands(be)
+ if start, ok := operands[0].(*ast.BasicLit); ok {
if str, e := gosec.GetString(start); e == nil {
if !s.MatchPatterns(str) {
return nil, nil
}
- if _, ok := node.Y.(*ast.BasicLit); ok {
- return nil, nil // string cat OK
- }
- if second, ok := node.Y.(*ast.Ident); ok && s.checkObject(second, c) {
- return nil, nil
- }
- return gosec.NewIssue(c, n, s.ID(), s.What, s.Severity, s.Confidence), nil
}
+ for _, op := range operands[1:] {
+ if _, ok := op.(*ast.BasicLit); ok {
+ continue
+ }
+ if op, ok := op.(*ast.Ident); ok && s.checkObject(op, ctx) {
+ continue
+ }
+ return gosec.NewIssue(ctx, be, s.ID(), s.What, s.Severity, s.Confidence), nil
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+// Checks SQL query concatenation issues such as "SELECT * FROM table WHERE " + " ' OR 1=1"
+func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
+ switch stmt := n.(type) {
+ case *ast.AssignStmt:
+ for _, expr := range stmt.Rhs {
+ if sqlQueryCall, ok := expr.(*ast.CallExpr); ok && s.ContainsCallExpr(expr, ctx) != nil {
+ return s.checkQuery(sqlQueryCall, ctx)
+ }
+ }
+ case *ast.ExprStmt:
+ if sqlQueryCall, ok := stmt.X.(*ast.CallExpr); ok && s.ContainsCallExpr(stmt.X, ctx) != nil {
+ return s.checkQuery(sqlQueryCall, ctx)
}
}
return nil, nil
@@ -88,10 +122,10 @@ func (s *sqlStrConcat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error)
// NewSQLStrConcat looks for cases where we are building SQL strings via concatenation
func NewSQLStrConcat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
- return &sqlStrConcat{
+ rule := &sqlStrConcat{
sqlStatement: sqlStatement{
patterns: []*regexp.Regexp{
- regexp.MustCompile(`(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `),
+ regexp.MustCompile(`(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `),
},
MetaData: gosec.MetaData{
ID: id,
@@ -99,13 +133,19 @@ func NewSQLStrConcat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
Confidence: gosec.High,
What: "SQL string concatenation",
},
+ CallList: gosec.NewCallList(),
},
- }, []ast.Node{(*ast.BinaryExpr)(nil)}
+ }
+
+ rule.AddAll("*database/sql.DB", "Query", "QueryContext", "QueryRow", "QueryRowContext")
+ rule.AddAll("*database/sql.Tx", "Query", "QueryContext", "QueryRow", "QueryRowContext")
+ return rule, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)}
}
type sqlStrFormat struct {
+ gosec.CallList
sqlStatement
- calls gosec.CallList
+ fmtCalls gosec.CallList
noIssue gosec.CallList
noIssueQuoted gosec.CallList
}
@@ -130,14 +170,37 @@ func (s *sqlStrFormat) constObject(e ast.Expr, c *gosec.Context) bool {
return false
}
-// Looks for "fmt.Sprintf("SELECT * FROM foo where '%s', userInput)"
-func (s *sqlStrFormat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
+func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) {
+ _, fnName, err := gosec.GetCallInfo(call, ctx)
+ if err != nil {
+ return nil, err
+ }
+ var query ast.Node
+ if strings.HasSuffix(fnName, "Context") {
+ query = call.Args[1]
+ } else {
+ query = call.Args[0]
+ }
+ if ident, ok := query.(*ast.Ident); ok && ident.Obj != nil {
+ decl := ident.Obj.Decl
+ if assign, ok := decl.(*ast.AssignStmt); ok {
+ for _, expr := range assign.Rhs {
+ issue, err := s.checkFormatting(expr, ctx)
+ if issue != nil {
+ return issue, err
+ }
+ }
+ }
+ }
+
+ return nil, nil
+}
+
+func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
// argIndex changes the function argument which gets matched to the regex
argIndex := 0
-
- // TODO(gm) improve confidence if database/sql is being used
- if node := s.calls.ContainsPkgCallExpr(n, c, false); node != nil {
+ if node := s.fmtCalls.ContainsPkgCallExpr(n, ctx, false); node != nil {
// if the function is fmt.Fprintf, search for SQL statement in Args[1] instead
if sel, ok := node.Fun.(*ast.SelectorExpr); ok {
if sel.Sel.Name == "Fprintf" {
@@ -177,7 +240,7 @@ func (s *sqlStrFormat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error)
if argIndex+1 < len(node.Args) {
allSafe := true
for _, arg := range node.Args[argIndex+1:] {
- if n := s.noIssueQuoted.ContainsPkgCallExpr(arg, c, true); n == nil && !s.constObject(arg, c) {
+ if n := s.noIssueQuoted.ContainsPkgCallExpr(arg, ctx, true); n == nil && !s.constObject(arg, ctx) {
allSafe = false
break
}
@@ -187,7 +250,24 @@ func (s *sqlStrFormat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error)
}
}
if s.MatchPatterns(formatter) {
- return gosec.NewIssue(c, n, s.ID(), s.What, s.Severity, s.Confidence), nil
+ return gosec.NewIssue(ctx, n, s.ID(), s.What, s.Severity, s.Confidence), nil
+ }
+ }
+ return nil, nil
+}
+
+// Check SQL query formatting issues such as "fmt.Sprintf("SELECT * FROM foo where '%s', userInput)"
+func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) {
+ switch stmt := n.(type) {
+ case *ast.AssignStmt:
+ for _, expr := range stmt.Rhs {
+ if sqlQueryCall, ok := expr.(*ast.CallExpr); ok && s.ContainsCallExpr(expr, ctx) != nil {
+ return s.checkQuery(sqlQueryCall, ctx)
+ }
+ }
+ case *ast.ExprStmt:
+ if sqlQueryCall, ok := stmt.X.(*ast.CallExpr); ok && s.ContainsCallExpr(stmt.X, ctx) != nil {
+ return s.checkQuery(sqlQueryCall, ctx)
}
}
return nil, nil
@@ -196,12 +276,13 @@ func (s *sqlStrFormat) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error)
// NewSQLStrFormat looks for cases where we're building SQL query strings using format strings
func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
rule := &sqlStrFormat{
- calls: gosec.NewCallList(),
+ CallList: gosec.NewCallList(),
+ fmtCalls: gosec.NewCallList(),
noIssue: gosec.NewCallList(),
noIssueQuoted: gosec.NewCallList(),
sqlStatement: sqlStatement{
patterns: []*regexp.Regexp{
- regexp.MustCompile("(?)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) "),
+ regexp.MustCompile("(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) "),
regexp.MustCompile("%[^bdoxXfFp]"),
},
MetaData: gosec.MetaData{
@@ -212,8 +293,11 @@ func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) {
},
},
}
- rule.calls.AddAll("fmt", "Sprint", "Sprintf", "Sprintln", "Fprintf")
+ rule.AddAll("*database/sql.DB", "Query", "QueryContext", "QueryRow", "QueryRowContext")
+ rule.AddAll("*database/sql.Tx", "Query", "QueryContext", "QueryRow", "QueryRowContext")
+ rule.fmtCalls.AddAll("fmt", "Sprint", "Sprintf", "Sprintln", "Fprintf")
rule.noIssue.AddAll("os", "Stdout", "Stderr")
rule.noIssueQuoted.Add("github.com/lib/pq", "QuoteIdentifier")
- return rule, []ast.Node{(*ast.CallExpr)(nil)}
+
+ return rule, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ExprStmt)(nil)}
}
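
To make the reworked concatenation matching concrete, here is a sketch under the assumptions above (the call list registered on *database/sql.DB and *database/sql.Tx; table and column names are made up): the rule now walks assignment and expression statements whose right-hand side is one of those calls and inspects the query argument for concatenation.

```go
package example

import "database/sql"

// Typically flagged: the query passed to db.Query is a string concatenation that
// mixes a SQL literal with a non-constant identifier.
func findUserUnsafe(db *sql.DB, name string) error {
	rows, err := db.Query("SELECT * FROM users WHERE name = '" + name + "'")
	if err != nil {
		return err
	}
	return rows.Close()
}

// Not flagged: the query string is constant and the value travels as a bind parameter.
func findUser(db *sql.DB, name string) error {
	rows, err := db.Query("SELECT * FROM users WHERE name = ?", name)
	if err != nil {
		return err
	}
	return rows.Close()
}
```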
diff --git a/vendor/github.com/securego/gosec/v2/rules/tls.go b/vendor/github.com/securego/gosec/v2/rules/tls.go
index fab9ee16..554378f4 100644
--- a/vendor/github.com/securego/gosec/v2/rules/tls.go
+++ b/vendor/github.com/securego/gosec/v2/rules/tls.go
@@ -17,6 +17,7 @@
package rules
import (
+ "crypto/tls"
"fmt"
"go/ast"
@@ -25,10 +26,12 @@ import (
type insecureConfigTLS struct {
gosec.MetaData
- MinVersion int16
- MaxVersion int16
- requiredType string
- goodCiphers []string
+ MinVersion int16
+ MaxVersion int16
+ requiredType string
+ goodCiphers []string
+ actualMinVersion int16
+ actualMaxVersion int16
}
func (t *insecureConfigTLS) ID() string {
@@ -45,7 +48,6 @@ func stringInSlice(a string, list []string) bool {
}
func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *gosec.Issue {
-
if ciphers, ok := n.(*ast.CompositeLit); ok {
for _, cipher := range ciphers.Elts {
if ident, ok := cipher.(*ast.SelectorExpr); ok {
@@ -62,7 +64,6 @@ func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context)
func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Context) *gosec.Issue {
if ident, ok := n.Key.(*ast.Ident); ok {
switch ident.Name {
-
case "InsecureSkipVerify":
if node, ok := n.Value.(*ast.Ident); ok {
if node.Name != "false" {
@@ -85,20 +86,24 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont
case "MinVersion":
if ival, ierr := gosec.GetInt(n.Value); ierr == nil {
- if (int16)(ival) < t.MinVersion {
- return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion too low.", gosec.High, gosec.High)
+ t.actualMinVersion = (int16)(ival)
+ } else {
+ if se, ok := n.Value.(*ast.SelectorExpr); ok {
+ if pkg, ok := se.X.(*ast.Ident); ok && pkg.Name == "tls" {
+ t.actualMinVersion = t.mapVersion(se.Sel.Name)
+ }
}
- // TODO(tk): symbol tab look up to get the actual value
- return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion may be too low.", gosec.High, gosec.Low)
}
case "MaxVersion":
if ival, ierr := gosec.GetInt(n.Value); ierr == nil {
- if (int16)(ival) < t.MaxVersion {
- return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion too low.", gosec.High, gosec.High)
+ t.actualMaxVersion = (int16)(ival)
+ } else {
+ if se, ok := n.Value.(*ast.SelectorExpr); ok {
+ if pkg, ok := se.X.(*ast.Ident); ok && pkg.Name == "tls" {
+ t.actualMaxVersion = t.mapVersion(se.Sel.Name)
+ }
}
- // TODO(tk): symbol tab look up to get the actual value
- return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion may be too low.", gosec.High, gosec.Low)
}
case "CipherSuites":
@@ -112,6 +117,35 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont
return nil
}
+func (t *insecureConfigTLS) mapVersion(version string) int16 {
+ var v int16
+ switch version {
+ case "VersionTLS13":
+ v = tls.VersionTLS13
+ case "VersionTLS12":
+ v = tls.VersionTLS12
+ case "VersionTLS11":
+ v = tls.VersionTLS11
+ case "VersionTLS10":
+ v = tls.VersionTLS10
+ }
+ return v
+}
+
+func (t *insecureConfigTLS) checkVersion(n ast.Node, c *gosec.Context) *gosec.Issue {
+ if t.actualMaxVersion == 0 && t.actualMinVersion >= t.MinVersion {
+ // no warning is generated since the min version is greater than or equal to the secure min version
+ return nil
+ }
+ if t.actualMinVersion < t.MinVersion {
+ return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion too low.", gosec.High, gosec.High)
+ }
+ if t.actualMaxVersion < t.MaxVersion {
+ return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion too low.", gosec.High, gosec.High)
+ }
+ return nil
+}
+
func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
if complit, ok := n.(*ast.CompositeLit); ok && complit.Type != nil {
actualType := c.Info.TypeOf(complit.Type)
@@ -124,6 +158,7 @@ func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, e
}
}
}
+ return t.checkVersion(complit, c), nil
}
}
return nil, nil
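
An illustrative sketch of the behaviour change (assuming the rule's configured minimum is TLS 1.2, as in gosec's stricter profiles): the symbolic tls.Version* constants are now resolved via mapVersion and compared in checkVersion, instead of always producing a low-confidence warning.

```go
package example

import (
	"crypto/tls"
	"net/http"
)

// Reported as "TLS MinVersion too low." when the configured minimum is TLS 1.2:
// mapVersion resolves tls.VersionTLS10 and checkVersion compares it against the threshold.
func clientWithLowMinVersion() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS10},
		},
	}
}

// Produces no issue: the resolved MinVersion meets the configured minimum.
func clientWithSaneMinVersion() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
		},
	}
}
```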
diff --git a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go
index 0e45393d..eecb88f0 100644
--- a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go
+++ b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go
@@ -22,7 +22,7 @@ import (
type usesWeakCryptography struct {
gosec.MetaData
- blacklist map[string][]string
+ blocklist map[string][]string
}
func (r *usesWeakCryptography) ID() string {
@@ -30,7 +30,7 @@ func (r *usesWeakCryptography) ID() string {
}
func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) {
- for pkg, funcs := range r.blacklist {
+ for pkg, funcs := range r.blocklist {
if _, matched := gosec.MatchCallByPackage(n, c, pkg, funcs...); matched {
return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil
}
@@ -46,7 +46,7 @@ func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.No
calls["crypto/sha1"] = []string{"New", "Sum"}
calls["crypto/rc4"] = []string{"NewCipher"}
rule := &usesWeakCryptography{
- blacklist: calls,
+ blocklist: calls,
MetaData: gosec.MetaData{
ID: id,
Severity: gosec.Medium,
diff --git a/vendor/github.com/shazow/go-diff/LICENSE b/vendor/github.com/shazow/go-diff/LICENSE
new file mode 100644
index 00000000..85e1e4b3
--- /dev/null
+++ b/vendor/github.com/shazow/go-diff/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Andrey Petrov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/shazow/go-diff/difflib/differ.go b/vendor/github.com/shazow/go-diff/difflib/differ.go
new file mode 100644
index 00000000..43dc84d9
--- /dev/null
+++ b/vendor/github.com/shazow/go-diff/difflib/differ.go
@@ -0,0 +1,39 @@
+// Package difflib implements the diff.Differ interface using github.com/pmezard/go-difflib as a backend.
+package difflib
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/pmezard/go-difflib/difflib"
+)
+
+type differ struct{}
+
+// New returns an implementation of diff.Differ using go-difflib as the backend.
+func New() *differ {
+ return &differ{}
+}
+
+// Diff consumes both reader streams entirely into memory before generating a diff,
+// which is then written to out. This implementation stores and
+// manipulates all three values in memory.
+func (diff *differ) Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error {
+ var src, dst []byte
+ var err error
+
+ if src, err = ioutil.ReadAll(a); err != nil {
+ return err
+ }
+ if dst, err = ioutil.ReadAll(b); err != nil {
+ return err
+ }
+
+ d := difflib.UnifiedDiff{
+ A: difflib.SplitLines(string(src)),
+ B: difflib.SplitLines(string(dst)),
+ Context: 3,
+ }
+
+ return difflib.WriteUnifiedDiff(out, d)
+}
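
A quick usage sketch for the vendored differ above (illustrative; the inputs are arbitrary): New returns a value whose Diff method matches the signature shown and writes a unified diff with three lines of context to any io.Writer.

```go
package main

import (
	"os"
	"strings"

	"github.com/shazow/go-diff/difflib"
)

func main() {
	d := difflib.New()
	a := strings.NewReader("hello\nworld\n")
	b := strings.NewReader("hello\nthere\nworld\n")
	// Prints a unified diff of a versus b to stdout.
	if err := d.Diff(os.Stdout, a, b); err != nil {
		panic(err)
	}
}
```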
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
new file mode 100644
index 00000000..65dc2850
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -0,0 +1,40 @@
+run:
+ # do not run on test files yet
+ tests: false
+
+# all available settings of specific linters
+linters-settings:
+ errcheck:
+ # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
+ # default is false: such cases aren't reported by default.
+ check-type-assertions: false
+
+ # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
+ # default is false: such cases aren't reported by default.
+ check-blank: false
+
+ lll:
+ line-length: 100
+ tab-width: 4
+
+ prealloc:
+ simple: false
+ range-loops: false
+ for-loops: false
+
+ whitespace:
+ multi-if: false # Enforces newlines (or comments) after every multi-line if statement
+ multi-func: false # Enforces newlines (or comments) after every multi-line function signature
+
+linters:
+ enable:
+ - megacheck
+ - govet
+ disable:
+ - maligned
+ - prealloc
+ disable-all: false
+ presets:
+ - bugs
+ - unused
+ fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
index 848938a6..5e20aa41 100644
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -4,21 +4,13 @@ git:
depth: 1
env:
- GO111MODULE=on
- - GO111MODULE=off
-go: [ 1.11.x, 1.12.x ]
-os: [ linux, osx ]
-matrix:
- exclude:
- - go: 1.12.x
- env: GO111MODULE=off
- - go: 1.11.x
- os: osx
+go: [1.13.x, 1.14.x]
+os: [linux, osx]
install:
- ./travis/install.sh
- - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi
- - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi
script:
- ./travis/cross_build.sh
+ - ./travis/lint.sh
- export GOMAXPROCS=4
- export GORACE=halt_on_error=1
- go test -race -v ./...
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 51a7ab0c..584026d6 100644
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -1,9 +1,32 @@
+# 1.6.0
+Fixes:
+ * end of line cleanup
+ * revert the entry concurrency bug fix which leads to deadlock under some circumstances
+ * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
+
+Features:
+ * add an option to the `TextFormatter` to completely disable fields quoting
+
+# 1.5.0
+Code quality:
+ * add golangci linter run on travis
+
+Fixes:
+ * add mutex for hooks concurrent access on `Entry` data
+ * caller function field for go1.14
+ * fix build issue for gopherjs target
+
+Feature:
+ * add a hooks/writer sub-package whose goal is to split output to different streams depending on the log level
+ * add a `DisableHTMLEscape` option in the `JSONFormatter`
+ * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
+
# 1.4.2
* Fixes build break for plan9, nacl, solaris
# 1.4.1
This new release introduces:
* Enhance TextFormatter to not print caller information when they are empty (#944)
- * Remove dependency on golang.org/x/crypto (#932, #943)
+ * Remove dependency on golang.org/x/crypto (#932, #943)
Fixes:
* Fix Entry.WithContext method to return a copy of the initial entry (#941)
@@ -11,7 +34,7 @@ Fixes:
# 1.4.0
This new release introduces:
* Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
- * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911)
+ * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
* Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
Fixes:
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index a4796eb0..5796706d 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -1,8 +1,28 @@
-# Logrus [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+# Logrus [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger.
+**Logrus is in maintenance-mode.** We will not be introducing new features. It's
+simply too hard to do in a way that won't break many people's projects, which is
+the last thing you want from your Logging library (again...).
+
+This does not mean Logrus is dead. Logrus will continue to be maintained for
+security, (backwards compatible) bug fixes, and performance (where we are
+limited by the interface).
+
+I believe Logrus' biggest contribution is to have played a part in today's
+widespread use of structured logging in Golang. There doesn't seem to be a
+reason to do a major, breaking iteration into Logrus V2, since the fantastic Go
+community has built those independently. Many fantastic alternatives have sprung
+up. Logrus would look like those, had it been re-designed with what we know
+about structured logging in Go today. Check out, for example,
+[Zerolog][zerolog], [Zap][zap], and [Apex][apex].
+
+[zerolog]: https://github.com/rs/zerolog
+[zap]: https://github.com/uber-go/zap
+[apex]: https://github.com/apex/log
+
**Seeing weird case-sensitive problems?** It's in the past been possible to
import Logrus as both upper- and lower-case. Due to the Go package environment,
this caused issues in the community and we needed a standard. Some environments
@@ -15,11 +35,6 @@ comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
For an in-depth explanation of the casing issue, see [this
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
-**Are you interested in assisting in maintaining Logrus?** Currently I have a
-lot of obligations, and I am unable to provide Logrus with the maintainership it
-needs. If you'd like to help, please reach out to me at `simon at author's
-username dot com`.
-
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
@@ -187,7 +202,7 @@ func main() {
log.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
- // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
// if err == nil {
// log.Out = file
// } else {
@@ -272,7 +287,7 @@ func init() {
```
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
#### Level logging
@@ -354,6 +369,7 @@ The built-in logging formatters are:
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* When colors are enabled, levels are truncated to 4 characters by default. To disable
truncation set the `DisableLevelTruncation` field to `true`.
+ * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
@@ -364,8 +380,10 @@ Third party logging formatters:
* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
-* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, the log line number and the calling function name when printing logs; saves logs to files.
+* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus message JSON formatter with a human-readable caption added.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -430,14 +448,14 @@ entries. It should not be a feature of the application-level logger.
| Tool | Description |
| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with different configs in different environments.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
@@ -465,7 +483,7 @@ func TestSomething(t*testing.T){
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
-logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
```
@@ -490,6 +508,6 @@ Situation when locking is not needed includes:
1) logger.Out is protected by locks.
- 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
+ 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing)
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
index 96c2ce15..df9d65c3 100644
--- a/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -1,14 +1,14 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
- GOPATH: c:\gopath
-branches:
- only:
- - master
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
-build_script:
- - go get -t
- - go test
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 63e25583..f6e062a3 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -85,10 +85,15 @@ func NewEntry(logger *Logger) *Entry {
}
}
+// Returns the bytes representation of this entry from the formatter.
+func (entry *Entry) Bytes() ([]byte, error) {
+ return entry.Logger.Formatter.Format(entry)
+}
+
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
- serialized, err := entry.Logger.Formatter.Format(entry)
+ serialized, err := entry.Bytes()
if err != nil {
return "", err
}
@@ -103,7 +108,11 @@ func (entry *Entry) WithError(err error) *Entry {
// Add a context to the Entry.
func (entry *Entry) WithContext(ctx context.Context) *Entry {
- return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx}
+ dataCopy := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ dataCopy[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
}
// Add a single field to the Entry.
@@ -144,7 +153,11 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
// Overrides the time of the Entry.
func (entry *Entry) WithTime(t time.Time) *Entry {
- return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
+ dataCopy := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ dataCopy[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
}
// getPackageName reduces a fully qualified function name to the package name
@@ -165,15 +178,20 @@ func getPackageName(f string) string {
// getCaller retrieves the name of the first non-logrus calling function
func getCaller() *runtime.Frame {
-
// cache this package's fully-qualified name
callerInitOnce.Do(func() {
- pcs := make([]uintptr, 2)
+ pcs := make([]uintptr, maximumCallerDepth)
_ = runtime.Callers(0, pcs)
- logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
- // now that we have the cache, we can skip a minimum count of known-logrus functions
- // XXX this is dubious, the number of frames may vary
+ // dynamically determine the package name and the minimum caller depth
+ for i := 0; i < maximumCallerDepth; i++ {
+ funcName := runtime.FuncForPC(pcs[i]).Name()
+ if strings.Contains(funcName, "getCaller") {
+ logrusPackage = getPackageName(funcName)
+ break
+ }
+ }
+
minimumCallerDepth = knownLogrusFrames
})
@@ -187,7 +205,7 @@ func getCaller() *runtime.Frame {
// If the caller isn't part of this package, we're done
if pkg != logrusPackage {
- return &f
+ return &f //nolint:scopelint
}
}
@@ -217,9 +235,11 @@ func (entry Entry) log(level Level, msg string) {
entry.Level = level
entry.Message = msg
+ entry.Logger.mu.Lock()
if entry.Logger.ReportCaller {
entry.Caller = getCaller()
}
+ entry.Logger.mu.Unlock()
entry.fireHooks()
@@ -255,11 +275,10 @@ func (entry *Entry) write() {
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
- } else {
- _, err = entry.Logger.Out.Write(serialized)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
- }
+ return
+ }
+ if _, err = entry.Logger.Out.Write(serialized); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
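
The entry.go changes above make `WithContext` and `WithTime` copy the entry's `Data` map and add an `Entry.Bytes` helper. A minimal sketch of how those behave from calling code, using only the fields and methods shown in the hunks above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	base := logrus.WithField("request_id", "abc123")

	// WithContext and WithTime now deep-copy the Data map, so adding fields
	// to the derived entry does not mutate (or race with) the original entry.
	derived := base.WithContext(context.Background()).WithTime(time.Now())

	// Bytes returns the formatter's output for the entry without logging it.
	if b, err := derived.Bytes(); err == nil {
		fmt.Print(string(b))
	}
}
```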
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index 62fc2f21..42b04f6c 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -80,7 +80,7 @@ func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
-// WithTime creats an entry from the standard logger and overrides the time of
+// WithTime creates an entry from the standard logger and overrides the time of
// logs generated with it.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
index 12fdf989..d4132967 100644
--- a/vendor/github.com/sirupsen/logrus/go.mod
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -2,9 +2,10 @@ module github.com/sirupsen/logrus
require (
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/konsorten/go-windows-terminal-sequences v1.0.1
+ github.com/konsorten/go-windows-terminal-sequences v1.0.3
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.2.2
golang.org/x/sys v0.0.0-20190422165155-953cdadca894
)
+
+go 1.13
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
index 596c318b..49c690f2 100644
--- a/vendor/github.com/sirupsen/logrus/go.sum
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -1,16 +1,12 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
-github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index 098a21a0..ba7f2371 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -28,6 +28,9 @@ type JSONFormatter struct {
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
+ // DisableHTMLEscape allows disabling html escaping in output
+ DisableHTMLEscape bool
+
// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
DataKey string
@@ -110,6 +113,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
}
encoder := json.NewEncoder(b)
+ encoder.SetEscapeHTML(!f.DisableHTMLEscape)
if f.PrettyPrint {
encoder.SetIndent("", " ")
}
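
The `DisableHTMLEscape` option added above maps directly onto `json.Encoder.SetEscapeHTML`; a small sketch of enabling it:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{
		// Keep characters such as <, > and & literal instead of escaping
		// them to \u003c, \u003e and \u0026.
		DisableHTMLEscape: true,
	})
	log.WithField("cond", "a < b && c > 0").Info("raw condition in JSON output")
}
```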
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index c0c0b1e5..6fdda748 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -68,10 +68,10 @@ func (mw *MutexWrap) Disable() {
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
-// var log = &Logger{
+// var log = &logrus.Logger{
// Out: os.Stderr,
-// Formatter: new(JSONFormatter),
-// Hooks: make(LevelHooks),
+// Formatter: new(logrus.JSONFormatter),
+// Hooks: make(logrus.LevelHooks),
// Level: logrus.DebugLevel,
// }
//
@@ -100,8 +100,9 @@ func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
-// Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
+// WithField allocates a new entry and adds a field to it.
+// Debug, Print, Info, Warn, Error, Fatal or Panic must then be applied to
+// this new returned entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
index 8644761f..2f16224c 100644
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -51,7 +51,7 @@ func (level *Level) UnmarshalText(text []byte) error {
return err
}
- *level = Level(l)
+ *level = l
return nil
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
index 3c4f43f9..49978998 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
@@ -1,4 +1,5 @@
// +build darwin dragonfly freebsd netbsd openbsd
+// +build !js
package logrus
@@ -10,4 +11,3 @@ func isTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
}
-
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 00000000..ebdae3ec
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,7 @@
+// +build js
+
+package logrus
+
+func isTerminal(fd int) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index 355dc966..cc4fe6e3 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -1,4 +1,5 @@
// +build linux aix
+// +build !js
package logrus
@@ -10,4 +11,3 @@ func isTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
}
-
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index e01587c4..3c28b54c 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -6,9 +6,11 @@ import (
"os"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"time"
+ "unicode/utf8"
)
const (
@@ -32,6 +34,14 @@ type TextFormatter struct {
// Force disabling colors.
DisableColors bool
+ // Force quoting of all values
+ ForceQuote bool
+
+ // DisableQuote disables quoting for all values.
+ // DisableQuote will have a lower priority than ForceQuote.
+ // If both of them are set to true, quote will be forced on all values.
+ DisableQuote bool
+
// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
EnvironmentOverrideColors bool
@@ -57,6 +67,10 @@ type TextFormatter struct {
// Disables the truncation of the level text to 4 characters.
DisableLevelTruncation bool
+ // PadLevelText adds padding to the level text so that all the levels output at the same length
+ // PadLevelText is a superset of the DisableLevelTruncation option
+ PadLevelText bool
+
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
@@ -79,23 +93,32 @@ type TextFormatter struct {
CallerPrettyfier func(*runtime.Frame) (function string, file string)
terminalInitOnce sync.Once
+
+ // The max length of the level text, generated dynamically on init
+ levelTextMaxLength int
}
func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out)
}
+ // Get the max length of the level text
+ for _, level := range AllLevels {
+ levelTextLength := utf8.RuneCount([]byte(level.String()))
+ if levelTextLength > f.levelTextMaxLength {
+ f.levelTextMaxLength = levelTextLength
+ }
+ }
}
func (f *TextFormatter) isColored() bool {
isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
if f.EnvironmentOverrideColors {
- if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+ switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
+ case ok && force != "0":
isColored = true
- } else if ok && force == "0" {
- isColored = false
- } else if os.Getenv("CLICOLOR") == "0" {
+ case ok && force == "0", os.Getenv("CLICOLOR") == "0":
isColored = false
}
}
@@ -217,9 +240,18 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
}
levelText := strings.ToUpper(entry.Level.String())
- if !f.DisableLevelTruncation {
+ if !f.DisableLevelTruncation && !f.PadLevelText {
levelText = levelText[0:4]
}
+ if f.PadLevelText {
+ // Generates the format string used in the next line, for example "%-6s" or "%-7s".
+ // Based on the max level text length.
+ formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
+ // Formats the level text by appending spaces up to the max length, for example:
+ // - "INFO "
+ // - "WARNING"
+ levelText = fmt.Sprintf(formatString, levelText)
+ }
// Remove a single newline if it already exists in the message to keep
// the behavior of logrus text_formatter the same as the stdlib log package
@@ -243,11 +275,12 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
}
}
- if f.DisableTimestamp {
+ switch {
+ case f.DisableTimestamp:
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
- } else if !f.FullTimestamp {
+ case !f.FullTimestamp:
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
- } else {
+ default:
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
}
for _, k := range keys {
@@ -258,9 +291,15 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
}
func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.ForceQuote {
+ return true
+ }
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
+ if f.DisableQuote {
+ return false
+ }
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
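
The new `ForceQuote`, `DisableQuote` and `PadLevelText` options interact as documented above (`ForceQuote` wins over `DisableQuote`); a short sketch:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		ForceQuote:   true, // quote every field value, even simple ones
		PadLevelText: true, // pad the level text to the longest level ("WARNING")
	})
	log.WithField("path", "/tmp/data").Warn("padded, quoted output")
	log.WithField("path", "/tmp/data").Info("INFO is padded to the same width")
}
```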
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
index 9e1f7513..72e8e3a1 100644
--- a/vendor/github.com/sirupsen/logrus/writer.go
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -6,10 +6,16 @@ import (
"runtime"
)
+// Writer at INFO level. See WriterLevel for details.
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
+// WriterLevel returns an io.Writer that can be used to write arbitrary text to
+// the logger at the given log level. Each line written to the writer will be
+// printed in the usual way using formatters and hooks. The writer is part of an
+// io.Pipe, and it is the caller's responsibility to close the writer when done.
+// This can be used to override the standard library logger easily.
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
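
The new `WriterLevel` doc comment describes an `io.Pipe`-backed writer; a sketch of redirecting the standard library logger through it (the pipe is serviced by a background goroutine, so a real program should keep running and close the writer when done):

```go
package main

import (
	stdlog "log"
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// WriterLevel returns one end of an io.Pipe; each line written to it is
	// logged through logrus at the given level. The caller must close it.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	stdlog.SetOutput(w)
	stdlog.Println("emitted through logrus at WARN level")

	// Give the pipe's reader goroutine a moment to flush in this tiny demo.
	time.Sleep(100 * time.Millisecond)
}
```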
diff --git a/vendor/github.com/sonatard/noctx/.gitignore b/vendor/github.com/sonatard/noctx/.gitignore
new file mode 100644
index 00000000..2d830686
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/.gitignore
@@ -0,0 +1 @@
+coverage.out
diff --git a/vendor/github.com/sonatard/noctx/.golangci.yml b/vendor/github.com/sonatard/noctx/.golangci.yml
new file mode 100644
index 00000000..1580acde
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/.golangci.yml
@@ -0,0 +1,20 @@
+run:
+
+linters-settings:
+ govet:
+ enable-all: true
+
+linters:
+ enable-all: true
+ disable:
+ - gochecknoglobals
+ - gomnd
+ - gocognit
+ - nestif
+
+issues:
+ exclude-rules:
+ - path: reqwithoutctx/ssa.go
+ text: "Consider preallocating `exts`"
+ linters:
+ - prealloc
diff --git a/vendor/github.com/sonatard/noctx/LICENSE b/vendor/github.com/sonatard/noctx/LICENSE
new file mode 100644
index 00000000..a00d5727
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 sonatard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sonatard/noctx/Makefile b/vendor/github.com/sonatard/noctx/Makefile
new file mode 100644
index 00000000..1a27f6b5
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/Makefile
@@ -0,0 +1,16 @@
+.PHONY: all imports test lint
+
+all: imports test lint
+
+imports:
+ goimports -w ./
+
+test:
+ go test -race ./...
+
+test_coverage:
+ go test -race -coverprofile=coverage.out -covermode=atomic ./...
+
+lint:
+ golangci-lint run ./...
+
diff --git a/vendor/github.com/sonatard/noctx/README.md b/vendor/github.com/sonatard/noctx/README.md
new file mode 100644
index 00000000..bfe9782c
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/README.md
@@ -0,0 +1,95 @@
+# noctx
+
+![](https://github.com/sonatard/noctx/workflows/.github/workflows/ci.yml/badge.svg)
+
+`noctx` finds HTTP requests that are sent without a context.Context.
+
+You should use `noctx` if your library sends HTTP requests.
+Passing a `context.Context` lets library users cancel HTTP requests, propagate trace information, and so on.
+
+## Install
+
+```sh
+$ go get -u github.com/sonatard/noctx/cmd/noctx
+```
+
+## Usage
+
+```sh
+$ go vet -vettool=`which noctx` main.go
+./main.go:6:11: net/http.Get must not be called
+```
+
+## Detection rules
+- Executing following functions
+ - `net/http.Get`
+ - `net/http.Head`
+ - `net/http.Post`
+ - `net/http.PostForm`
+ - `(*net/http.Client).Get`
+ - `(*net/http.Client).Head`
+ - `(*net/http.Client).Post`
+ - `(*net/http.Client).PostForm`
+- Passing an `http.Request` returned by the `http.NewRequest` function to another function.
+
+## How to fix
+- Send the HTTP request using the `(*http.Client).Do(*http.Request)` method.
+- In Go 1.13 and later, use the `http.NewRequestWithContext` function instead of `http.NewRequest`.
+- In Go 1.12 and earlier, call `(http.Request).WithContext(ctx)` after `http.NewRequest`.
+
+`(http.Request).WithContext(ctx)` has a performance disadvantage because it returns a copy of the `http.Request`. Use the `http.NewRequestWithContext` function if you only support Go 1.13 or later.
+
+## Sample Code
+
+```go
+package main
+
+import (
+ "context"
+ "net/http"
+)
+
+func main() {
+ const url = "http://example.com"
+ http.Get(url) // want `net/http\.Get must not be called`
+ http.Head(url) // want `net/http\.Head must not be called`
+ http.Post(url, "", nil) // want `net/http\.Post must not be called`
+ http.PostForm(url, nil) // want `net/http\.PostForm must not be called`
+
+ cli := &http.Client{}
+ cli.Get(url) // want `\(\*net/http\.Client\)\.Get must not be called`
+ cli.Head(url) // want `\(\*net/http\.Client\)\.Head must not be called`
+ cli.Post(url, "", nil) // want `\(\*net/http\.Client\)\.Post must not be called`
+ cli.PostForm(url, nil) // want `\(\*net/http\.Client\)\.PostForm must not be called`
+
+ req, _ := http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext`
+ cli.Do(req)
+
+ ctx := context.Background()
+ req2, _ := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) // OK
+ cli.Do(req2)
+
+ req3, _ := http.NewRequest(http.MethodPost, url, nil) // OK
+ req3 = req3.WithContext(ctx)
+ cli.Do(req3)
+
+ f2 := func(req *http.Request, ctx context.Context) *http.Request {
+ return req
+ }
+ req4, _ := http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext`
+ req4 = f2(req4, ctx)
+ cli.Do(req4)
+
+ req5, _ := func() (*http.Request, error) {
+ return http.NewRequest(http.MethodPost, url, nil) // want `should rewrite http.NewRequestWithContext or add \(\*Request\).WithContext`
+ }()
+ cli.Do(req5)
+
+}
+```
+
+## Reference
+- [net/http - NewRequest](https://golang.org/pkg/net/http/#NewRequest)
+- [net/http - NewRequestWithContext](https://golang.org/pkg/net/http/#NewRequestWithContext)
+- [net/http - Request.WithContext](https://golang.org/pkg/net/http/#Request.WithContext)
+
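
As a companion to the "How to fix" guidance in the README above, a minimal context-aware request helper; the function name, URL and response handling here are purely illustrative:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
)

// fetch sends a GET request that carries the caller's context, which is the
// pattern noctx expects on Go 1.13 and later.
func fetch(ctx context.Context, url string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if body, err := fetch(ctx, "http://example.com"); err == nil {
		fmt.Println(len(body))
	}
}
```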
diff --git a/vendor/github.com/sonatard/noctx/go.mod b/vendor/github.com/sonatard/noctx/go.mod
new file mode 100644
index 00000000..47b7901a
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/go.mod
@@ -0,0 +1,8 @@
+module github.com/sonatard/noctx
+
+go 1.13
+
+require (
+ github.com/gostaticanalysis/analysisutil v0.0.3
+ golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9
+)
diff --git a/vendor/github.com/sonatard/noctx/go.sum b/vendor/github.com/sonatard/noctx/go.sum
new file mode 100644
index 00000000..f8e5b075
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/go.sum
@@ -0,0 +1,16 @@
+github.com/gostaticanalysis/analysisutil v0.0.3 h1:iwp+5/UAyzQSFgQ4uR2sni99sJ8Eo9DEacKWM5pekIg=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9 h1:KOkk4e2xd5OeCDJGwacvr75ICCbCsShrHiqPEdsA9hg=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/main.go b/vendor/github.com/sonatard/noctx/ngfunc/main.go
new file mode 100644
index 00000000..cfeb0f00
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/ngfunc/main.go
@@ -0,0 +1,57 @@
+package ngfunc
+
+import (
+ "go/types"
+
+ "github.com/gostaticanalysis/analysisutil"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+)
+
+func Run(pass *analysis.Pass) (interface{}, error) {
+ ngFuncNames := []string{
+ "net/http.Get",
+ "net/http.Head",
+ "net/http.Post",
+ "net/http.PostForm",
+ "(*net/http.Client).Get",
+ "(*net/http.Client).Head",
+ "(*net/http.Client).Post",
+ "(*net/http.Client).PostForm",
+ }
+
+ ngFuncs := typeFuncs(pass, ngFuncNames)
+ if len(ngFuncs) == 0 {
+ return nil, nil
+ }
+
+ reportFuncs := ngCalledFuncs(pass, ngFuncs)
+ report(pass, reportFuncs)
+
+ return nil, nil
+}
+
+func ngCalledFuncs(pass *analysis.Pass, ngFuncs []*types.Func) []*Report {
+ var reports []*Report
+
+ srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
+ for _, sf := range srcFuncs {
+ for _, b := range sf.Blocks {
+ for _, instr := range b.Instrs {
+ for _, ngFunc := range ngFuncs {
+ if analysisutil.Called(instr, nil, ngFunc) {
+ ngCalledFunc := &Report{
+ Instruction: instr,
+ function: ngFunc,
+ }
+ reports = append(reports, ngCalledFunc)
+
+ break
+ }
+ }
+ }
+ }
+ }
+
+ return reports
+}
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/report.go b/vendor/github.com/sonatard/noctx/ngfunc/report.go
new file mode 100644
index 00000000..e5005179
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/ngfunc/report.go
@@ -0,0 +1,29 @@
+package ngfunc
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ssa"
+)
+
+type Report struct {
+ Instruction ssa.Instruction
+ function *types.Func
+}
+
+func (n *Report) Pos() token.Pos {
+ return n.Instruction.Pos()
+}
+
+func (n *Report) Message() string {
+ return fmt.Sprintf("%s must not be called", n.function.FullName())
+}
+
+func report(pass *analysis.Pass, reports []*Report) {
+ for _, report := range reports {
+ pass.Reportf(report.Pos(), report.Message())
+ }
+}
diff --git a/vendor/github.com/sonatard/noctx/ngfunc/types.go b/vendor/github.com/sonatard/noctx/ngfunc/types.go
new file mode 100644
index 00000000..f1877386
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/ngfunc/types.go
@@ -0,0 +1,65 @@
+package ngfunc
+
+import (
+ "fmt"
+ "go/types"
+ "strings"
+
+ "github.com/gostaticanalysis/analysisutil"
+ "golang.org/x/tools/go/analysis"
+)
+
+var errNotFound = fmt.Errorf("function not found")
+
+func typeFuncs(pass *analysis.Pass, funcs []string) []*types.Func {
+ fs := make([]*types.Func, 0, len(funcs))
+
+ for _, fn := range funcs {
+ f, err := typeFunc(pass, fn)
+ if err != nil {
+ continue
+ }
+
+ fs = append(fs, f)
+ }
+
+ return fs
+}
+
+func typeFunc(pass *analysis.Pass, funcName string) (*types.Func, error) {
+ ss := strings.Split(strings.TrimSpace(funcName), ".")
+
+ switch len(ss) {
+ case 2:
+ // package function: pkgname.Func
+ f, ok := analysisutil.ObjectOf(pass, ss[0], ss[1]).(*types.Func)
+ if !ok || f == nil {
+ return nil, errNotFound
+ }
+
+ return f, nil
+ case 3:
+ // method: (*pkgname.Type).Method
+ pkgname := strings.TrimLeft(ss[0], "(")
+ typename := strings.TrimRight(ss[1], ")")
+
+ if pkgname != "" && pkgname[0] == '*' {
+ pkgname = pkgname[1:]
+ typename = "*" + typename
+ }
+
+ typ := analysisutil.TypeOf(pass, pkgname, typename)
+ if typ == nil {
+ return nil, errNotFound
+ }
+
+ m := analysisutil.MethodOf(typ, ss[2])
+ if m == nil {
+ return nil, errNotFound
+ }
+
+ return m, nil
+ }
+
+ return nil, errNotFound
+}
diff --git a/vendor/github.com/sonatard/noctx/noctx.go b/vendor/github.com/sonatard/noctx/noctx.go
new file mode 100644
index 00000000..478ad885
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/noctx.go
@@ -0,0 +1,31 @@
+package noctx
+
+import (
+ "github.com/sonatard/noctx/ngfunc"
+ "github.com/sonatard/noctx/reqwithoutctx"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+)
+
+var Analyzer = &analysis.Analyzer{
+ Name: "noctx",
+ Doc: Doc,
+ Run: run,
+ Requires: []*analysis.Analyzer{
+ buildssa.Analyzer,
+ },
+}
+
+const Doc = "noctx finds sending http request without context.Context"
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ if _, err := ngfunc.Run(pass); err != nil {
+ return nil, err
+ }
+
+ if _, err := reqwithoutctx.Run(pass); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
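
The exported `Analyzer` follows the standard `go/analysis` shape, so it can be wrapped into a standalone vet tool; a sketch of such a wrapper (the upstream `cmd/noctx` presumably does something equivalent):

```go
// Hypothetical main package wrapping the noctx analyzer as a vet tool.
package main

import (
	"github.com/sonatard/noctx"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	singlechecker.Main(noctx.Analyzer)
}
```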
diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go
new file mode 100644
index 00000000..b09e1de1
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/main.go
@@ -0,0 +1,14 @@
+package reqwithoutctx
+
+import (
+ "golang.org/x/tools/go/analysis"
+)
+
+func Run(pass *analysis.Pass) (interface{}, error) {
+ analyzer := NewAnalyzer(pass)
+ reports := analyzer.Exec()
+
+ report(pass, reports)
+
+ return nil, nil
+}
diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go
new file mode 100644
index 00000000..1c94e314
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/report.go
@@ -0,0 +1,26 @@
+package reqwithoutctx
+
+import (
+ "go/token"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ssa"
+)
+
+type Report struct {
+ Instruction ssa.Instruction
+}
+
+func (n *Report) Pos() token.Pos {
+ return n.Instruction.Pos()
+}
+
+func (n *Report) Message() string {
+ return "should rewrite http.NewRequestWithContext or add (*Request).WithContext"
+}
+
+func report(pass *analysis.Pass, reports []*Report) {
+ for _, report := range reports {
+ pass.Reportf(report.Pos(), report.Message())
+ }
+}
diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go
new file mode 100644
index 00000000..35751269
--- /dev/null
+++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go
@@ -0,0 +1,180 @@
+package reqwithoutctx
+
+import (
+ "go/types"
+
+ "github.com/gostaticanalysis/analysisutil"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+)
+
+type Analyzer struct {
+ Funcs []*ssa.Function
+ newRequestType types.Type
+ requestType types.Type
+}
+
+func NewAnalyzer(pass *analysis.Pass) *Analyzer {
+ newRequestType := analysisutil.TypeOf(pass, "net/http", "NewRequest")
+ requestType := analysisutil.TypeOf(pass, "net/http", "*Request")
+
+ srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs
+
+ return &Analyzer{
+ Funcs: srcFuncs,
+ newRequestType: newRequestType,
+ requestType: requestType,
+ }
+}
+
+func (a *Analyzer) Exec() []*Report {
+ if a.newRequestType == nil || a.requestType == nil {
+ return []*Report{}
+ }
+
+ usedReqs := a.usedReqs()
+ newReqs := a.requestsByNewRequest()
+
+ return a.report(usedReqs, newReqs)
+}
+
+func (a *Analyzer) report(usedReqs map[string]*ssa.Extract, newReqs map[*ssa.Call]*ssa.Extract) []*Report {
+ var reports []*Report
+
+ for _, fReq := range usedReqs {
+ for newRequest, req := range newReqs {
+ if fReq == req {
+ reports = append(reports, &Report{Instruction: newRequest})
+ }
+ }
+ }
+
+ return reports
+}
+
+func (a *Analyzer) usedReqs() map[string]*ssa.Extract {
+ reqExts := make(map[string]*ssa.Extract)
+
+ for _, f := range a.Funcs {
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ switch i := instr.(type) {
+ case *ssa.Call:
+ exts := a.usedReqByCall(i)
+ for _, ext := range exts {
+ key := i.String() + ext.String()
+ reqExts[key] = ext
+ }
+ case *ssa.UnOp:
+ ext := a.usedReqByUnOp(i)
+ if ext != nil {
+ key := i.String() + ext.String()
+ reqExts[key] = ext
+ }
+ case *ssa.Return:
+ exts := a.usedReqByReturn(i)
+ for _, ext := range exts {
+ key := i.String() + ext.String()
+ reqExts[key] = ext
+ }
+ }
+ }
+ }
+ }
+
+ return reqExts
+}
+
+func (a *Analyzer) usedReqByCall(call *ssa.Call) []*ssa.Extract {
+ var exts []*ssa.Extract
+
+ // skip net/http.Request method call
+ if call.Common().Signature().Recv() != nil && types.Identical(call.Value().Type(), a.requestType) {
+ return exts
+ }
+
+ args := call.Common().Args
+ if len(args) == 0 {
+ return exts
+ }
+
+ for _, arg := range args {
+ ext, ok := arg.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+
+ if !types.Identical(ext.Type(), a.requestType) {
+ continue
+ }
+
+ exts = append(exts, ext)
+ }
+
+ return exts
+}
+
+func (a *Analyzer) usedReqByUnOp(op *ssa.UnOp) *ssa.Extract {
+ if ext, ok := op.X.(*ssa.Extract); ok && types.Identical(ext.Type(), a.requestType) {
+ return ext
+ }
+
+ return nil
+}
+
+func (a *Analyzer) usedReqByReturn(ret *ssa.Return) []*ssa.Extract {
+ rets := ret.Results
+ exts := make([]*ssa.Extract, 0, len(rets))
+
+ for _, ret := range rets {
+ ext, ok := ret.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+
+ if types.Identical(ext.Type(), a.requestType) {
+ exts = append(exts, ext)
+ }
+ }
+
+ return exts
+}
+
+func (a *Analyzer) requestsByNewRequest() map[*ssa.Call]*ssa.Extract {
+ reqs := make(map[*ssa.Call]*ssa.Extract)
+
+ for _, f := range a.Funcs {
+ for _, b := range f.Blocks {
+ for _, instr := range b.Instrs {
+ ext, ok := instr.(*ssa.Extract)
+ if !ok {
+ continue
+ }
+
+ if !types.Identical(ext.Type(), a.requestType) {
+ continue
+ }
+
+ operands := ext.Operands([]*ssa.Value{})
+ if len(operands) != 1 {
+ continue
+ }
+
+ operand := *operands[0]
+
+ f, ok := operand.(*ssa.Call)
+ if !ok {
+ continue
+ }
+
+ if types.Identical(f.Call.Value.Type(), a.newRequestType) {
+ reqs[f] = ext
+ }
+ }
+ }
+ }
+
+ return reqs
+}
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.go
index 646602a6..0f465b9e 100644
--- a/vendor/github.com/sourcegraph/go-diff/diff/diff.go
+++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.go
@@ -1,10 +1,64 @@
package diff
-import "bytes"
+import (
+ "bytes"
+ "time"
+)
-// NOTE: types are code-generated in diff.pb.go.
+// A FileDiff represents a unified diff for a single file.
+//
+// A file unified diff has a header that resembles the following:
+//
+// --- oldname 2009-10-11 15:12:20.000000000 -0700
+// +++ newname 2009-10-11 15:12:30.000000000 -0700
+type FileDiff struct {
+ // the original name of the file
+ OrigName string
+ // the original timestamp (nil if not present)
+ OrigTime *time.Time
+ // the new name of the file (often same as OrigName)
+ NewName string
+ // the new timestamp (nil if not present)
+ NewTime *time.Time
+ // extended header lines (e.g., git's "new mode ", "rename from ", etc.)
+ Extended []string
+ // hunks that were changed from orig to new
+ Hunks []*Hunk
+}
-//go:generate protoc -I../../../.. -I ../../../../github.com/gogo/protobuf/protobuf -I. --gogo_out=. diff.proto
+// A Hunk represents a series of changes (additions or deletions) in a file's
+// unified diff.
+type Hunk struct {
+ // starting line number in original file
+ OrigStartLine int32
+ // number of lines the hunk applies to in the original file
+ OrigLines int32
+ // if > 0, then the original file had a 'No newline at end of file' mark at this offset
+ OrigNoNewlineAt int32
+ // starting line number in new file
+ NewStartLine int32
+ // number of lines the hunk applies to in the new file
+ NewLines int32
+ // optional section heading
+ Section string
+ // 0-indexed line offset in unified file diff (including section headers); this is
+ // only set when Hunks are read from entire file diff (i.e., when ReadAllHunks is
+ // called) This accounts for hunk headers, too, so the StartPosition of the first
+ // hunk will be 1.
+ StartPosition int32
+ // hunk body (lines prefixed with '-', '+', or ' ')
+ Body []byte
+}
+
+// A Stat is a diff stat that represents the number of lines added/changed/deleted.
+type Stat struct {
+ // number of lines added
+ Added int32
+ // number of lines changed
+ Changed int32
+ // number of lines deleted
+ Deleted int32
+}
// Stat computes the number of lines added/changed/deleted in all
// hunks in this file's diff.
@@ -54,10 +108,12 @@ func (h *Hunk) Stat() Stat {
}
var (
- hunkPrefix = []byte("@@ ")
+ hunkPrefix = []byte("@@ ")
+ onlyInMessagePrefix = []byte("Only in ")
)
const hunkHeader = "@@ -%d,%d +%d,%d @@"
+const onlyInMessage = "Only in %s: %s\n"
// diffTimeParseLayout is the layout used to parse the time in unified diff file
// header timestamps.
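
The `FileDiff`, `Hunk` and `Stat` types moved above are plain Go structs now (no longer generated from protobuf); a small sketch computing a hunk's stat, using the `(*Hunk).Stat()` method shown in this file (import path taken from the vendor directory):

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/go-diff/diff"
)

func main() {
	h := &diff.Hunk{
		OrigStartLine: 1,
		OrigLines:     2,
		NewStartLine:  1,
		NewLines:      2,
		// Body lines are prefixed with ' ', '-' or '+'.
		Body: []byte(" unchanged\n-removed\n+added\n"),
	}
	s := h.Stat() // counts added/changed/deleted lines in the hunk body
	fmt.Printf("added=%d changed=%d deleted=%d\n", s.Added, s.Changed, s.Deleted)
}
```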
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go
deleted file mode 100644
index 2e7c27fb..00000000
--- a/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go
+++ /dev/null
@@ -1,1059 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: diff.proto
-// DO NOT EDIT!
-
-/*
- Package diff is a generated protocol buffer package.
-
- It is generated from these files:
- diff.proto
-
- It has these top-level messages:
- FileDiff
- Hunk
- Stat
-*/
-package diff
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"
-import pbtypes "sourcegraph.com/sqs/pbtypes"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// A FileDiff represents a unified diff for a single file.
-//
-// A file unified diff has a header that resembles the following:
-//
-// --- oldname 2009-10-11 15:12:20.000000000 -0700
-// +++ newname 2009-10-11 15:12:30.000000000 -0700
-type FileDiff struct {
- // the original name of the file
- OrigName string `protobuf:"bytes,1,opt,name=OrigName,proto3" json:"OrigName,omitempty"`
- // the original timestamp (nil if not present)
- OrigTime *pbtypes.Timestamp `protobuf:"bytes,2,opt,name=OrigTime" json:"OrigTime,omitempty"`
- // the new name of the file (often same as OrigName)
- NewName string `protobuf:"bytes,3,opt,name=NewName,proto3" json:"NewName,omitempty"`
- // the new timestamp (nil if not present)
- NewTime *pbtypes.Timestamp `protobuf:"bytes,4,opt,name=NewTime" json:"NewTime,omitempty"`
- // extended header lines (e.g., git's "new mode ", "rename from ", etc.)
- Extended []string `protobuf:"bytes,5,rep,name=Extended" json:"Extended,omitempty"`
- // hunks that were changed from orig to new
- Hunks []*Hunk `protobuf:"bytes,6,rep,name=Hunks" json:"Hunks,omitempty"`
-}
-
-func (m *FileDiff) Reset() { *m = FileDiff{} }
-func (m *FileDiff) String() string { return proto.CompactTextString(m) }
-func (*FileDiff) ProtoMessage() {}
-
-// A Hunk represents a series of changes (additions or deletions) in a file's
-// unified diff.
-type Hunk struct {
- // starting line number in original file
- OrigStartLine int32 `protobuf:"varint,1,opt,name=OrigStartLine,proto3" json:"OrigStartLine,omitempty"`
- // number of lines the hunk applies to in the original file
- OrigLines int32 `protobuf:"varint,2,opt,name=OrigLines,proto3" json:"OrigLines,omitempty"`
- // if > 0, then the original file had a 'No newline at end of file' mark at this offset
- OrigNoNewlineAt int32 `protobuf:"varint,3,opt,name=OrigNoNewlineAt,proto3" json:"OrigNoNewlineAt,omitempty"`
- // starting line number in new file
- NewStartLine int32 `protobuf:"varint,4,opt,name=NewStartLine,proto3" json:"NewStartLine,omitempty"`
- // number of lines the hunk applies to in the new file
- NewLines int32 `protobuf:"varint,5,opt,name=NewLines,proto3" json:"NewLines,omitempty"`
- // optional section heading
- Section string `protobuf:"bytes,6,opt,name=Section,proto3" json:"Section,omitempty"`
- // 0-indexed line offset in unified file diff (including section headers); this is
- // only set when Hunks are read from entire file diff (i.e., when ReadAllHunks is
- // called) This accounts for hunk headers, too, so the StartPosition of the first
- // hunk will be 1.
- StartPosition int32 `protobuf:"varint,7,opt,name=StartPosition,proto3" json:"StartPosition,omitempty"`
- // hunk body (lines prefixed with '-', '+', or ' ')
- Body []byte `protobuf:"bytes,8,opt,name=Body,proto3" json:"Body,omitempty"`
-}
-
-func (m *Hunk) Reset() { *m = Hunk{} }
-func (m *Hunk) String() string { return proto.CompactTextString(m) }
-func (*Hunk) ProtoMessage() {}
-
-// A Stat is a diff stat that represents the number of lines added/changed/deleted.
-type Stat struct {
- // number of lines added
- Added int32 `protobuf:"varint,1,opt,name=Added,proto3" json:""`
- // number of lines changed
- Changed int32 `protobuf:"varint,2,opt,name=Changed,proto3" json:""`
- // number of lines deleted
- Deleted int32 `protobuf:"varint,3,opt,name=Deleted,proto3" json:""`
-}
-
-func (m *Stat) Reset() { *m = Stat{} }
-func (m *Stat) String() string { return proto.CompactTextString(m) }
-func (*Stat) ProtoMessage() {}
-
-func (m *FileDiff) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-func (m *FileDiff) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.OrigName) > 0 {
- data[i] = 0xa
- i++
- i = encodeVarintDiff(data, i, uint64(len(m.OrigName)))
- i += copy(data[i:], m.OrigName)
- }
- if m.OrigTime != nil {
- data[i] = 0x12
- i++
- i = encodeVarintDiff(data, i, uint64(m.OrigTime.Size()))
- n1, err := m.OrigTime.MarshalTo(data[i:])
- if err != nil {
- return 0, err
- }
- i += n1
- }
- if len(m.NewName) > 0 {
- data[i] = 0x1a
- i++
- i = encodeVarintDiff(data, i, uint64(len(m.NewName)))
- i += copy(data[i:], m.NewName)
- }
- if m.NewTime != nil {
- data[i] = 0x22
- i++
- i = encodeVarintDiff(data, i, uint64(m.NewTime.Size()))
- n2, err := m.NewTime.MarshalTo(data[i:])
- if err != nil {
- return 0, err
- }
- i += n2
- }
- if len(m.Extended) > 0 {
- for _, s := range m.Extended {
- data[i] = 0x2a
- i++
- l = len(s)
- for l >= 1<<7 {
- data[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- data[i] = uint8(l)
- i++
- i += copy(data[i:], s)
- }
- }
- if len(m.Hunks) > 0 {
- for _, msg := range m.Hunks {
- data[i] = 0x32
- i++
- i = encodeVarintDiff(data, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(data[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- return i, nil
-}
-
-func (m *Hunk) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-func (m *Hunk) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.OrigStartLine != 0 {
- data[i] = 0x8
- i++
- i = encodeVarintDiff(data, i, uint64(m.OrigStartLine))
- }
- if m.OrigLines != 0 {
- data[i] = 0x10
- i++
- i = encodeVarintDiff(data, i, uint64(m.OrigLines))
- }
- if m.OrigNoNewlineAt != 0 {
- data[i] = 0x18
- i++
- i = encodeVarintDiff(data, i, uint64(m.OrigNoNewlineAt))
- }
- if m.NewStartLine != 0 {
- data[i] = 0x20
- i++
- i = encodeVarintDiff(data, i, uint64(m.NewStartLine))
- }
- if m.NewLines != 0 {
- data[i] = 0x28
- i++
- i = encodeVarintDiff(data, i, uint64(m.NewLines))
- }
- if len(m.Section) > 0 {
- data[i] = 0x32
- i++
- i = encodeVarintDiff(data, i, uint64(len(m.Section)))
- i += copy(data[i:], m.Section)
- }
- if m.StartPosition != 0 {
- data[i] = 0x38
- i++
- i = encodeVarintDiff(data, i, uint64(m.StartPosition))
- }
- if m.Body != nil {
- if len(m.Body) > 0 {
- data[i] = 0x42
- i++
- i = encodeVarintDiff(data, i, uint64(len(m.Body)))
- i += copy(data[i:], m.Body)
- }
- }
- return i, nil
-}
-
-func (m *Stat) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-func (m *Stat) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Added != 0 {
- data[i] = 0x8
- i++
- i = encodeVarintDiff(data, i, uint64(m.Added))
- }
- if m.Changed != 0 {
- data[i] = 0x10
- i++
- i = encodeVarintDiff(data, i, uint64(m.Changed))
- }
- if m.Deleted != 0 {
- data[i] = 0x18
- i++
- i = encodeVarintDiff(data, i, uint64(m.Deleted))
- }
- return i, nil
-}
-
-func encodeFixed64Diff(data []byte, offset int, v uint64) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- data[offset+4] = uint8(v >> 32)
- data[offset+5] = uint8(v >> 40)
- data[offset+6] = uint8(v >> 48)
- data[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Diff(data []byte, offset int, v uint32) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintDiff(data []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- data[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- data[offset] = uint8(v)
- return offset + 1
-}
-func (m *FileDiff) Size() (n int) {
- var l int
- _ = l
- l = len(m.OrigName)
- if l > 0 {
- n += 1 + l + sovDiff(uint64(l))
- }
- if m.OrigTime != nil {
- l = m.OrigTime.Size()
- n += 1 + l + sovDiff(uint64(l))
- }
- l = len(m.NewName)
- if l > 0 {
- n += 1 + l + sovDiff(uint64(l))
- }
- if m.NewTime != nil {
- l = m.NewTime.Size()
- n += 1 + l + sovDiff(uint64(l))
- }
- if len(m.Extended) > 0 {
- for _, s := range m.Extended {
- l = len(s)
- n += 1 + l + sovDiff(uint64(l))
- }
- }
- if len(m.Hunks) > 0 {
- for _, e := range m.Hunks {
- l = e.Size()
- n += 1 + l + sovDiff(uint64(l))
- }
- }
- return n
-}
-
-func (m *Hunk) Size() (n int) {
- var l int
- _ = l
- if m.OrigStartLine != 0 {
- n += 1 + sovDiff(uint64(m.OrigStartLine))
- }
- if m.OrigLines != 0 {
- n += 1 + sovDiff(uint64(m.OrigLines))
- }
- if m.OrigNoNewlineAt != 0 {
- n += 1 + sovDiff(uint64(m.OrigNoNewlineAt))
- }
- if m.NewStartLine != 0 {
- n += 1 + sovDiff(uint64(m.NewStartLine))
- }
- if m.NewLines != 0 {
- n += 1 + sovDiff(uint64(m.NewLines))
- }
- l = len(m.Section)
- if l > 0 {
- n += 1 + l + sovDiff(uint64(l))
- }
- if m.StartPosition != 0 {
- n += 1 + sovDiff(uint64(m.StartPosition))
- }
- if m.Body != nil {
- l = len(m.Body)
- if l > 0 {
- n += 1 + l + sovDiff(uint64(l))
- }
- }
- return n
-}
-
-func (m *Stat) Size() (n int) {
- var l int
- _ = l
- if m.Added != 0 {
- n += 1 + sovDiff(uint64(m.Added))
- }
- if m.Changed != 0 {
- n += 1 + sovDiff(uint64(m.Changed))
- }
- if m.Deleted != 0 {
- n += 1 + sovDiff(uint64(m.Deleted))
- }
- return n
-}
-
-func sovDiff(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozDiff(x uint64) (n int) {
- return sovDiff(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *FileDiff) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: FileDiff: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: FileDiff: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OrigName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.OrigName = string(data[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field OrigTime", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.OrigTime == nil {
- m.OrigTime = &pbtypes.Timestamp{}
- }
- if err := m.OrigTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NewName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.NewName = string(data[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NewTime", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.NewTime == nil {
- m.NewTime = &pbtypes.Timestamp{}
- }
- if err := m.NewTime.Unmarshal(data[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Extended", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Extended = append(m.Extended, string(data[iNdEx:postIndex]))
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hunks", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Hunks = append(m.Hunks, &Hunk{})
- if err := m.Hunks[len(m.Hunks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipDiff(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthDiff
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Hunk) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Hunk: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Hunk: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OrigStartLine", wireType)
- }
- m.OrigStartLine = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.OrigStartLine |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OrigLines", wireType)
- }
- m.OrigLines = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.OrigLines |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field OrigNoNewlineAt", wireType)
- }
- m.OrigNoNewlineAt = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.OrigNoNewlineAt |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NewStartLine", wireType)
- }
- m.NewStartLine = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.NewStartLine |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NewLines", wireType)
- }
- m.NewLines = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.NewLines |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Section", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Section = string(data[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartPosition", wireType)
- }
- m.StartPosition = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.StartPosition |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthDiff
- }
- postIndex := iNdEx + byteLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Body = append([]byte{}, data[iNdEx:postIndex]...)
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipDiff(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthDiff
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Stat) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Stat: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Stat: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Added", wireType)
- }
- m.Added = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.Added |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Changed", wireType)
- }
- m.Changed = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.Changed |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
- }
- m.Deleted = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.Deleted |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipDiff(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthDiff
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipDiff(data []byte) (n int, err error) {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if data[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthDiff
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowDiff
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipDiff(data[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.proto b/vendor/github.com/sourcegraph/go-diff/diff/diff.proto
deleted file mode 100644
index 8868970f..00000000
--- a/vendor/github.com/sourcegraph/go-diff/diff/diff.proto
+++ /dev/null
@@ -1,81 +0,0 @@
-syntax = "proto3";
-package diff;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "sourcegraph.com/sqs/pbtypes/timestamp.proto";
-
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-
-// A FileDiff represents a unified diff for a single file.
-//
-// A file unified diff has a header that resembles the following:
-//
-// --- oldname 2009-10-11 15:12:20.000000000 -0700
-// +++ newname 2009-10-11 15:12:30.000000000 -0700
-message FileDiff {
- // the original name of the file
- string OrigName = 1;
-
- // the original timestamp (nil if not present)
- pbtypes.Timestamp OrigTime = 2;
-
- // the new name of the file (often same as OrigName)
- string NewName = 3;
-
- // the new timestamp (nil if not present)
- pbtypes.Timestamp NewTime = 4;
-
- // extended header lines (e.g., git's "new mode ", "rename from ", etc.)
- repeated string Extended = 5;
-
- // hunks that were changed from orig to new
- repeated Hunk Hunks = 6;
-}
-
-
-// A Hunk represents a series of changes (additions or deletions) in a file's
-// unified diff.
-message Hunk {
- // starting line number in original file
- int32 OrigStartLine = 1;
-
- // number of lines the hunk applies to in the original file
- int32 OrigLines = 2;
-
- // if > 0, then the original file had a 'No newline at end of file' mark at this offset
- int32 OrigNoNewlineAt = 3;
-
- // starting line number in new file
- int32 NewStartLine = 4;
-
- // number of lines the hunk applies to in the new file
- int32 NewLines = 5;
-
- // optional section heading
- string Section = 6;
-
- // 0-indexed line offset in unified file diff (including section headers); this is
- // only set when Hunks are read from entire file diff (i.e., when ReadAllHunks is
- // called) This accounts for hunk headers, too, so the StartPosition of the first
- // hunk will be 1.
- int32 StartPosition = 7;
-
- // hunk body (lines prefixed with '-', '+', or ' ')
- bytes Body = 8;
-}
-
-// A Stat is a diff stat that represents the number of lines added/changed/deleted.
-message Stat {
- // number of lines added
- int32 Added = 1 [(gogoproto.jsontag) = ""];
-
- // number of lines changed
- int32 Changed = 2 [(gogoproto.jsontag) = ""];
-
- // number of lines deleted
- int32 Deleted = 3 [(gogoproto.jsontag) = ""];
-}
-
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/parse.go b/vendor/github.com/sourcegraph/go-diff/diff/parse.go
index 1e77df48..67435023 100644
--- a/vendor/github.com/sourcegraph/go-diff/diff/parse.go
+++ b/vendor/github.com/sourcegraph/go-diff/diff/parse.go
@@ -6,15 +6,16 @@ import (
"errors"
"fmt"
"io"
+ "path/filepath"
+ "strconv"
"strings"
"time"
-
- "sourcegraph.com/sqs/pbtypes"
)
-// ParseMultiFileDiff parses a multi-file unified diff. It returns an error if parsing failed as a whole, but does its
-// best to parse as many files in the case of per-file errors. In the case of non-fatal per-file errors, the error
-// return value is null and the Errs field in the returned MultiFileDiff is set.
+// ParseMultiFileDiff parses a multi-file unified diff. It returns an error if
+// parsing failed as a whole, but does its best to parse as many files in the
+// case of per-file errors. If it cannot detect when the diff of the next file
+// begins, the hunks are added to the FileDiff of the previous file.
func ParseMultiFileDiff(diff []byte) ([]*FileDiff, error) {
return NewMultiFileDiffReader(bytes.NewReader(diff)).ReadAllFiles()
}
@@ -70,6 +71,12 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) {
}
}
+ // FileDiff is added/deleted file
+ // No further collection of hunks needed
+ if fd.NewName == "" {
+ return fd, nil
+ }
+
// Before reading hunks, check to see if there are any. If there
// aren't any, and there's another file after this file in the
// diff, then the hunks reader will complain ErrNoHunkHeader. It's
@@ -78,7 +85,7 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) {
// need to perform the check here.
hr := fr.HunksReader()
line, err := readLine(r.reader)
- if err != nil {
+ if err != nil && err != io.EOF {
return fd, err
}
line = bytes.TrimSuffix(line, []byte{'\n'})
@@ -196,12 +203,10 @@ func (r *FileDiffReader) ReadAllHeaders() (*FileDiff, error) {
return nil, err
}
if origTime != nil {
- ts := pbtypes.NewTimestamp(*origTime)
- fd.OrigTime = &ts
+ fd.OrigTime = origTime
}
if newTime != nil {
- ts := pbtypes.NewTimestamp(*newTime)
- fd.NewTime = &ts
+ fd.NewTime = newTime
}
return fd, nil
@@ -221,8 +226,16 @@ func (r *FileDiffReader) HunksReader() *HunksReader {
// ReadFileHeaders reads the unified file diff header (the lines that
// start with "---" and "+++" with the orig/new file names and
-// timestamps).
+// timestamps). Or which starts with "Only in " with dir path and filename.
+// "Only in" message is supported in POSIX locale: https://pubs.opengroup.org/onlinepubs/9699919799/utilities/diff.html#tag_20_34_10
func (r *FileDiffReader) ReadFileHeaders() (origName, newName string, origTimestamp, newTimestamp *time.Time, err error) {
+ if r.fileHeaderLine != nil {
+ if isOnlyMessage, source, filename := parseOnlyInMessage(r.fileHeaderLine); isOnlyMessage {
+ return filepath.Join(string(source), string(filename)),
+ "", nil, nil, nil
+ }
+ }
+
origName, origTimestamp, err = r.readOneFileHeader([]byte("--- "))
if err != nil {
return "", "", nil, nil, err
@@ -233,6 +246,15 @@ func (r *FileDiffReader) ReadFileHeaders() (origName, newName string, origTimest
return "", "", nil, nil, err
}
+ unquotedOrigName, err := strconv.Unquote(origName)
+ if err == nil {
+ origName = unquotedOrigName
+ }
+ unquotedNewName, err := strconv.Unquote(newName)
+ if err == nil {
+ newName = unquotedNewName
+ }
+
return origName, newName, origTimestamp, newTimestamp, nil
}
@@ -282,7 +304,7 @@ func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, time
type OverflowError string
func (e OverflowError) Error() string {
- return fmt.Sprintf("overflowed into next file: %s", e)
+ return fmt.Sprintf("overflowed into next file: %s", string(e))
}
// ReadExtendedHeaders reads the extended header lines, if any, from a
@@ -319,6 +341,12 @@ func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) {
return xheaders, nil
}
+ // Reached message that file is added/deleted
+ if isOnlyInMessage, _, _ := parseOnlyInMessage(line); isOnlyInMessage {
+ r.fileHeaderLine = line // pass to readOneFileHeader (see fileHeaderLine field doc)
+ return xheaders, nil
+ }
+
r.line++
r.offset += int64(len(line))
xheaders = append(xheaders, string(line))
@@ -328,30 +356,58 @@ func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) {
// handleEmpty detects when FileDiff was an empty diff and will not have any hunks
// that follow. It updates fd fields from the parsed extended headers.
func handleEmpty(fd *FileDiff) (wasEmpty bool) {
+ var err error
+ lineCount := len(fd.Extended)
+ if lineCount > 0 && !strings.HasPrefix(fd.Extended[0], "diff --git ") {
+ return false
+ }
switch {
- case (len(fd.Extended) == 3 || len(fd.Extended) == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ")) &&
- strings.HasPrefix(fd.Extended[1], "new file mode ") && strings.HasPrefix(fd.Extended[0], "diff --git "):
+ case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) &&
+ strings.HasPrefix(fd.Extended[1], "new file mode "):
names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
fd.OrigName = "/dev/null"
- fd.NewName = names[1]
+ fd.NewName, err = strconv.Unquote(names[1])
+ if err != nil {
+ fd.NewName = names[1]
+ }
return true
- case (len(fd.Extended) == 3 || len(fd.Extended) == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ")) &&
- strings.HasPrefix(fd.Extended[1], "deleted file mode ") && strings.HasPrefix(fd.Extended[0], "diff --git "):
+ case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) &&
+ strings.HasPrefix(fd.Extended[1], "deleted file mode "):
names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
- fd.OrigName = names[0]
+ fd.OrigName, err = strconv.Unquote(names[0])
+ if err != nil {
+ fd.OrigName = names[0]
+ }
fd.NewName = "/dev/null"
return true
- case len(fd.Extended) == 4 && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to ") && strings.HasPrefix(fd.Extended[0], "diff --git "):
+ case lineCount == 4 && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "):
+ names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
+ fd.OrigName, err = strconv.Unquote(names[0])
+ if err != nil {
+ fd.OrigName = names[0]
+ }
+ fd.NewName, err = strconv.Unquote(names[1])
+ if err != nil {
+ fd.NewName = names[1]
+ }
+ return true
+ case lineCount == 6 && strings.HasPrefix(fd.Extended[5], "Binary files ") && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "):
names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
fd.OrigName = names[0]
fd.NewName = names[1]
return true
- case len(fd.Extended) == 3 && strings.HasPrefix(fd.Extended[2], "Binary files ") && strings.HasPrefix(fd.Extended[0], "diff --git "):
+ case lineCount == 3 && strings.HasPrefix(fd.Extended[2], "Binary files ") || lineCount > 3 && strings.HasPrefix(fd.Extended[2], "GIT binary patch"):
names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2)
- fd.OrigName = names[0]
- fd.NewName = names[1]
+ fd.OrigName, err = strconv.Unquote(names[0])
+ if err != nil {
+ fd.OrigName = names[0]
+ }
+ fd.NewName, err = strconv.Unquote(names[1])
+ if err != nil {
+ fd.NewName = names[1]
+ }
return true
default:
return false
@@ -369,6 +425,10 @@ var (
// ErrExtendedHeadersEOF is when an EOF was encountered while reading extended file headers, which means that there were no ---/+++ headers encountered before hunks (if any) began.
ErrExtendedHeadersEOF = errors.New("expected file header while reading extended headers, got EOF")
+
+ // ErrBadOnlyInMessage is when a file has a malformed `only in` message
+ // Should be in format `Only in {source}: {filename}`
+ ErrBadOnlyInMessage = errors.New("bad 'only in' message")
)
// ParseHunks parses hunks from a unified diff. The diff must consist
@@ -467,7 +527,7 @@ func (r *HunksReader) ReadHunk() (*Hunk, error) {
return r.hunk, nil
}
- if len(line) >= 1 && !linePrefix(line[0]) {
+ if len(line) >= 1 && (!linePrefix(line[0]) || bytes.HasPrefix(line, []byte("--- "))) {
// Bad hunk header line. If we're reading a multi-file
// diff, this may be the end of the current
// file. Return a "rich" error that lets our caller
@@ -578,6 +638,19 @@ func (r *HunksReader) ReadAllHunks() ([]*Hunk, error) {
}
}
+// parseOnlyInMessage checks if line is an "Only in {source}: {filename}" message and returns source and filename
+func parseOnlyInMessage(line []byte) (bool, []byte, []byte) {
+ if !bytes.HasPrefix(line, onlyInMessagePrefix) {
+ return false, nil, nil
+ }
+ line = line[len(onlyInMessagePrefix):]
+ idx := bytes.Index(line, []byte(": "))
+ if idx < 0 {
+ return false, nil, nil
+ }
+ return true, line[:idx], line[idx+2:]
+}
+
// A ParseError is a description of a unified diff syntax error.
type ParseError struct {
Line int // Line where the error occurred
diff --git a/vendor/github.com/sourcegraph/go-diff/diff/print.go b/vendor/github.com/sourcegraph/go-diff/diff/print.go
index d440cb9a..012651a3 100644
--- a/vendor/github.com/sourcegraph/go-diff/diff/print.go
+++ b/vendor/github.com/sourcegraph/go-diff/diff/print.go
@@ -4,9 +4,8 @@ import (
"bytes"
"fmt"
"io"
+ "path/filepath"
"time"
-
- "sourcegraph.com/sqs/pbtypes"
)
// PrintMultiFileDiff prints a multi-file diff in unified diff format.
@@ -36,14 +35,24 @@ func PrintFileDiff(d *FileDiff) ([]byte, error) {
}
}
+ // FileDiff is added/deleted file
+ // No further hunks printing needed
+ if d.NewName == "" {
+ _, err := fmt.Fprintf(&buf, onlyInMessage, filepath.Dir(d.OrigName), filepath.Base(d.OrigName))
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ }
+
if d.Hunks == nil {
return buf.Bytes(), nil
}
- if err := printFileHeader(&buf, "--- ", d.OrigName, timePtr(d.OrigTime)); err != nil {
+ if err := printFileHeader(&buf, "--- ", d.OrigName, d.OrigTime); err != nil {
return nil, err
}
- if err := printFileHeader(&buf, "+++ ", d.NewName, timePtr(d.NewTime)); err != nil {
+ if err := printFileHeader(&buf, "+++ ", d.NewName, d.NewTime); err != nil {
return nil, err
}
@@ -58,14 +67,6 @@ func PrintFileDiff(d *FileDiff) ([]byte, error) {
return buf.Bytes(), nil
}
-func timePtr(ts *pbtypes.Timestamp) *time.Time {
- if ts == nil {
- return nil
- }
- t := ts.Time()
- return &t
-}
-
func printFileHeader(w io.Writer, prefix string, filename string, timestamp *time.Time) error {
if _, err := fmt.Fprint(w, prefix, filename); err != nil {
return err
diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore
new file mode 100644
index 00000000..9c1d9861
--- /dev/null
+++ b/vendor/github.com/spf13/afero/.gitignore
@@ -0,0 +1,2 @@
+sftpfs/file1
+sftpfs/test/
diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml
index 0637db72..14596449 100644
--- a/vendor/github.com/spf13/afero/.travis.yml
+++ b/vendor/github.com/spf13/afero/.travis.yml
@@ -1,21 +1,22 @@
-sudo: false
-language: go
-
-go:
- - 1.9
- - "1.10"
- - tip
-
-os:
- - linux
- - osx
-
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-
-script:
- - go build
- - go test -race -v ./...
-
+sudo: false
+language: go
+
+go:
+ - "1.13"
+ - "1.14"
+ - tip
+
+os:
+ - linux
+ - osx
+
+matrix:
+ allow_failures:
+ - go: tip
+ fast_finish: true
+
+script:
+ - go build -v ./...
+ - go test -count=1 -cover -race -v ./...
+ - go vet ./...
+ - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
index 0c9b04b5..c3e807ae 100644
--- a/vendor/github.com/spf13/afero/README.md
+++ b/vendor/github.com/spf13/afero/README.md
@@ -6,7 +6,7 @@ A FileSystem Abstraction System for Go
# Overview
-Afero is an filesystem framework providing a simple, uniform and universal API
+Afero is a filesystem framework providing a simple, uniform and universal API
interacting with any filesystem, as an abstraction layer providing interfaces,
types and methods. Afero has an exceptionally clean interface and simple design
without needless constructors or initialization methods.
@@ -18,7 +18,7 @@ and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
-It is suitable for use in a any situation where you would consider using the OS
+It is suitable for use in any situation where you would consider using the OS
package as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.
@@ -41,8 +41,8 @@ Afero is easy to use and easier to adopt.
A few different ways you could use Afero:
-* Use the interfaces alone to define you own file system.
-* Wrap for the OS packages.
+* Use the interfaces alone to define your own file system.
+* Wrapper for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
@@ -227,7 +227,7 @@ operation and a mock filesystem during testing or as needed.
```go
appfs := afero.NewOsFs()
-appfs.MkdirAll("src/a", 0755))
+appfs.MkdirAll("src/a", 0755)
```
## Memory Backed Storage
@@ -241,7 +241,7 @@ safely.
```go
mm := afero.NewMemMapFs()
-mm.MkdirAll("src/a", 0755))
+mm.MkdirAll("src/a", 0755)
```
#### InMemoryFile
@@ -306,7 +306,7 @@ Any Afero FileSystem can be used as an httpFs.
```go
httpFs := afero.NewHttpFs()
-fileserver := http.FileServer(httpFs.Dir()))
+fileserver := http.FileServer(httpFs.Dir())
http.Handle("/", fileserver)
```
@@ -380,8 +380,6 @@ The following is a short list of possible backends we hope someone will
implement:
* SSH
-* ZIP
-* TAR
* S3
# About the project
@@ -406,28 +404,7 @@ Googles very well.
## Release Notes
-* **0.10.0** 2015.12.10
- * Full compatibility with Windows
- * Introduction of afero utilities
- * Test suite rewritten to work cross platform
- * Normalize paths for MemMapFs
- * Adding Sync to the file interface
- * **Breaking Change** Walk and ReadDir have changed parameter order
- * Moving types used by MemMapFs to a subpackage
- * General bugfixes and improvements
-* **0.9.0** 2015.11.05
- * New Walk function similar to filepath.Walk
- * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
- * MemMapFs.Remove now really deletes the file
- * InMemoryFile.Readdir and Readdirnames work correctly
- * InMemoryFile functions lock it for concurrent access
- * Test suite improvements
-* **0.8.0** 2014.10.28
- * First public version
- * Interfaces feel ready for people to build using
- * Interfaces satisfy all known uses
- * MemMapFs passes the majority of the OS test suite
- * OsFs passes the majority of the OS test suite
+See the [Releases Page](https://github.com/spf13/afero/releases).
## Contributing
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
index a633ad50..5d2f34bf 100644
--- a/vendor/github.com/spf13/afero/appveyor.yml
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -10,6 +10,6 @@ build_script:
go get -v github.com/spf13/afero/...
- go build github.com/spf13/afero
+ go build -v github.com/spf13/afero/...
test_script:
-- cmd: go test -race -v github.com/spf13/afero/...
+- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/...
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
index 616ff8ff..3a14b833 100644
--- a/vendor/github.com/spf13/afero/basepath.go
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -177,4 +177,30 @@ func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
return fi, false, err
}
+func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
+ oldname, err := b.RealPath(oldname)
+ if err != nil {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
+ }
+ newname, err = b.RealPath(newname)
+ if err != nil {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
+ }
+ if linker, ok := b.source.(Linker); ok {
+ return linker.SymlinkIfPossible(oldname, newname)
+ }
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
+ name, err := b.RealPath(name)
+ if err != nil {
+ return "", &os.PathError{Op: "readlink", Path: name, Err: err}
+ }
+ if reader, ok := b.source.(LinkReader); ok {
+ return reader.ReadlinkIfPossible(name)
+ }
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
// vim: ts=4 sw=4 noexpandtab nolist syn=go
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
index 5728243d..18b45824 100644
--- a/vendor/github.com/spf13/afero/const_bsds.go
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build darwin openbsd freebsd netbsd dragonfly
+// +build aix darwin openbsd freebsd netbsd dragonfly
package afero
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
index 968fc278..2b850e4d 100644
--- a/vendor/github.com/spf13/afero/const_win_unix.go
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -15,6 +15,7 @@
// +build !freebsd
// +build !dragonfly
// +build !netbsd
+// +build !aix
package afero
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
index 9aef3979..96b77012 100644
--- a/vendor/github.com/spf13/afero/copyOnWriteFs.go
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -117,6 +117,26 @@ func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error)
return fi, false, err
}
+func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error {
+ if slayer, ok := u.layer.(Linker); ok {
+ return slayer.SymlinkIfPossible(oldname, newname)
+ }
+
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) {
+ if rlayer, ok := u.layer.(LinkReader); ok {
+ return rlayer.ReadlinkIfPossible(name)
+ }
+
+ if rbase, ok := u.base.(LinkReader); ok {
+ return rbase.ReadlinkIfPossible(name)
+ }
+
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
func (u *CopyOnWriteFs) isNotExist(err error) bool {
if e, ok := err.(*os.PathError); ok {
err = e.Err
@@ -267,7 +287,7 @@ func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
return u.layer.MkdirAll(name, perm)
}
if dir {
- return syscall.EEXIST
+ return ErrFileExists
}
return u.layer.MkdirAll(name, perm)
}
@@ -282,7 +302,8 @@ func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
return u.layer.MkdirAll(name, perm)
}
if dir {
- return syscall.EEXIST
+ // This is in line with how os.MkdirAll behaves.
+ return nil
}
return u.layer.MkdirAll(name, perm)
}
diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod
index 9eff4fed..abe4fe1c 100644
--- a/vendor/github.com/spf13/afero/go.mod
+++ b/vendor/github.com/spf13/afero/go.mod
@@ -1 +1,9 @@
module github.com/spf13/afero
+
+require (
+ github.com/pkg/sftp v1.10.1
+ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586
+ golang.org/x/text v0.3.3
+)
+
+go 1.13
diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum
new file mode 100644
index 00000000..89d9bfbc
--- /dev/null
+++ b/vendor/github.com/spf13/afero/go.sum
@@ -0,0 +1,29 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
index 5c3a3d8f..a403133e 100644
--- a/vendor/github.com/spf13/afero/ioutil.go
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -22,6 +22,7 @@ import (
"path/filepath"
"sort"
"strconv"
+ "strings"
"sync"
"time"
)
@@ -147,7 +148,7 @@ func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
-func nextSuffix() string {
+func nextRandom() string {
randmu.Lock()
r := rand
if r == 0 {
@@ -159,27 +160,36 @@ func nextSuffix() string {
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
-// TempFile creates a new temporary file in the directory dir
-// with a name beginning with prefix, opens the file for reading
-// and writing, and returns the resulting *File.
+// TempFile creates a new temporary file in the directory dir,
+// opens the file for reading and writing, and returns the resulting *os.File.
+// The filename is generated by taking pattern and adding a random
+// string to the end. If pattern includes a "*", the random string
+// replaces the last "*".
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
-// will not choose the same file. The caller can use f.Name()
-// to find the pathname of the file. It is the caller's responsibility
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
-func (a Afero) TempFile(dir, prefix string) (f File, err error) {
- return TempFile(a.Fs, dir, prefix)
+func (a Afero) TempFile(dir, pattern string) (f File, err error) {
+ return TempFile(a.Fs, dir, pattern)
}
-func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+func TempFile(fs Fs, dir, pattern string) (f File, err error) {
if dir == "" {
dir = os.TempDir()
}
+ var prefix, suffix string
+ if pos := strings.LastIndex(pattern, "*"); pos != -1 {
+ prefix, suffix = pattern[:pos], pattern[pos+1:]
+ } else {
+ prefix = pattern
+ }
+
nconflict := 0
for i := 0; i < 10000; i++ {
- name := filepath.Join(dir, prefix+nextSuffix())
+ name := filepath.Join(dir, prefix+nextRandom()+suffix)
f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
@@ -211,7 +221,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) {
nconflict := 0
for i := 0; i < 10000; i++ {
- try := filepath.Join(dir, prefix+nextSuffix())
+ try := filepath.Join(dir, prefix+nextRandom())
err = fs.Mkdir(try, 0700)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
index c18a87fb..7db4b7de 100644
--- a/vendor/github.com/spf13/afero/match.go
+++ b/vendor/github.com/spf13/afero/match.go
@@ -106,5 +106,5 @@ func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
- return strings.IndexAny(path, "*?[") >= 0
+ return strings.ContainsAny(path, "*?[")
}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
index 7af2fb56..07b2e12a 100644
--- a/vendor/github.com/spf13/afero/mem/file.go
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -193,8 +193,11 @@ func (f *File) Read(b []byte) (n int, err error) {
}
func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+ prev := atomic.LoadInt64(&f.at)
atomic.StoreInt64(&f.at, off)
- return f.Read(b)
+ n, err = f.Read(b)
+ atomic.StoreInt64(&f.at, prev)
+ return
}
func (f *File) Truncate(size int64) error {
@@ -222,17 +225,20 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
return 0, ErrFileClosed
}
switch whence {
- case 0:
+ case io.SeekStart:
atomic.StoreInt64(&f.at, offset)
- case 1:
- atomic.AddInt64(&f.at, int64(offset))
- case 2:
+ case io.SeekCurrent:
+ atomic.AddInt64(&f.at, offset)
+ case io.SeekEnd:
atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
}
return f.at, nil
}
func (f *File) Write(b []byte) (n int, err error) {
+ if f.closed == true {
+ return 0, ErrFileClosed
+ }
if f.readOnly {
return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
}
@@ -254,7 +260,7 @@ func (f *File) Write(b []byte) (n int, err error) {
}
setModTime(f.fileData, time.Now())
- atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
+ atomic.AddInt64(&f.at, int64(n))
return
}
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index 09498e70..0fa95924 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -25,6 +25,8 @@ import (
"github.com/spf13/afero/mem"
)
+const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod()
+
type MemMapFs struct {
mu sync.RWMutex
data map[string]*mem.FileData
@@ -40,7 +42,9 @@ func (m *MemMapFs) getData() map[string]*mem.FileData {
m.data = make(map[string]*mem.FileData)
// Root should always exist, right?
// TODO: what about windows?
- m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
+ root := mem.CreateDir(FilePathSeparator)
+ mem.SetMode(root, os.ModeDir|0755)
+ m.data[FilePathSeparator] = root
})
return m.data
}
@@ -52,7 +56,7 @@ func (m *MemMapFs) Create(name string) (File, error) {
m.mu.Lock()
file := mem.CreateFile(name)
m.getData()[name] = file
- m.registerWithParent(file)
+ m.registerWithParent(file, 0)
m.mu.Unlock()
return mem.NewFileHandle(file), nil
}
@@ -83,14 +87,14 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
return pfile
}
-func (m *MemMapFs) registerWithParent(f *mem.FileData) {
+func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
if f == nil {
return
}
parent := m.findParent(f)
if parent == nil {
pdir := filepath.Dir(filepath.Clean(f.Name()))
- err := m.lockfreeMkdir(pdir, 0777)
+ err := m.lockfreeMkdir(pdir, perm)
if err != nil {
//log.Println("Mkdir error:", err)
return
@@ -119,13 +123,15 @@ func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
}
} else {
item := mem.CreateDir(name)
+ mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
- m.registerWithParent(item)
+ m.registerWithParent(item, perm)
}
return nil
}
func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+ perm &= chmodBits
name = normalizePath(name)
m.mu.RLock()
@@ -137,13 +143,12 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
m.mu.Lock()
item := mem.CreateDir(name)
+ mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
- m.registerWithParent(item)
+ m.registerWithParent(item, perm)
m.mu.Unlock()
- m.Chmod(name, perm|os.ModeDir)
-
- return nil
+ return m.setFileMode(name, perm|os.ModeDir)
}
func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
@@ -210,8 +215,12 @@ func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
}
func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ perm &= chmodBits
chmod := false
file, err := m.openWrite(name)
+ if err == nil && (flag&os.O_EXCL > 0) {
+ return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists}
+ }
if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
file, err = m.Create(name)
chmod = true
@@ -237,7 +246,7 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro
}
}
if chmod {
- m.Chmod(name, perm)
+ return file, m.setFileMode(name, perm)
}
return file, nil
}
@@ -269,7 +278,7 @@ func (m *MemMapFs) RemoveAll(path string) error {
m.mu.RLock()
defer m.mu.RUnlock()
- for p, _ := range m.getData() {
+ for p := range m.getData() {
if strings.HasPrefix(p, path) {
m.mu.RUnlock()
m.mu.Lock()
@@ -299,7 +308,7 @@ func (m *MemMapFs) Rename(oldname, newname string) error {
delete(m.getData(), oldname)
mem.ChangeFileName(fileData, newname)
m.getData()[newname] = fileData
- m.registerWithParent(fileData)
+ m.registerWithParent(fileData, 0)
m.mu.Unlock()
m.mu.RLock()
} else {
@@ -308,6 +317,11 @@ func (m *MemMapFs) Rename(oldname, newname string) error {
return nil
}
+func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+ fileInfo, err := m.Stat(name)
+ return fileInfo, false, err
+}
+
func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
f, err := m.Open(name)
if err != nil {
@@ -318,6 +332,21 @@ func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
}
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+ mode &= chmodBits
+
+ m.mu.RLock()
+ f, ok := m.getData()[name]
+ m.mu.RUnlock()
+ if !ok {
+ return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+ }
+ prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits
+
+ mode = prevOtherBits | mode
+ return m.setFileMode(name, mode)
+}
+
+func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
index 13cc1b84..4761db5d 100644
--- a/vendor/github.com/spf13/afero/os.go
+++ b/vendor/github.com/spf13/afero/os.go
@@ -99,3 +99,11 @@ func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fi, err := os.Lstat(name)
return fi, true, err
}
+
+func (OsFs) SymlinkIfPossible(oldname, newname string) error {
+ return os.Symlink(oldname, newname)
+}
+
+func (OsFs) ReadlinkIfPossible(name string) (string, error) {
+ return os.Readlink(name)
+}
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
index c6376ec3..f94b181b 100644
--- a/vendor/github.com/spf13/afero/readonlyfs.go
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -44,6 +44,18 @@ func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
return fi, false, err
}
+func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error {
+ return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
+}
+
+func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) {
+ if srdr, ok := r.source.(LinkReader); ok {
+ return srdr.ReadlinkIfPossible(name)
+ }
+
+ return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
+}
+
func (r *ReadOnlyFs) Rename(o, n string) error {
return syscall.EPERM
}
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
index 9d92dbc0..c8fc0086 100644
--- a/vendor/github.com/spf13/afero/regexpfs.go
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -126,6 +126,9 @@ func (r *RegexpFs) Open(name string) (File, error) {
}
}
f, err := r.source.Open(name)
+ if err != nil {
+ return nil, err
+ }
return &RegexpFile{f: f, re: r.re}, nil
}
diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go
new file mode 100644
index 00000000..d1c6ea53
--- /dev/null
+++ b/vendor/github.com/spf13/afero/symlink.go
@@ -0,0 +1,55 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "errors"
+)
+
+// Symlinker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It indicates support for 3 symlink related interfaces that implement the
+// behaviors of the os methods:
+// - Lstat
+// - Symlink, and
+// - Readlink
+type Symlinker interface {
+ Lstater
+ Linker
+ LinkReader
+}
+
+// Linker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem,
+// or the filesystem otherwise supports Symlink's.
+type Linker interface {
+ SymlinkIfPossible(oldname, newname string) error
+}
+
+// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
+// does not support Symlink's either directly or through its delegated filesystem.
+// As expressed by support for the Linker interface.
+var ErrNoSymlink = errors.New("symlink not supported")
+
+// LinkReader is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+type LinkReader interface {
+ ReadlinkIfPossible(name string) (string, error)
+}
+
+// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system
+// does not support the readlink operation either directly or through its delegated filesystem.
+// As expressed by support for the LinkReader interface.
+var ErrNoReadlink = errors.New("readlink not supported")
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
index 1e78f7d1..985363ee 100644
--- a/vendor/github.com/spf13/afero/unionFile.go
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -155,7 +155,8 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err
}
// Readdir will weave the two directories together and
-// return a single view of the overlayed directories
+// return a single view of the overlayed directories.
+// At the end of the directory view, the error is io.EOF if c > 0.
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
var merge DirsMerger = f.Merger
if merge == nil {
@@ -185,11 +186,22 @@ func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
}
f.files = append(f.files, merged...)
}
- if c == -1 {
- return f.files[f.off:], nil
+ files := f.files[f.off:]
+
+ if c <= 0 {
+ return files, nil
}
+
+ if len(files) == 0 {
+ return nil, io.EOF
+ }
+
+ if c > len(files) {
+ c = len(files)
+ }
+
defer func() { f.off += c }()
- return f.files[f.off:c], nil
+ return files[:c], nil
}
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
diff --git a/vendor/github.com/spf13/cast/.travis.yml b/vendor/github.com/spf13/cast/.travis.yml
index 6420d1c2..833a4879 100644
--- a/vendor/github.com/spf13/cast/.travis.yml
+++ b/vendor/github.com/spf13/cast/.travis.yml
@@ -4,6 +4,7 @@ env:
sudo: required
go:
- "1.11.x"
+ - "1.12.x"
- tip
os:
- linux
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
index 7ccf8930..f01a5dbb 100644
--- a/vendor/github.com/spf13/cast/Makefile
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -1,4 +1,4 @@
-# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2)
.PHONY: check fmt lint test test-race vet test-cover-html help
.DEFAULT_GOAL := help
@@ -12,11 +12,13 @@ test-race: ## Run tests with race detector
go test -race ./...
fmt: ## Run gofmt linter
+ifeq "$(GOVERSION)" "12"
@for d in `go list` ; do \
if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
echo "^ improperly formatted go files" && echo && exit 1; \
fi \
done
+endif
lint: ## Run golint linter
@for d in `go list` ; do \
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index a4859fb0..70c7291b 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -819,15 +819,15 @@ func ToStringE(i interface{}) (string, error) {
case int8:
return strconv.FormatInt(int64(s), 10), nil
case uint:
- return strconv.FormatInt(int64(s), 10), nil
+ return strconv.FormatUint(uint64(s), 10), nil
case uint64:
- return strconv.FormatInt(int64(s), 10), nil
+ return strconv.FormatUint(uint64(s), 10), nil
case uint32:
- return strconv.FormatInt(int64(s), 10), nil
+ return strconv.FormatUint(uint64(s), 10), nil
case uint16:
- return strconv.FormatInt(int64(s), 10), nil
+ return strconv.FormatUint(uint64(s), 10), nil
case uint8:
- return strconv.FormatInt(int64(s), 10), nil
+ return strconv.FormatUint(uint64(s), 10), nil
case []byte:
return string(s), nil
case template.HTML:
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
index 3b053c59..c7b459e4 100644
--- a/vendor/github.com/spf13/cobra/.gitignore
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -32,7 +32,8 @@ Session.vim
tags
*.exe
-
cobra.test
+bin
-.idea/*
+.idea/
+*.iml
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
index 38b85f49..a9bd4e54 100644
--- a/vendor/github.com/spf13/cobra/.travis.yml
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -3,29 +3,27 @@ language: go
stages:
- diff
- test
+ - build
go:
- - 1.10.x
- - 1.11.x
- 1.12.x
+ - 1.13.x
- tip
+before_install:
+ - go get -u github.com/kyoh86/richgo
+ - go get -u github.com/mitchellh/gox
+
matrix:
allow_failures:
- go: tip
include:
- stage: diff
- go: 1.12.x
- script: diff -u <(echo -n) <(gofmt -d -s .)
+ go: 1.13.x
+ script: make fmt
+ - stage: build
+ go: 1.13.x
+ script: make cobra_generator
-before_install:
- - mkdir -p bin
- - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck
- - chmod +x bin/shellcheck
- - go get -u github.com/kyoh86/richgo
-script:
- - PATH=$PATH:$PWD/bin richgo test -v ./...
- - go build
- - if [ -z $NOVET ]; then
- diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
- fi
+script:
+ - make test
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
new file mode 100644
index 00000000..e9740d1e
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -0,0 +1,36 @@
+BIN="./bin"
+SRC=$(shell find . -name "*.go")
+
+ifeq (, $(shell which richgo))
+$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
+endif
+
+.PHONY: fmt vet test cobra_generator install_deps clean
+
+default: all
+
+all: fmt vet test cobra_generator
+
+fmt:
+ $(info ******************** checking formatting ********************)
+ @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
+
+test: install_deps vet
+ $(info ******************** running tests ********************)
+ richgo test -v ./...
+
+cobra_generator: install_deps
+ $(info ******************** building generator ********************)
+ mkdir -p $(BIN)
+ make -C cobra all
+
+install_deps:
+ $(info ******************** downloading dependencies ********************)
+ go get -v ./...
+
+vet:
+ $(info ******************** vetting ********************)
+ go vet ./...
+
+clean:
+ rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 60c5a425..9d799342 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -24,11 +24,13 @@ Many of the most widely used Go projects are built using Cobra, such as:
[Prototool](https://github.com/uber/prototool),
[mattermost-server](https://github.com/mattermost/mattermost-server),
[Gardener](https://github.com/gardener/gardenctl),
+[Linkerd](https://linkerd.io/),
+[GitHub CLI](https://github.com/cli/cli),
etc.
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
-[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
# Table of Contents
@@ -208,51 +210,78 @@ You will additionally define flags and handle configuration in your init() funct
For example cmd/root.go:
```go
-import (
- "fmt"
- "os"
+package cmd
- homedir "github.com/mitchellh/go-homedir"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
+import (
+ "fmt"
+ "os"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/cobra"
+ "github.com/spf13/viper"
)
+var (
+ // Used for flags.
+ cfgFile string
+ userLicense string
+
+ rootCmd = &cobra.Command{
+ Use: "cobra",
+ Short: "A generator for Cobra based Applications",
+ Long: `Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+ }
+)
+
+// Execute executes the root command.
+func Execute() error {
+ return rootCmd.Execute()
+}
+
func init() {
- cobra.OnInitialize(initConfig)
- rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
- rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
- rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
- rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
- rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
- viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
- viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
- viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
- viper.SetDefault("author", "NAME HERE ")
- viper.SetDefault("license", "apache")
+ cobra.OnInitialize(initConfig)
+
+ rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+ rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
+ rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
+ rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
+ viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+ viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+ viper.SetDefault("author", "NAME HERE ")
+ viper.SetDefault("license", "apache")
+
+ rootCmd.AddCommand(addCmd)
+ rootCmd.AddCommand(initCmd)
+}
+
+func er(msg interface{}) {
+ fmt.Println("Error:", msg)
+ os.Exit(1)
}
func initConfig() {
- // Don't forget to read config either from cfgFile or from home directory!
- if cfgFile != "" {
- // Use config file from the flag.
- viper.SetConfigFile(cfgFile)
- } else {
- // Find home directory.
- home, err := homedir.Dir()
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
+ if cfgFile != "" {
+ // Use config file from the flag.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ er(err)
+ }
- // Search config in home directory with name ".cobra" (without extension).
- viper.AddConfigPath(home)
- viper.SetConfigName(".cobra")
- }
+ // Search config in home directory with name ".cobra" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".cobra")
+ }
- if err := viper.ReadInConfig(); err != nil {
- fmt.Println("Can't read config:", err)
- os.Exit(1)
- }
+ viper.AutomaticEnv()
+
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
}
```
@@ -459,7 +488,7 @@ For many years people have printed back to the screen.`,
Echo works a lot like print, except it has a child command.`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("Print: " + strings.Join(args, " "))
+ fmt.Println("Echo: " + strings.Join(args, " "))
},
}
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
index c4d820b8..70e9b262 100644
--- a/vendor/github.com/spf13/cobra/args.go
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -2,6 +2,7 @@ package cobra
import (
"fmt"
+ "strings"
)
type PositionalArgs func(cmd *Command, args []string) error
@@ -34,8 +35,15 @@ func NoArgs(cmd *Command, args []string) error {
// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
+ // Remove any description that may be included in ValidArgs.
+ // A description is following a tab character.
+ var validArgs []string
+ for _, v := range cmd.ValidArgs {
+ validArgs = append(validArgs, strings.Split(v, "\t")[0])
+ }
+
for _, v := range args {
- if !stringInSlice(v, cmd.ValidArgs) {
+ if !stringInSlice(v, validArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 57bb8e1b..1e27188c 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -58,9 +58,71 @@ __%[1]s_contains_word()
return 1
}
+__%[1]s_handle_go_custom_completion()
+{
+ __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
+
+ local out requestComp lastParam lastChar comp directive args
+
+ # Prepare the command to request completions for the program.
+ # Calling ${words[0]} instead of directly %[1]s allows us to handle aliases
+ args=("${words[@]:1}")
+ requestComp="${words[0]} %[2]s ${args[*]}"
+
+ lastParam=${words[$((${#words[@]}-1))]}
+ lastChar=${lastParam:$((${#lastParam}-1)):1}
+ __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
+
+ if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
+ requestComp="${requestComp} \"\""
+ fi
+
+ __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
+ # Use eval to handle any environment variables and such
+ out=$(eval "${requestComp}" 2>/dev/null)
+
+ # Extract the directive integer at the very end of the output following a colon (:)
+ directive=${out##*:}
+ # Remove the directive
+ out=${out%%:*}
+ if [ "${directive}" = "${out}" ]; then
+ # There is no directive specified
+ directive=0
+ fi
+ __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
+ __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}"
+
+ if [ $((directive & %[3]d)) -ne 0 ]; then
+ # Error code. No completion.
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
+ return
+ else
+ if [ $((directive & %[4]d)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no space"
+ compopt -o nospace
+ fi
+ fi
+ if [ $((directive & %[5]d)) -ne 0 ]; then
+ if [[ $(type -t compopt) = "builtin" ]]; then
+ __%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
+ compopt +o default
+ fi
+ fi
+
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${out[*]}" -- "$cur")
+ fi
+}
+
__%[1]s_handle_reply()
{
__%[1]s_debug "${FUNCNAME[0]}"
+ local comp
case $cur in
-*)
if [[ $(type -t compopt) = "builtin" ]]; then
@@ -72,7 +134,9 @@ __%[1]s_handle_reply()
else
allflags=("${flags[*]} ${two_word_flags[*]}")
fi
- COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${allflags[*]}" -- "$cur")
if [[ $(type -t compopt) = "builtin" ]]; then
[[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
fi
@@ -118,14 +182,22 @@ __%[1]s_handle_reply()
completions=("${commands[@]}")
if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
completions=("${must_have_one_noun[@]}")
+ elif [[ -n "${has_completion_function}" ]]; then
+ # if a go completion function is provided, defer to that function
+ completions=()
+ __%[1]s_handle_go_custom_completion
fi
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
completions+=("${must_have_one_flag[@]}")
fi
- COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${completions[*]}" -- "$cur")
if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
- COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+ while IFS='' read -r comp; do
+ COMPREPLY+=("$comp")
+ done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
@@ -160,7 +232,7 @@ __%[1]s_handle_filename_extension_flag()
__%[1]s_handle_subdirs_in_dir_flag()
{
local dir="$1"
- pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+ pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
}
__%[1]s_handle_flag()
@@ -272,7 +344,7 @@ __%[1]s_handle_word()
__%[1]s_handle_word
}
-`, name))
+`, name, ShellCompNoDescRequestCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
}
func writePostscript(buf *bytes.Buffer, name string) {
@@ -297,6 +369,7 @@ func writePostscript(buf *bytes.Buffer, name string) {
local commands=("%[1]s")
local must_have_one_flag=()
local must_have_one_noun=()
+ local has_completion_function
local last_command
local nouns=()
@@ -397,7 +470,22 @@ func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
buf.WriteString(fmt.Sprintf(format, name))
}
+// Setup annotations for go completions for registered flags
+func prepareCustomAnnotationsForFlags(cmd *Command) {
+ for flag := range flagCompletionFunctions {
+ // Make sure the completion script calls the __*_go_custom_completion function for
+ // every registered flag. We need to do this here (and not when the flag was registered
+ // for completion) so that we can know the root command name for the prefix
+ // of __*_go_custom_completion
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
+ }
+}
+
func writeFlags(buf *bytes.Buffer, cmd *Command) {
+ prepareCustomAnnotationsForFlags(cmd)
buf.WriteString(` flags=()
two_word_flags=()
local_nonpersistent_flags=()
@@ -460,8 +548,14 @@ func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_noun=()\n")
sort.Sort(sort.StringSlice(cmd.ValidArgs))
for _, value := range cmd.ValidArgs {
+ // Remove any description that may be included following a tab character.
+ // Descriptions are not supported by bash completion.
+ value = strings.Split(value, "\t")[0]
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
}
+ if cmd.ValidArgsFunction != nil {
+ buf.WriteString(" has_completion_function=1\n")
+ }
}
func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index 4ac61ee1..e61a3a65 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -56,7 +56,149 @@ func main() {
`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
-## Creating your own custom functions
+## Have the completions code complete your 'nouns'
+
+### Static completion of nouns
+
+This method allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field.
+For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{ "pod", "node", "service", "replicationcontroller" }
+
+cmd := &cobra.Command{
+ Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+ Short: "Display one or many resources",
+ Long: get_long,
+ Example: get_example,
+ Run: func(cmd *cobra.Command, args []string) {
+ err := RunGet(f, out, cmd, args)
+ util.CheckErr(err)
+ },
+ ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+### Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{ "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+
+cmd := &cobra.Command{
+ ...
+ ValidArgs: validArgs,
+ ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+### Dynamic completion of nouns
+
+In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution time. Cobra provides two ways of defining such dynamic completion of nouns. Note that both methods can be used alongside each other as long as they are not both used for the same command.
+
+**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*.
+
+#### 1. Custom completions of nouns written in Go
+
+In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both.
+Simplified code from `helm status` looks like:
+
+```go
+cmd := &cobra.Command{
+ Use: "status RELEASE_NAME",
+ Short: "Display the status of the named release",
+ Long: status_long,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return RunGet(args[0])
+ },
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if len(args) != 0 {
+ return nil, cobra.ShellCompDirectiveNoFileComp
+ }
+ return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+}
+```
+Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster.
+Notice we put the `ValidArgsFunction` on the `status` subcommand. Assuming the Helm releases on the cluster are `harbor`, `notary`, `rook`, and `thanos`, this dynamic completion will give results like
+
+```bash
+# helm status [tab][tab]
+harbor notary rook thanos
+```
+You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that let you control certain shell completion behaviors for your particular completion. You can combine them with the bitwise OR operator, for example `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp`
+```go
+// Indicates an error occurred and completions should be ignored.
+ShellCompDirectiveError
+// Indicates that the shell should not add a space after the completion,
+// even if there is a single completion provided.
+ShellCompDirectiveNoSpace
+// Indicates that the shell should not provide file completion even when
+// no completion is provided.
+// This currently does not work for zsh or bash < 4
+ShellCompDirectiveNoFileComp
+// Indicates that the shell will perform its default behavior after completions
+// have been provided (this implies !ShellCompDirectiveNoSpace && !ShellCompDirectiveNoFileComp).
+ShellCompDirectiveDefault
+```
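+
+For instance, a completion function that needs both behaviors can return the combined value (a short illustrative fragment, not taken from the Cobra docs):
+
+```go
+// Offer a single prefix-style choice, keep the cursor attached to it,
+// and do not fall back to file completion.
+return []string{"region="}, cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp
+```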
+
+When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
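+
+For illustration, here is a minimal sketch (not taken from the Cobra docs) of a `ValidArgsFunction` that reads back an already-parsed `--namespace` flag before computing its completions; the flag name and the `getReleasesFromNamespace()` helper are assumptions for the example:
+
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	// Flags have already been parsed by Cobra when this function runs,
+	// so the value of --namespace can simply be read back.
+	ns, err := cmd.Flags().GetString("namespace")
+	if err != nil || len(args) != 0 {
+		return nil, cobra.ShellCompDirectiveNoFileComp
+	}
+	// getReleasesFromNamespace is a hypothetical helper returning release names.
+	return getReleasesFromNamespace(ns, toComplete), cobra.ShellCompDirectiveNoFileComp
+},
+```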
+
+##### Debugging
+
+Cobra achieves dynamic completions written in Go through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly:
+```bash
+# helm __complete status har
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty, you must pass an empty parameter to the `__complete` command:
+```bash
+# helm __complete status ""
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool)
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code, as they will be interpreted as completion choices by the completion script. Instead, use the Cobra-provided debugging trace functions mentioned above.
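+
+As a small illustrative sketch (not from the Cobra docs), such a trace can be added to any Go completion function; the completion values shown are placeholders:
+
+```go
+ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+	// Written to the file named by BASH_COMP_DEBUG_FILE (if set) and,
+	// because the second argument is true, also to stderr (never to stdout).
+	cobra.CompDebugln("completing noun, prefix: "+toComplete, true)
+	return []string{"alpha", "beta"}, cobra.ShellCompDirectiveNoFileComp
+},
+```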
+
+#### 2. Custom completions of nouns written in Bash
+
+This method allows you to inject bash functions into the completion script. Those bash functions are responsible for providing the completion choices for your own completions.
Some more actual code that works in kubernetes:
@@ -111,58 +253,6 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built-in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
-## Have the completions code complete your 'nouns'
-
-In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
-
-```go
-validArgs []string = { "pod", "node", "service", "replicationcontroller" }
-
-cmd := &cobra.Command{
- Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
- Short: "Display one or many resources",
- Long: get_long,
- Example: get_example,
- Run: func(cmd *cobra.Command, args []string) {
- err := RunGet(f, out, cmd, args)
- util.CheckErr(err)
- },
- ValidArgs: validArgs,
-}
-```
-
-Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
-
-```bash
-# kubectl get [tab][tab]
-node pod replicationcontroller service
-```
-
-## Plural form and shortcuts for nouns
-
-If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
-
-```go
-argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
-
-cmd := &cobra.Command{
- ...
- ValidArgs: validArgs,
- ArgAliases: argAliases
-}
-```
-
-The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
-the completion algorithm if entered manually, e.g. in:
-
-```bash
-# kubectl get rc [tab][tab]
-backend frontend database
-```
-
-Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
-in this example again instead of the replication controllers.
-
## Mark flags as required
Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
@@ -211,8 +301,45 @@ So while there are many other files in the CWD it only shows me subdirs and thos
# Specify custom flag completion
-Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
-a custom flag completion function with cobra.BashCompCustom:
+As for nouns, Cobra provides two ways of defining dynamic completion of flags. Note that both methods can be used alongside each other as long as they are not both used for the same flag.
+
+**Note**: *Custom Completions written in Go* will automatically work for other shell-completion scripts (e.g., Fish shell), while *Custom Completions written in Bash* will only work for Bash shell-completion. It is therefore recommended to use *Custom Completions written in Go*.
+
+## 1. Custom completions of flags written in Go
+
+To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function in the following manner:
+
+```go
+flagName := "output"
+cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault
+})
+```
+Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so:
+
+```bash
+# helm status --output [tab][tab]
+json table yaml
+```
+
+### Debugging
+
+You can also easily debug your Go completion code for flags:
+```bash
+# helm __complete status --output ""
+json
+table
+yaml
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** You should **not** leave traces that print to stdout in your completion code, as they will be interpreted as completion choices by the completion script. Instead, use the Cobra-provided debugging trace functions mentioned in the section above.
+
+## 2. Custom completions of flags written in Bash
+
+Alternatively, you can use bash code for flag custom completion. Similar to the filename
+completion and filtering using `cobra.BashCompFilenameExt`, you can specify
+a custom flag completion bash function with `cobra.BashCompCustom`:
```go
annotation := make(map[string][]string)
@@ -226,7 +353,7 @@ a custom flag completion function with cobra.BashCompCustom:
cmd.Flags().AddFlag(flag)
```
-In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction`
+In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
value, e.g.:
```bash
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index 6505c070..d01becc8 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -52,7 +52,7 @@ var EnableCommandSorting = true
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
// Works only on Microsoft Windows.
-var MousetrapHelpText string = `This is a command line tool.
+var MousetrapHelpText = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
@@ -61,7 +61,7 @@ You need to open cmd.exe and run it from there.
// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
// Works only on Microsoft Windows.
-var MousetrapDisplayDuration time.Duration = 5 * time.Second
+var MousetrapDisplayDuration = 5 * time.Second
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index c7e89830..88e6ed77 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -17,6 +17,7 @@ package cobra
import (
"bytes"
+ "context"
"fmt"
"io"
"os"
@@ -56,6 +57,10 @@ type Command struct {
// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
ValidArgs []string
+ // ValidArgsFunction is an optional function that provides valid non-flag arguments for bash completion.
+ // It is a dynamic version of using ValidArgs.
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command.
+ ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
// Expected arguments
Args PositionalArgs
@@ -80,7 +85,8 @@ type Command struct {
// Version defines the version for this command. If this value is non-empty and the command does not
// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
- // will print content of the "Version" variable.
+ // will print content of the "Version" variable. A shorthand "v" flag will also be added if the
+ // command does not define one.
Version string
// The *Run functions are executed in the following order:
@@ -140,9 +146,11 @@ type Command struct {
// TraverseChildren parses flags on all parents before executing child command.
TraverseChildren bool
- //FParseErrWhitelist flag parse errors to be ignored
+ // FParseErrWhitelist flag parse errors to be ignored
FParseErrWhitelist FParseErrWhitelist
+ ctx context.Context
+
// commands is the list of commands supported by this program.
commands []*Command
// parent is a parent command for this command.
@@ -202,6 +210,12 @@ type Command struct {
errWriter io.Writer
}
+// Context returns the underlying command context. If the command wasn't
+// executed with ExecuteContext, Context returns the background context.
+func (c *Command) Context() context.Context {
+ return c.ctx
+}
+
// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden
// particularly useful when testing.
func (c *Command) SetArgs(a []string) {
@@ -228,7 +242,7 @@ func (c *Command) SetErr(newErr io.Writer) {
c.errWriter = newErr
}
-// SetOut sets the source for input data
+// SetIn sets the source for input data
// If newIn is nil, os.Stdin is used.
func (c *Command) SetIn(newIn io.Reader) {
c.inReader = newIn
@@ -297,7 +311,7 @@ func (c *Command) ErrOrStderr() io.Writer {
return c.getErr(os.Stderr)
}
-// ErrOrStderr returns output to stderr
+// InOrStdin returns input to stdin
func (c *Command) InOrStdin() io.Reader {
return c.getIn(os.Stdin)
}
@@ -369,6 +383,8 @@ func (c *Command) HelpFunc() func(*Command, []string) {
}
return func(c *Command, a []string) {
c.mergePersistentFlags()
+ // The help should be sent to stdout
+ // See https://github.com/spf13/cobra/issues/1002
err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
if err != nil {
c.Println(err)
@@ -857,6 +873,13 @@ func (c *Command) preRun() {
}
}
+// ExecuteContext is the same as Execute(), but sets the ctx on the command.
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions.
+func (c *Command) ExecuteContext(ctx context.Context) error {
+ c.ctx = ctx
+ return c.Execute()
+}
+
// Execute uses the args (os.Args[1:] by default)
// and run through the command tree finding appropriate matches
// for commands and then corresponding flags.
@@ -867,6 +890,10 @@ func (c *Command) Execute() error {
// ExecuteC executes the command.
func (c *Command) ExecuteC() (cmd *Command, err error) {
+ if c.ctx == nil {
+ c.ctx = context.Background()
+ }
+
// Regardless of what command execute is called on, run on Root only
if c.HasParent() {
return c.Root().ExecuteC()
@@ -888,6 +915,9 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
args = os.Args[1:]
}
+ // initialize the hidden command to be used for bash completion
+ c.initCompleteCmd(args)
+
var flags []string
if c.TraverseChildren {
cmd, flags, err = c.Traverse(args)
@@ -911,6 +941,12 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
cmd.commandCalledAs.name = cmd.Name()
}
+ // We have to pass global context to children command
+ // if context is present on the parent command.
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
err = cmd.execute(flags)
if err != nil {
// Always show help if requested, even if SilenceErrors is in
@@ -994,7 +1030,11 @@ func (c *Command) InitDefaultVersionFlag() {
} else {
usage += c.Name()
}
- c.Flags().Bool("version", false, usage)
+ if c.Flags().ShorthandLookup("v") == nil {
+ c.Flags().BoolP("version", "v", false, usage)
+ } else {
+ c.Flags().Bool("version", false, usage)
+ }
}
}
@@ -1547,7 +1587,7 @@ func (c *Command) ParseFlags(args []string) error {
beforeErrorBufLen := c.flagErrorBuf.Len()
c.mergePersistentFlags()
- //do it here after merging all flags and just before parse
+ // do it here after merging all flags and just before parse
c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
err := c.Flags().Parse(args)
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
new file mode 100644
index 00000000..ba57327c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/custom_completions.go
@@ -0,0 +1,384 @@
+package cobra
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/pflag"
+)
+
+const (
+ // ShellCompRequestCmd is the name of the hidden command that is used to request
+ // completion results from the program. It is used by the shell completion scripts.
+ ShellCompRequestCmd = "__complete"
+ // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
+ // completion results without their description. It is used by the shell completion scripts.
+ ShellCompNoDescRequestCmd = "__completeNoDesc"
+)
+
+// Global map of flag completion functions.
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+
+// ShellCompDirective is a bit map representing the different behaviors the shell
+// can be instructed to have once completions have been provided.
+type ShellCompDirective int
+
+const (
+ // ShellCompDirectiveError indicates an error occurred and completions should be ignored.
+ ShellCompDirectiveError ShellCompDirective = 1 << iota
+
+ // ShellCompDirectiveNoSpace indicates that the shell should not add a space
+ // after the completion even if there is a single completion provided.
+ ShellCompDirectiveNoSpace
+
+ // ShellCompDirectiveNoFileComp indicates that the shell should not provide
+ // file completion even when no completion is provided.
+ // This currently does not work for zsh or bash < 4
+ ShellCompDirectiveNoFileComp
+
+ // ShellCompDirectiveDefault indicates to let the shell perform its default
+ // behavior after completions have been provided.
+ ShellCompDirectiveDefault ShellCompDirective = 0
+)
+
+// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+ flag := c.Flag(flagName)
+ if flag == nil {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
+ }
+ if _, exists := flagCompletionFunctions[flag]; exists {
+ return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
+ }
+ flagCompletionFunctions[flag] = f
+ return nil
+}
+
+// Returns a string listing the different directives enabled in the specified parameter
+func (d ShellCompDirective) string() string {
+ var directives []string
+ if d&ShellCompDirectiveError != 0 {
+ directives = append(directives, "ShellCompDirectiveError")
+ }
+ if d&ShellCompDirectiveNoSpace != 0 {
+ directives = append(directives, "ShellCompDirectiveNoSpace")
+ }
+ if d&ShellCompDirectiveNoFileComp != 0 {
+ directives = append(directives, "ShellCompDirectiveNoFileComp")
+ }
+ if len(directives) == 0 {
+ directives = append(directives, "ShellCompDirectiveDefault")
+ }
+
+ if d > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
+ return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
+ }
+ return strings.Join(directives, ", ")
+}
+
+// Adds a special hidden command that can be used to request custom completions.
+func (c *Command) initCompleteCmd(args []string) {
+ completeCmd := &Command{
+ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
+ Aliases: []string{ShellCompNoDescRequestCmd},
+ DisableFlagsInUseLine: true,
+ Hidden: true,
+ DisableFlagParsing: true,
+ Args: MinimumNArgs(1),
+ Short: "Request shell completion choices for the specified command-line",
+ Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
+ "to request completion choices for the specified command-line.", ShellCompRequestCmd),
+ Run: func(cmd *Command, args []string) {
+ finalCmd, completions, directive, err := cmd.getCompletions(args)
+ if err != nil {
+ CompErrorln(err.Error())
+ // Keep going for multiple reasons:
+ // 1- There could be some valid completions even though there was an error
+ // 2- Even without completions, we need to print the directive
+ }
+
+ noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+ for _, comp := range completions {
+ if noDescriptions {
+ // Remove any description that may be included following a tab character.
+ comp = strings.Split(comp, "\t")[0]
+ }
+ // Print each possible completion to stdout for the completion script to consume.
+ fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+ }
+
+ if directive > ShellCompDirectiveError+ShellCompDirectiveNoSpace+ShellCompDirectiveNoFileComp {
+ directive = ShellCompDirectiveDefault
+ }
+
+ // As the last printout, print the completion directive for the completion script to parse.
+ // The directive integer must be the last character following a single colon (:).
+ // The completion script expects :<directive>
+ fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+
+ // Print some helpful info to stderr for the user to understand.
+ // Output from stderr must be ignored by the completion script.
+ fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
+ },
+ }
+ c.AddCommand(completeCmd)
+ subCmd, _, err := c.Find(args)
+ if err != nil || subCmd.Name() != ShellCompRequestCmd {
+ // Only create this special command if it is actually being called.
+ // This reduces possible side-effects of creating such a command;
+ // for example, having this command would cause problems to a
+ // cobra program that only consists of the root command, since this
+ // command would cause the root command to suddenly have a subcommand.
+ c.RemoveCommand(completeCmd)
+ }
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+ var completions []string
+
+ // The last argument, which is not completely typed by the user,
+ // should not be part of the list of arguments
+ toComplete := args[len(args)-1]
+ trimmedArgs := args[:len(args)-1]
+
+ // Find the real command for which completion must be performed
+ finalCmd, finalArgs, err := c.Root().Find(trimmedArgs)
+ if err != nil {
+ // Unable to find the real command. E.g., someInvalidCmd
+ return c, completions, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
+ }
+
+ // When doing completion of a flag name, as soon as an argument starts with
+ // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
+ // the flag to be complete
+ if len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") {
+ // We are completing a flag name
+ finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ })
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+ completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
+ })
+
+ directive := ShellCompDirectiveDefault
+ if len(completions) > 0 {
+ if strings.HasSuffix(completions[0], "=") {
+ directive = ShellCompDirectiveNoSpace
+ }
+ }
+ return finalCmd, completions, directive, nil
+ }
+
+ var flag *pflag.Flag
+ if !finalCmd.DisableFlagParsing {
+ // We only do flag completion if we are allowed to parse flags
+ // This is important for commands which have requested to do their own flag completion.
+ flag, finalArgs, toComplete, err = checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
+ if err != nil {
+ // Error while attempting to parse flags
+ return finalCmd, completions, ShellCompDirectiveDefault, err
+ }
+ }
+
+ if flag == nil {
+ // Complete subcommand names
+ for _, subCmd := range finalCmd.Commands() {
+ if subCmd.IsAvailableCommand() && strings.HasPrefix(subCmd.Name(), toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+ }
+ }
+
+ if len(finalCmd.ValidArgs) > 0 {
+ // Always complete ValidArgs, even if we are completing a subcommand name.
+ // This is for commands that have both subcommands and ValidArgs.
+ for _, validArg := range finalCmd.ValidArgs {
+ if strings.HasPrefix(validArg, toComplete) {
+ completions = append(completions, validArg)
+ }
+ }
+
+ // If there are ValidArgs specified (even if they don't match), we stop completion.
+ // Only one of ValidArgs or ValidArgsFunction can be used for a single command.
+ return finalCmd, completions, ShellCompDirectiveNoFileComp, nil
+ }
+
+ // Always let the logic continue so as to add any ValidArgsFunction completions,
+ // even if we already found sub-commands.
+ // This is for commands that have subcommands but also specify a ValidArgsFunction.
+ }
+
+ // Parse the flags and extract the arguments to prepare for calling the completion function
+ if err = finalCmd.ParseFlags(finalArgs); err != nil {
+ return finalCmd, completions, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+ }
+
+ // We only remove the flags from the arguments if DisableFlagParsing is not set.
+ // This is important for commands which have requested to do their own flag completion.
+ if !finalCmd.DisableFlagParsing {
+ finalArgs = finalCmd.Flags().Args()
+ }
+
+ // Find the completion function for the flag or command
+ var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
+ if flag != nil {
+ completionFn = flagCompletionFunctions[flag]
+ } else {
+ completionFn = finalCmd.ValidArgsFunction
+ }
+ if completionFn == nil {
+ // Go custom completion not supported/needed for this flag or command
+ return finalCmd, completions, ShellCompDirectiveDefault, nil
+ }
+
+ // Call the registered completion function to get the completions
+ comps, directive := completionFn(finalCmd, finalArgs, toComplete)
+ completions = append(completions, comps...)
+ return finalCmd, completions, directive, nil
+}
+
+func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
+ if nonCompletableFlag(flag) {
+ return []string{}
+ }
+
+ var completions []string
+ flagName := "--" + flag.Name
+ if strings.HasPrefix(flagName, toComplete) {
+ // Flag without the =
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+
+ if len(flag.NoOptDefVal) == 0 {
+ // Flag requires a value, so it can be suffixed with =
+ flagName += "="
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+ }
+
+ flagName = "-" + flag.Shorthand
+ if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
+ completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
+ }
+
+ return completions
+}
+
+func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
+ var flagName string
+ trimmedArgs := args
+ flagWithEqual := false
+ if isFlagArg(lastArg) {
+ if index := strings.Index(lastArg, "="); index >= 0 {
+ flagName = strings.TrimLeft(lastArg[:index], "-")
+ lastArg = lastArg[index+1:]
+ flagWithEqual = true
+ } else {
+ return nil, nil, "", errors.New("Unexpected completion request for flag")
+ }
+ }
+
+ if len(flagName) == 0 {
+ if len(args) > 0 {
+ prevArg := args[len(args)-1]
+ if isFlagArg(prevArg) {
+ // Only consider the case where the flag does not contain an =.
+ // If the flag contains an = it means it has already been fully processed,
+ // so we don't need to deal with it here.
+ if index := strings.Index(prevArg, "="); index < 0 {
+ flagName = strings.TrimLeft(prevArg, "-")
+
+ // Remove the uncompleted flag or else there could be an error created
+ // for an invalid value for that flag
+ trimmedArgs = args[:len(args)-1]
+ }
+ }
+ }
+ }
+
+ if len(flagName) == 0 {
+ // Not doing flag completion
+ return nil, trimmedArgs, lastArg, nil
+ }
+
+ flag := findFlag(finalCmd, flagName)
+ if flag == nil {
+ // Flag not supported by this command, nothing to complete
+ err := fmt.Errorf("Subcommand '%s' does not support flag '%s'", finalCmd.Name(), flagName)
+ return nil, nil, "", err
+ }
+
+ if !flagWithEqual {
+ if len(flag.NoOptDefVal) != 0 {
+ // We had assumed dealing with a two-word flag but the flag is a boolean flag.
+ // In that case, there is no value following it, so we are not really doing flag completion.
+ // Reset everything to do noun completion.
+ trimmedArgs = args
+ flag = nil
+ }
+ }
+
+ return flag, trimmedArgs, lastArg, nil
+}
+
+func findFlag(cmd *Command, name string) *pflag.Flag {
+ flagSet := cmd.Flags()
+ if len(name) == 1 {
+ // First convert the short flag into a long flag
+ // as the cmd.Flag() search only accepts long flags
+ if short := flagSet.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ set := cmd.InheritedFlags()
+ if short = set.ShorthandLookup(name); short != nil {
+ name = short.Name
+ } else {
+ return nil
+ }
+ }
+ }
+ return cmd.Flag(name)
+}
+
+// CompDebug prints the specified string to the same file as where the
+// completion script prints its logs.
+// Note that completion printouts should never be on stdout as they would
+// be wrongly interpreted as actual completion choices by the completion script.
+func CompDebug(msg string, printToStdErr bool) {
+ msg = fmt.Sprintf("[Debug] %s", msg)
+
+ // Such logs are only printed when the user has set the environment
+ // variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+ if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
+ f, err := os.OpenFile(path,
+ os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err == nil {
+ defer f.Close()
+ f.WriteString(msg)
+ }
+ }
+
+ if printToStdErr {
+ // Must print to stderr for this not to be read by the completion script.
+ fmt.Fprintf(os.Stderr, msg)
+ }
+}
+
+// CompDebugln prints the specified string with a newline at the end
+// to the same file as where the completion script prints its logs.
+// Such logs are only printed when the user has set the environment
+// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
+func CompDebugln(msg string, printToStdErr bool) {
+ CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
+}
+
+// CompError prints the specified completion message to stderr.
+func CompError(msg string) {
+ msg = fmt.Sprintf("[Error] %s", msg)
+ CompDebug(msg, true)
+}
+
+// CompErrorln prints the specified completion message to stderr with a newline at the end.
+func CompErrorln(msg string) {
+ CompError(fmt.Sprintf("%s\n", msg))
+}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
new file mode 100644
index 00000000..c83609c8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -0,0 +1,172 @@
+package cobra
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
+ }
+ buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ buf.WriteString(fmt.Sprintf(`
+function __%[1]s_debug
+ set file "$BASH_COMP_DEBUG_FILE"
+ if test -n "$file"
+ echo "$argv" >> $file
+ end
+end
+
+function __%[1]s_perform_completion
+ __%[1]s_debug "Starting __%[1]s_perform_completion with: $argv"
+
+ set args (string split -- " " "$argv")
+ set lastArg "$args[-1]"
+
+ __%[1]s_debug "args: $args"
+ __%[1]s_debug "last arg: $lastArg"
+
+ set emptyArg ""
+ if test -z "$lastArg"
+ __%[1]s_debug "Setting emptyArg"
+ set emptyArg \"\"
+ end
+ __%[1]s_debug "emptyArg: $emptyArg"
+
+ set requestComp "$args[1] %[2]s $args[2..-1] $emptyArg"
+ __%[1]s_debug "Calling $requestComp"
+
+ set results (eval $requestComp 2> /dev/null)
+ set comps $results[1..-2]
+ set directiveLine $results[-1]
+
+ # For Fish, when completing a flag with an = (e.g., -n=)
+ # completions must be prefixed with the flag
+ set flagPrefix (string match -r -- '-.*=' "$lastArg")
+
+ __%[1]s_debug "Comps: $comps"
+ __%[1]s_debug "DirectiveLine: $directiveLine"
+ __%[1]s_debug "flagPrefix: $flagPrefix"
+
+ for comp in $comps
+ printf "%%s%%s\n" "$flagPrefix" "$comp"
+ end
+
+ printf "%%s\n" "$directiveLine"
+end
+
+# This function does three things:
+# 1- Obtain the completions and store them in the global __%[1]s_comp_results
+# 2- Set the __%[1]s_comp_do_file_comp flag if file completion should be performed
+# and unset it otherwise
+# 3- Return true if the completion results are not empty
+function __%[1]s_prepare_completions
+ # Start fresh
+ set --erase __%[1]s_comp_do_file_comp
+ set --erase __%[1]s_comp_results
+
+ # Check if the command-line is already provided. This is useful for testing.
+ if not set --query __%[1]s_comp_commandLine
+ set __%[1]s_comp_commandLine (commandline)
+ end
+ __%[1]s_debug "commandLine is: $__%[1]s_comp_commandLine"
+
+ set results (__%[1]s_perform_completion "$__%[1]s_comp_commandLine")
+ set --erase __%[1]s_comp_commandLine
+ __%[1]s_debug "Completion results: $results"
+
+ if test -z "$results"
+ __%[1]s_debug "No completion, probably due to a failure"
+ # Might as well do file completion, in case it helps
+ set --global __%[1]s_comp_do_file_comp 1
+ return 0
+ end
+
+ set directive (string sub --start 2 $results[-1])
+ set --global __%[1]s_comp_results $results[1..-2]
+
+ __%[1]s_debug "Completions are: $__%[1]s_comp_results"
+ __%[1]s_debug "Directive is: $directive"
+
+ if test -z "$directive"
+ set directive 0
+ end
+
+ set compErr (math (math --scale 0 $directive / %[3]d) %% 2)
+ if test $compErr -eq 1
+ __%[1]s_debug "Received error directive: aborting."
+ # Might as well do file completion, in case it helps
+ set --global __%[1]s_comp_do_file_comp 1
+ return 0
+ end
+
+ set nospace (math (math --scale 0 $directive / %[4]d) %% 2)
+ set nofiles (math (math --scale 0 $directive / %[5]d) %% 2)
+
+ __%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
+
+ # Important not to quote the variable for count to work
+ set numComps (count $__%[1]s_comp_results)
+ __%[1]s_debug "numComps: $numComps"
+
+ if test $numComps -eq 1; and test $nospace -ne 0
+ # To support the "nospace" directive we trick the shell
+ # by outputting an extra, longer completion.
+ __%[1]s_debug "Adding second completion to perform nospace directive"
+ set --append __%[1]s_comp_results $__%[1]s_comp_results[1].
+ end
+
+ if test $numComps -eq 0; and test $nofiles -eq 0
+ __%[1]s_debug "Requesting file completion"
+ set --global __%[1]s_comp_do_file_comp 1
+ end
+
+ # If we don't want file completion, we must return true even if there
+ # are no completions found. This is because fish will perform the last
+ # completion command, even if its condition is false, if no other
+ # completion command was triggered
+ return (not set --query __%[1]s_comp_do_file_comp)
+end
+
+# Remove any pre-existing completions for the program since we will be handling all of them
+# TODO this cleanup is not sufficient. Fish completions are only loaded once the user triggers
+# them, so the below deletion will not work as it is run too early. What else can we do?
+complete -c %[1]s -e
+
+# The order in which the below two lines are defined is very important so that __%[1]s_prepare_completions
+# is called first. It is __%[1]s_prepare_completions that sets up the __%[1]s_comp_do_file_comp variable.
+#
+# This completion will be run second as complete commands are added FILO.
+# It triggers file completion choices when __%[1]s_comp_do_file_comp is set.
+complete -c %[1]s -n 'set --query __%[1]s_comp_do_file_comp'
+
+# This completion will be run first as complete commands are added FILO.
+# The call to __%[1]s_prepare_completions will set up both __%[1]s_comp_results and __%[1]s_comp_do_file_comp.
+# It provides the program's completion choices.
+complete -c %[1]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+
+`, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp))
+}
+
+// GenFishCompletion generates fish completion file and writes to the passed writer.
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genFishComp(buf, c.Name(), includeDesc)
+ _, err := buf.WriteTo(w)
+ return err
+}
+
+// GenFishCompletionFile generates fish completion file.
+func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
+ outFile, err := os.Create(filename)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ return c.GenFishCompletion(outFile, includeDesc)
+}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md
new file mode 100644
index 00000000..6bfe5f88
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/fish_completions.md
@@ -0,0 +1,7 @@
+## Generating Fish Completions for your own cobra.Command
+
+Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
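+
+As a minimal sketch of how this could be wired up (the `completion` command name and the surrounding `rootCmd` are assumptions for the example, not part of Cobra itself):
+
+```go
+var completionCmd = &cobra.Command{
+	Use:   "completion",
+	Short: "Generate a fish completion script on stdout",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		// true: annotate each completion with its description
+		return rootCmd.GenFishCompletion(os.Stdout, true)
+	},
+}
+```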
+
+### Limitations
+
+* Custom completions implemented using the `ValidArgsFunction` and `RegisterFlagCompletionFunc()` are supported automatically but the ones implemented in Bash scripting are not.
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
index 9a9eb65a..dea1030b 100644
--- a/vendor/github.com/spf13/cobra/go.mod
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -3,11 +3,10 @@ module github.com/spf13/cobra
go 1.12
require (
- github.com/BurntSushi/toml v0.3.1 // indirect
- github.com/cpuguy83/go-md2man v1.0.10
+ github.com/cpuguy83/go-md2man/v2 v2.0.0
github.com/inconshreveable/mousetrap v1.0.0
github.com/mitchellh/go-homedir v1.1.0
github.com/spf13/pflag v1.0.3
- github.com/spf13/viper v1.3.2
+ github.com/spf13/viper v1.4.0
gopkg.in/yaml.v2 v2.2.2
)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
index 9761f4d0..3aaa2ac0 100644
--- a/vendor/github.com/spf13/cobra/go.sum
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -1,31 +1,91 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
@@ -34,18 +94,56 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore
index 00268614..a71f88af 100644
--- a/vendor/github.com/spf13/jwalterweatherman/.gitignore
+++ b/vendor/github.com/spf13/jwalterweatherman/.gitignore
@@ -20,3 +20,5 @@ _cgo_export.*
_testmain.go
*.exe
+*.bench
+go.sum
\ No newline at end of file
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
index bcb76340..a018c15c 100644
--- a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -64,6 +64,13 @@ func SetStdoutThreshold(threshold Threshold) {
reloadDefaultNotepad()
}
+// SetStdoutOutput sets the stdout output for the default notepad. Default is stdout.
+func SetStdoutOutput(handle io.Writer) {
+ defaultNotepad.outHandle = handle
+ defaultNotepad.init()
+ reloadDefaultNotepad()
+}
+
// SetPrefix set the prefix for the default logger. Empty by default.
func SetPrefix(prefix string) {
defaultNotepad.SetPrefix(prefix)
@@ -76,6 +83,13 @@ func SetFlags(flags int) {
reloadDefaultNotepad()
}
+// SetLogListeners configures the default logger with one or more log listeners.
+func SetLogListeners(l ...LogListener) {
+ defaultNotepad.logListeners = l
+ defaultNotepad.init()
+ reloadDefaultNotepad()
+}
+
// Level returns the current global log threshold.
func LogThreshold() Threshold {
return defaultNotepad.logThreshold
@@ -95,19 +109,3 @@ func GetLogThreshold() Threshold {
func GetStdoutThreshold() Threshold {
return defaultNotepad.GetStdoutThreshold()
}
-
-// LogCountForLevel returns the number of log invocations for a given threshold.
-func LogCountForLevel(l Threshold) uint64 {
- return defaultNotepad.LogCountForLevel(l)
-}
-
-// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
-// greater than or equal to a given threshold.
-func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
- return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold)
-}
-
-// ResetLogCounters resets the invocation counters for all levels.
-func ResetLogCounters() {
- defaultNotepad.ResetLogCounters()
-}
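
A minimal sketch of how the new default-notepad hook above could be used, e.g. to capture log output in a test. It assumes the package's existing `LevelInfo` threshold constant and package-level `INFO` logger; everything else is illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	// Redirect the default notepad's stdout stream into a buffer
	// instead of writing to os.Stdout.
	var buf bytes.Buffer
	jww.SetStdoutOutput(&buf)
	jww.SetStdoutThreshold(jww.LevelInfo)

	jww.INFO.Println("hello")
	fmt.Print(buf.String()) // the captured INFO line
}
```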
diff --git a/vendor/github.com/spf13/jwalterweatherman/go.mod b/vendor/github.com/spf13/jwalterweatherman/go.mod
index bce549c0..1dbcfd3e 100644
--- a/vendor/github.com/spf13/jwalterweatherman/go.mod
+++ b/vendor/github.com/spf13/jwalterweatherman/go.mod
@@ -1 +1,7 @@
module github.com/spf13/jwalterweatherman
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+)
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
index 11423ac4..41285f3d 100644
--- a/vendor/github.com/spf13/jwalterweatherman/log_counter.go
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -6,50 +6,41 @@
package jwalterweatherman
import (
+ "io"
"sync/atomic"
)
-type logCounter struct {
- counter uint64
+// Counter is an io.Writer that increments a counter on Write.
+type Counter struct {
+ count uint64
}
-func (c *logCounter) incr() {
- atomic.AddUint64(&c.counter, 1)
+func (c *Counter) incr() {
+ atomic.AddUint64(&c.count, 1)
}
-func (c *logCounter) resetCounter() {
- atomic.StoreUint64(&c.counter, 0)
+// Reset resets the counter.
+func (c *Counter) Reset() {
+ atomic.StoreUint64(&c.count, 0)
}
-func (c *logCounter) getCount() uint64 {
- return atomic.LoadUint64(&c.counter)
+// Count returns the current count.
+func (c *Counter) Count() uint64 {
+ return atomic.LoadUint64(&c.count)
}
-func (c *logCounter) Write(p []byte) (n int, err error) {
+func (c *Counter) Write(p []byte) (n int, err error) {
c.incr()
return len(p), nil
}
-// LogCountForLevel returns the number of log invocations for a given threshold.
-func (n *Notepad) LogCountForLevel(l Threshold) uint64 {
- return n.logCounters[l].getCount()
-}
-
-// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
-// greater than or equal to a given threshold.
-func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
- var cnt uint64
-
- for i := int(threshold); i < len(n.logCounters); i++ {
- cnt += n.LogCountForLevel(Threshold(i))
- }
-
- return cnt
-}
-
-// ResetLogCounters resets the invocation counters for all levels.
-func (n *Notepad) ResetLogCounters() {
- for _, np := range n.logCounters {
- np.resetCounter()
+// LogCounter creates a LogListener that counts log statements >= the given threshold.
+func LogCounter(counter *Counter, t1 Threshold) LogListener {
+ return func(t2 Threshold) io.Writer {
+ if t2 < t1 {
+ // Not interested in this threshold.
+ return nil
+ }
+ return counter
}
}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
index ae5aaf71..cc7957bf 100644
--- a/vendor/github.com/spf13/jwalterweatherman/notepad.go
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -8,6 +8,7 @@ package jwalterweatherman
import (
"fmt"
"io"
+ "io/ioutil"
"log"
)
@@ -58,13 +59,28 @@ type Notepad struct {
prefix string
flags int
- // One per Threshold
- logCounters [7]*logCounter
+ logListeners []LogListener
}
-// NewNotepad create a new notepad.
-func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
- n := &Notepad{}
+// A LogListener can be supplied to a Notepad to listen on log writes for a given
+// threshold. This can be used to capture log events in unit tests and similar.
+// Note that this function will be invoked once for each log threshold. If
+// the given threshold is not of interest to you, return nil.
+// Note that these listeners will receive log events for a given threshold, even
+// if the current configuration says not to log it. That way you can count ERRORs even
+// if you don't print them to the console.
+type LogListener func(t Threshold) io.Writer
+
+// NewNotepad creates a new Notepad.
+func NewNotepad(
+ outThreshold Threshold,
+ logThreshold Threshold,
+ outHandle, logHandle io.Writer,
+ prefix string, flags int,
+ logListeners ...LogListener,
+) *Notepad {
+
+ n := &Notepad{logListeners: logListeners}
n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
n.outHandle = outHandle
@@ -95,28 +111,43 @@ func (n *Notepad) init() {
for t, logger := range n.loggers {
threshold := Threshold(t)
- counter := &logCounter{}
- n.logCounters[t] = counter
prefix := n.prefix + threshold.String() + " "
switch {
case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
- *logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags)
+ *logger = log.New(n.createLogWriters(threshold, logAndOut), prefix, n.flags)
case threshold >= n.logThreshold:
- *logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags)
+ *logger = log.New(n.createLogWriters(threshold, n.logHandle), prefix, n.flags)
case threshold >= n.stdoutThreshold:
- *logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags)
+ *logger = log.New(n.createLogWriters(threshold, n.outHandle), prefix, n.flags)
default:
- // counter doesn't care about prefix and flags, so don't use them
- // for performance.
- *logger = log.New(counter, "", 0)
+ *logger = log.New(n.createLogWriters(threshold, ioutil.Discard), prefix, n.flags)
}
}
}
+func (n *Notepad) createLogWriters(t Threshold, handle io.Writer) io.Writer {
+ if len(n.logListeners) == 0 {
+ return handle
+ }
+ writers := []io.Writer{handle}
+ for _, l := range n.logListeners {
+ w := l(t)
+ if w != nil {
+ writers = append(writers, w)
+ }
+ }
+
+ if len(writers) == 1 {
+ return handle
+ }
+
+ return io.MultiWriter(writers...)
+}
+
// SetLogThreshold changes the threshold above which messages are written to the
// log file.
func (n *Notepad) SetLogThreshold(threshold Threshold) {
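
To make the listener mechanism above concrete, here is a small sketch combining `NewNotepad`, `LogCounter`, and the `Counter` type from the `log_counter.go` hunk. The `LevelError` constant name is assumed from the package's existing `Threshold` constants; the thresholds and messages are illustrative only.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	// Count ERROR-and-above log calls via a listener, independently of
	// whether a given threshold is actually printed anywhere.
	var errCount jww.Counter
	n := jww.NewNotepad(
		jww.LevelError,            // stdout threshold
		jww.LevelError,            // log-file threshold
		os.Stdout, ioutil.Discard, // out and log handles
		"", 0,
		jww.LogCounter(&errCount, jww.LevelError),
	)

	n.WARN.Println("below the listener threshold: not counted, not printed")
	n.ERROR.Println("counted and printed")

	fmt.Println(errCount.Count()) // 1
}
```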
diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig
new file mode 100644
index 00000000..63afcbcd
--- /dev/null
+++ b/vendor/github.com/spf13/viper/.editorconfig
@@ -0,0 +1,15 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
+
+[{Makefile, *.mk}]
+indent_style = tab
diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore
index d6941f32..89625083 100644
--- a/vendor/github.com/spf13/viper/.gitignore
+++ b/vendor/github.com/spf13/viper/.gitignore
@@ -1,20 +1,5 @@
+/.idea/
/bin/
/build/
/var/
/vendor/
-
-# IDE integration
-/.vscode/*
-!/.vscode/launch.json
-!/.vscode/tasks.json
-/.idea/*
-!/.idea/codeStyles/
-!/.idea/copyright/
-!/.idea/dataSources.xml
-!/.idea/*.iml
-!/.idea/externalDependencies.xml
-!/.idea/go.imports.xml
-!/.idea/modules.xml
-!/.idea/runConfigurations/
-!/.idea/scopes/
-!/.idea/sqldialects.xml
diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml
index 0ea9249e..a0755ce7 100644
--- a/vendor/github.com/spf13/viper/.golangci.yml
+++ b/vendor/github.com/spf13/viper/.golangci.yml
@@ -21,4 +21,7 @@ linters:
- scopelint
- gocyclo
- gocognit
- - gocritic
\ No newline at end of file
+ - gocritic
+
+service:
+ golangci-lint-version: 1.21.x
diff --git a/vendor/github.com/spf13/viper/.travis.yml b/vendor/github.com/spf13/viper/.travis.yml
deleted file mode 100644
index ed677bbb..00000000
--- a/vendor/github.com/spf13/viper/.travis.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-go_import_path: github.com/spf13/viper
-
-language: go
-
-env:
- global:
- - GO111MODULE="on"
- - GOFLAGS="-mod=readonly"
-
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - tip
-
-os:
- - linux
- - osx
-
-matrix:
- allow_failures:
- - go: tip
- fast_finish: true
-
-script:
- - go install ./...
- - diff -u <(echo -n) <(gofmt -d .)
- - go test -v ./...
-
-after_success:
- - go get -u -d github.com/spf13/hugo
- - cd $GOPATH/src/github.com/spf13/hugo && make && ./hugo -s docs && cd -
diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile
index e39b8b5e..1c2cab03 100644
--- a/vendor/github.com/spf13/viper/Makefile
+++ b/vendor/github.com/spf13/viper/Makefile
@@ -1,9 +1,12 @@
# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
OS = $(shell uname | tr A-Z a-z)
+export PATH := $(abspath bin/):${PATH}
# Build variables
BUILD_DIR ?= build
+export CGO_ENABLED ?= 0
+export GOOS = $(shell go env GOOS)
ifeq (${VERBOSE}, 1)
ifeq ($(filter -v,${GOARGS}),)
GOARGS += -v
@@ -12,7 +15,7 @@ TEST_FORMAT = short-verbose
endif
# Dependency versions
-GOTESTSUM_VERSION = 0.3.5
+GOTESTSUM_VERSION = 0.4.0
GOLANGCI_VERSION = 1.21.0
# Add the ability to override some variables
@@ -33,20 +36,19 @@ bin/gotestsum-${GOTESTSUM_VERSION}:
curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION}
TEST_PKGS ?= ./...
-TEST_REPORT_NAME ?= results.xml
.PHONY: test
-test: TEST_REPORT ?= main
test: TEST_FORMAT ?= short
test: SHELL = /bin/bash
+test: export CGO_ENABLED=1
test: bin/gotestsum ## Run tests
- @mkdir -p ${BUILD_DIR}/test_results/${TEST_REPORT}
- bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/test_results/${TEST_REPORT}/${TEST_REPORT_NAME} --format ${TEST_FORMAT} -- $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
+ @mkdir -p ${BUILD_DIR}
+ bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...)
bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION}
@ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint
bin/golangci-lint-${GOLANGCI_VERSION}:
@mkdir -p bin
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | BINARY=golangci-lint bash -s -- v${GOLANGCI_VERSION}
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION}
@mv bin/golangci-lint $@
.PHONY: lint
@@ -57,6 +59,9 @@ lint: bin/golangci-lint ## Run linter
fix: bin/golangci-lint ## Fix lint violations
bin/golangci-lint run --fix
+# Add custom targets here
+-include custom.mk
+
.PHONY: list
list: ## List all make targets
@${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
index 327308bc..dfd8034f 100644
--- a/vendor/github.com/spf13/viper/README.md
+++ b/vendor/github.com/spf13/viper/README.md
@@ -1,10 +1,13 @@
-![viper logo](https://cloud.githubusercontent.com/assets/173412/10886745/998df88a-8151-11e5-9448-4736db51020d.png)
+![Viper](.github/logo.png?raw=true)
-Go configuration with fangs!
+[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration)
-[![Actions](https://github.com/spf13/viper/workflows/CI/badge.svg)](https://github.com/spf13/viper)
+[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI)
[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-[![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper)
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/viper)
+
+**Go configuration with fangs!**
Many Go projects are built using Viper including:
@@ -101,6 +104,7 @@ where a configuration file is expected.
```go
viper.SetConfigName("config") // name of config file (without extension)
+viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name
viper.AddConfigPath("/etc/appname/") // path to look for the config file in
viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths
viper.AddConfigPath(".") // optionally look for config in the working directory
@@ -124,7 +128,7 @@ if err := viper.ReadInConfig(); err != nil {
// Config file found and successfully parsed
```
-*NOTE:* You can also have a file without an extension and specify the format programmaticaly. For those configuration files that lie in the home of the user without any extension like `.bashrc`
+*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. For those configuration files that lie in the home of the user without any extension like `.bashrc`
### Writing Config Files
@@ -399,7 +403,7 @@ in a Key/Value store such as etcd or Consul. These values take precedence over
default values, but are overridden by configuration values retrieved from disk,
flags, or environment variables.
-Viper uses [crypt](https://github.com/xordataexchange/crypt) to retrieve
+Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve
configuration from the K/V store, which means that you can store your
configuration values encrypted and have them automatically decrypted if you have
the correct gpg keyring. Encryption is optional.
@@ -411,7 +415,7 @@ independently of it.
K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
```bash
-$ go get github.com/xordataexchange/crypt/bin/crypt
+$ go get github.com/bketelsen/crypt/bin/crypt
$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
```
@@ -434,7 +438,7 @@ err := viper.ReadRemoteConfig()
```
#### Consul
-You need to set a key to Consul key/value storage with JSON value containing your desired config.
+You need to set a key to Consul key/value storage with JSON value containing your desired config.
For example, create a Consul key/value store key `MY_CONSUL_KEY` with value:
```json
@@ -453,6 +457,16 @@ fmt.Println(viper.Get("port")) // 8080
fmt.Println(viper.Get("hostname")) // myhostname.com
```
+#### Firestore
+
+```go
+viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document")
+viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml"
+err := viper.ReadRemoteConfig()
+```
+
+Of course, you're also allowed to use `SecureRemoteProvider`.
+
### Remote Key/Value Store Example - Encrypted
```go
@@ -692,18 +706,49 @@ var C config
v.Unmarshal(&C)
```
+Viper also supports unmarshaling into embedded structs:
+
+```go
+/*
+Example config:
+
+module:
+ enabled: true
+ token: 89h3f98hbwf987h3f98wenf89ehf
+*/
+type config struct {
+ Module struct {
+ Enabled bool
+
+ moduleConfig `mapstructure:",squash"`
+ }
+}
+
+// moduleConfig could be in a module specific package
+type moduleConfig struct {
+ Token string
+}
+
+var C config
+
+err := viper.Unmarshal(&C)
+if err != nil {
+ t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default.
### Marshalling to string
-You may need to marshal all the settings held in viper into a string rather than write them to a file.
+You may need to marshal all the settings held in viper into a string rather than write them to a file.
You can use your favorite format's marshaller with the config returned by `AllSettings()`.
```go
import (
yaml "gopkg.in/yaml.v2"
// ...
-)
+)
func yamlStringSettings() string {
c := viper.AllSettings()
diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod
index 0e358cbe..7d108dcc 100644
--- a/vendor/github.com/spf13/viper/go.mod
+++ b/vendor/github.com/spf13/viper/go.mod
@@ -3,18 +3,15 @@ module github.com/spf13/viper
go 1.12
require (
- github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect
+ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c
github.com/coreos/bbolt v1.3.2 // indirect
- github.com/coreos/etcd v3.3.10+incompatible // indirect
- github.com/coreos/go-semver v0.2.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.7
github.com/gogo/protobuf v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
- github.com/google/btree v1.0.0 // indirect
- github.com/gorilla/websocket v1.4.0 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect
@@ -30,19 +27,14 @@ require (
github.com/spf13/cast v1.3.0
github.com/spf13/jwalterweatherman v1.0.0
github.com/spf13/pflag v1.0.3
- github.com/stretchr/testify v1.2.2
+ github.com/stretchr/testify v1.3.0
github.com/subosito/gotenv v1.2.0
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
- github.com/ugorji/go v1.1.4 // indirect
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
- github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77
go.etcd.io/bbolt v1.3.2 // indirect
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0 // indirect
- golang.org/x/net v0.0.0-20190522155817-f3200d17e092 // indirect
- golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
- google.golang.org/grpc v1.21.0 // indirect
gopkg.in/ini.v1 v1.51.0
gopkg.in/yaml.v2 v2.2.4
)
diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum
index d75aee23..463aa7db 100644
--- a/vendor/github.com/spf13/viper/go.sum
+++ b/vendor/github.com/spf13/viper/go.sum
@@ -1,35 +1,62 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -42,26 +69,77 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -77,18 +155,39 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -103,6 +202,10 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzr
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
@@ -120,21 +223,23 @@ github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
@@ -142,46 +247,132 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc h1:NCy3Ohtk6Iny5V/reW2Ktypo4zIpWBdRJ1uFMjBxdg8=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@@ -190,3 +381,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
index b7889696..cee6b242 100644
--- a/vendor/github.com/spf13/viper/util.go
+++ b/vendor/github.com/spf13/viper/util.go
@@ -91,13 +91,22 @@ func insensitiviseMap(m map[string]interface{}) {
func absPathify(inPath string) string {
jww.INFO.Println("Trying to resolve absolute path to", inPath)
- if strings.HasPrefix(inPath, "$HOME") {
+ if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) {
inPath = userHomeDir() + inPath[5:]
}
if strings.HasPrefix(inPath, "$") {
end := strings.Index(inPath, string(os.PathSeparator))
- inPath = os.Getenv(inPath[1:end]) + inPath[end:]
+
+ var value, suffix string
+ if end == -1 {
+ value = os.Getenv(inPath[1:])
+ } else {
+ value = os.Getenv(inPath[1:end])
+ suffix = inPath[end:]
+ }
+
+ inPath = value + suffix
}
if filepath.IsAbs(inPath) {
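For reference, a minimal standalone sketch of the corrected expansion logic above (the helper name and example paths are illustrative, not viper internals). It shows why the old code mis-expanded "$HOMEDIR"-style prefixes and sliced out of range for a bare "$VAR":

package main

import (
	"fmt"
	"os"
	"strings"
)

// expandEnvPrefix mirrors the patched absPathify behaviour: "$HOME" (alone or
// followed by a path separator) and "$VAR" prefixes are expanded; "$HOMEDIR" is not.
func expandEnvPrefix(inPath string) string {
	sep := string(os.PathSeparator)
	if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+sep) {
		home, _ := os.UserHomeDir()
		return home + inPath[len("$HOME"):]
	}
	if strings.HasPrefix(inPath, "$") {
		end := strings.Index(inPath, sep)
		if end == -1 {
			// A bare "$VAR" no longer slices with end == -1.
			return os.Getenv(inPath[1:])
		}
		return os.Getenv(inPath[1:end]) + inPath[end:]
	}
	return inPath
}

func main() {
	os.Setenv("CFG_DIR", "/etc/myapp")
	fmt.Println(expandEnvPrefix("$CFG_DIR/config.yaml")) // /etc/myapp/config.yaml
	fmt.Println(expandEnvPrefix("$CFG_DIR"))             // /etc/myapp
}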
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
index eb2f5177..405dc20f 100644
--- a/vendor/github.com/spf13/viper/viper.go
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -287,7 +287,7 @@ func NewWithOptions(opts ...Option) *Viper {
func Reset() {
v = New()
SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
- SupportedRemoteProviders = []string{"etcd", "consul"}
+ SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
}
type defaultRemoteProvider struct {
@@ -328,7 +328,7 @@ type RemoteProvider interface {
var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
// SupportedRemoteProviders are universally supported remote providers.
-var SupportedRemoteProviders = []string{"etcd", "consul"}
+var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
@@ -477,7 +477,7 @@ func (v *Viper) AddConfigPath(in string) {
// AddRemoteProvider adds a remote configuration source.
// Remote Providers are searched in the order they are added.
-// provider is a string value, "etcd" or "consul" are currently supported.
+// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port consul requires ip:port
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
@@ -506,14 +506,14 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
// AddSecureRemoteProvider adds a remote configuration source.
// Secure Remote Providers are searched in the order they are added.
-// provider is a string value, "etcd" or "consul" are currently supported.
+// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port consul requires ip:port
// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
-// Secure Remote Providers are implemented with github.com/xordataexchange/crypt
+// Secure Remote Providers are implemented with github.com/bketelsen/crypt
func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
}
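A hedged usage sketch of the newly supported provider. The endpoint and document path below are made up; the blank remote import and ReadRemoteConfig call follow viper's documented remote-provider pattern rather than anything in this patch:

package main

import (
	"log"

	"github.com/spf13/viper"
	// Blank import wires up the crypt-based remote provider backends.
	_ "github.com/spf13/viper/remote"
)

func main() {
	v := viper.New()
	// "firestore" is now accepted alongside "etcd" and "consul".
	if err := v.AddRemoteProvider("firestore", "my-gcp-project-id", "configs/myapp"); err != nil {
		log.Fatal(err)
	}
	v.SetConfigType("json") // remote payloads still need an explicit config type
	if err := v.ReadRemoteConfig(); err != nil {
		log.Fatal(err)
	}
	log.Println(v.GetString("service.name"))
}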
@@ -896,13 +896,7 @@ func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) e
return v.UnmarshalKey(key, rawVal, opts...)
}
func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
- err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
-
- if err != nil {
- return err
- }
-
- return nil
+ return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
}
// Unmarshal unmarshals the config into a Struct. Make sure that the tags
@@ -911,13 +905,7 @@ func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.Unmarshal(rawVal, opts...)
}
func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
- err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
-
- if err != nil {
- return err
- }
-
- return nil
+ return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
}
// defaultDecoderConfig returns default mapstructure.DecoderConfig with support
@@ -956,13 +944,7 @@ func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption)
config := defaultDecoderConfig(rawVal, opts...)
config.ErrorUnused = true
- err := decode(v.AllSettings(), config)
-
- if err != nil {
- return err
- }
-
- return nil
+ return decode(v.AllSettings(), config)
}
// BindPFlags binds a full flag set to the configuration, using each flag's long
@@ -996,11 +978,6 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
}
// BindFlagValue binds a specific key to a FlagValue.
-// Example (where serverCmd is a Cobra instance):
-//
-// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
-// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
-//
func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
if flag == nil {
@@ -1088,6 +1065,8 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
+ case "stringToString":
+ return stringToStringConv(flag.ValueString())
default:
return flag.ValueString()
}
@@ -1163,6 +1142,8 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
+ case "stringToString":
+ return stringToStringConv(flag.ValueString())
default:
return flag.ValueString()
}
@@ -1182,6 +1163,30 @@ func readAsCSV(val string) ([]string, error) {
return csvReader.Read()
}
+// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79
+// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap
+func stringToStringConv(val string) interface{} {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]interface{}{}
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil
+ }
+ out := make(map[string]interface{}, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out
+}
+
// IsSet checks to see if the key has been set in any of the data locations.
// IsSet is case-insensitive for a key.
func IsSet(key string) bool { return v.IsSet(key) }
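The stringToString branch above feeds pflag's rendered value (e.g. "[a=1,b=2]") through a small CSV-based parser. A standalone sketch of the same parsing, with the helper body kept from the patch and example values invented:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// stringToStringConv mirrors the helper added above: pflag renders a
// map flag as "[k=v,k=v]", which is trimmed and read back as CSV pairs.
func stringToStringConv(val string) interface{} {
	val = strings.Trim(val, "[]")
	if len(val) == 0 {
		return map[string]interface{}{}
	}
	ss, err := csv.NewReader(strings.NewReader(val)).Read()
	if err != nil {
		return nil
	}
	out := make(map[string]interface{}, len(ss))
	for _, pair := range ss {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil
		}
		out[kv[0]] = kv[1]
	}
	return out
}

func main() {
	fmt.Println(stringToStringConv("[region=us-east-1,tier=gold]"))
	// map[region:us-east-1 tier:gold]
}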
@@ -1418,11 +1423,18 @@ func (v *Viper) SafeWriteConfigAs(filename string) error {
func (v *Viper) writeConfig(filename string, force bool) error {
jww.INFO.Println("Attempting to write configuration to file.")
+ var configType string
+
ext := filepath.Ext(filename)
- if len(ext) <= 1 {
- return fmt.Errorf("filename: %s requires valid extension", filename)
+ if ext != "" {
+ configType = ext[1:]
+ } else {
+ configType = v.configType
}
- configType := ext[1:]
+ if configType == "" {
+ return fmt.Errorf("config type could not be determined for %s", filename)
+ }
+
if !stringInSlice(configType, SupportedExts) {
return UnsupportedConfigError(configType)
}
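With the change above, an extension-less filename no longer errors out as long as a config type was set explicitly. A sketch of the resulting call pattern (filenames are illustrative; WriteConfigAs, Set and SetConfigType are existing viper API, not part of this patch):

package main

import (
	"log"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml") // supplies the type when the filename has no extension
	v.Set("server.port", 8080)

	// Previously this returned "filename: appconfig requires valid extension".
	if err := v.WriteConfigAs("appconfig"); err != nil {
		log.Fatal(err)
	}
	// With an extension, the extension still determines the output format.
	if err := v.WriteConfigAs("appconfig.json"); err != nil {
		log.Fatal(err)
	}
}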
@@ -1619,7 +1631,7 @@ func (v *Viper) marshalWriter(f afero.File, configType string) error {
if sectionName == "default" {
sectionName = ""
}
- cfg.Section(sectionName).Key(keyName).SetValue(Get(key).(string))
+ cfg.Section(sectionName).Key(keyName).SetValue(v.Get(key).(string))
}
cfg.WriteTo(f)
}
@@ -1976,8 +1988,10 @@ func (v *Viper) searchInPath(in string) (filename string) {
}
}
- if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
- return filepath.Join(in, v.configName)
+ if v.configType != "" {
+ if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
+ return filepath.Join(in, v.configName)
+ }
}
return ""
diff --git a/vendor/github.com/ssgreg/nlreturn/v2/LICENSE b/vendor/github.com/ssgreg/nlreturn/v2/LICENSE
new file mode 100644
index 00000000..0a5b4d10
--- /dev/null
+++ b/vendor/github.com/ssgreg/nlreturn/v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Grigory Zubankov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go b/vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go
new file mode 100644
index 00000000..52318ccf
--- /dev/null
+++ b/vendor/github.com/ssgreg/nlreturn/v2/pkg/nlreturn/nlreturn.go
@@ -0,0 +1,86 @@
+package nlreturn
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+const (
+ linterName = "nlreturn"
+ linterDoc = `Linter requires a new line before return and branch statements except when the return is alone inside a statement group (such as an if statement) to increase code clarity.`
+)
+
+// NewAnalyzer returns a new nlreturn analyzer.
+func NewAnalyzer() *analysis.Analyzer {
+ return &analysis.Analyzer{
+ Name: linterName,
+ Doc: linterDoc,
+ Run: run,
+ }
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ for _, f := range pass.Files {
+ ast.Inspect(f, func(node ast.Node) bool {
+ switch c := node.(type) {
+ case *ast.CaseClause:
+ inspectBlock(pass, c.Body)
+ case *ast.CommClause:
+ inspectBlock(pass, c.Body)
+ case *ast.BlockStmt:
+ inspectBlock(pass, c.List)
+ }
+
+ return true
+ })
+ }
+
+ return nil, nil
+}
+
+func inspectBlock(pass *analysis.Pass, block []ast.Stmt) {
+ for i, stmt := range block {
+ switch stmt.(type) {
+ case *ast.BranchStmt, *ast.ReturnStmt:
+ if i == 0 {
+ return
+ }
+
+ if line(pass, stmt.Pos())-line(pass, block[i-1].End()) <= 1 {
+ pass.Report(analysis.Diagnostic{
+ Pos: stmt.Pos(),
+ Message: fmt.Sprintf("%s with no blank line before", name(stmt)),
+ SuggestedFixes: []analysis.SuggestedFix{
+ {
+ TextEdits: []analysis.TextEdit{
+ {
+ Pos: stmt.Pos(),
+ NewText: []byte("\n"),
+ End: stmt.Pos(),
+ },
+ },
+ },
+ },
+ })
+ }
+ }
+ }
+}
+
+func name(stmt ast.Stmt) string {
+ switch c := stmt.(type) {
+ case *ast.BranchStmt:
+ return c.Tok.String()
+ case *ast.ReturnStmt:
+ return "return"
+ default:
+ return "unknown"
+ }
+}
+
+func line(pass *analysis.Pass, pos token.Pos) int {
+ return pass.Fset.Position(pos).Line
+}
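To illustrate what the vendored nlreturn analyzer reports, a small snippet it would flag, plus the accepted form, assuming default settings:

package sample

// nlreturn flags a return/branch statement that directly follows another
// statement in the same block with no blank line in between.
func findPositive(xs []int) int {
	for _, x := range xs {
		if x > 0 {
			return x // ok: the return is the only statement in its block
		}
	}
	total := 0
	return total // reported: "return with no blank line before"
}

func findPositiveFixed(xs []int) int {
	for _, x := range xs {
		if x > 0 {
			return x
		}
	}
	total := 0

	return total // accepted: blank line separates it from the previous statement
}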
diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml
index 010d4ccd..559fa399 100644
--- a/vendor/github.com/stretchr/objx/.codeclimate.yml
+++ b/vendor/github.com/stretchr/objx/.codeclimate.yml
@@ -10,4 +10,12 @@ exclude_patterns:
- ".github/"
- "vendor/"
- "codegen/"
+- "*.yml"
+- ".*.yml"
+- "*.md"
+- "Gopkg.*"
- "doc.go"
+- "type_specific_codegen_test.go"
+- "type_specific_codegen.go"
+- ".gitignore"
+- "LICENSE"
diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml
index a63efa59..cde6eb2a 100644
--- a/vendor/github.com/stretchr/objx/.travis.yml
+++ b/vendor/github.com/stretchr/objx/.travis.yml
@@ -1,8 +1,14 @@
language: go
go:
- - 1.8
- - 1.9
- - tip
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - master
+
+matrix:
+ allow_failures:
+ - go: master
+fast_finish: true
env:
global:
@@ -14,12 +20,11 @@ before_script:
- ./cc-test-reporter before-build
install:
-- go get github.com/go-task/task/cmd/task
+ - curl -sL https://taskfile.dev/install.sh | sh
script:
-- task dl-deps
-- task lint
-- task test-coverage
+ - diff -u <(echo -n) <(./bin/task lint)
+ - ./bin/task test-coverage
after_script:
- ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT
diff --git a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock
deleted file mode 100644
index eebe342a..00000000
--- a/vendor/github.com/stretchr/objx/Gopkg.lock
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- revision = "346938d642f2ec3594ed81d874461961cd0faa76"
- version = "v1.1.0"
-
-[[projects]]
- name = "github.com/pmezard/go-difflib"
- packages = ["difflib"]
- revision = "792786c7400a136282c1664665ae0a8db921c6c2"
- version = "v1.0.0"
-
-[[projects]]
- name = "github.com/stretchr/testify"
- packages = [
- "assert",
- "require"
- ]
- revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
- version = "v1.2.0"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml
deleted file mode 100644
index d70f1570..00000000
--- a/vendor/github.com/stretchr/objx/Gopkg.toml
+++ /dev/null
@@ -1,8 +0,0 @@
-[prune]
- unused-packages = true
- non-go = true
- go-tests = true
-
-[[constraint]]
- name = "github.com/stretchr/testify"
- version = "~1.2.0"
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md
index be5750c9..246660b2 100644
--- a/vendor/github.com/stretchr/objx/README.md
+++ b/vendor/github.com/stretchr/objx/README.md
@@ -74,7 +74,7 @@ To update Objx to the latest version, run:
go get -u github.com/stretchr/objx
### Supported go versions
-We support the lastest two major Go versions, which are 1.8 and 1.9 at the moment.
+We support the latest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment.
## Contributing
Please feel free to submit issues, fork the repository and send pull requests!
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml
index f8035641..a749ac54 100644
--- a/vendor/github.com/stretchr/objx/Taskfile.yml
+++ b/vendor/github.com/stretchr/objx/Taskfile.yml
@@ -1,32 +1,30 @@
-default:
- deps: [test]
+version: '2'
-dl-deps:
- desc: Downloads cli dependencies
- cmds:
- - go get -u github.com/golang/lint/golint
- - go get -u github.com/golang/dep/cmd/dep
+env:
+ GOFLAGS: -mod=vendor
-update-deps:
- desc: Updates dependencies
- cmds:
- - dep ensure
- - dep ensure -update
+tasks:
+ default:
+ deps: [test]
-lint:
- desc: Runs golint
- cmds:
- - go fmt $(go list ./... | grep -v /vendor/)
- - go vet $(go list ./... | grep -v /vendor/)
- - golint $(ls *.go | grep -v "doc.go")
- silent: true
+ lint:
+ desc: Checks code style
+ cmds:
+ - gofmt -d -s *.go
+ - go vet ./...
+ silent: true
-test:
- desc: Runs go tests
- cmds:
- - go test -race .
+ lint-fix:
+ desc: Fixes code style
+ cmds:
+ - gofmt -w -s *.go
-test-coverage:
- desc: Runs go tests and calucates test coverage
- cmds:
- - go test -coverprofile=c.out .
+ test:
+ desc: Runs go tests
+ cmds:
+ - go test -race ./...
+
+ test-coverage:
+ desc: Runs go tests and calculates test coverage
+ cmds:
+ - go test -race -coverprofile=c.out ./...
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go
index 204356a2..80ad1674 100644
--- a/vendor/github.com/stretchr/objx/accessors.go
+++ b/vendor/github.com/stretchr/objx/accessors.go
@@ -1,18 +1,34 @@
package objx
import (
+ "reflect"
"regexp"
"strconv"
"strings"
)
-// arrayAccesRegexString is the regex used to extract the array number
-// from the access path
-const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
+const (
+ // PathSeparator is the character used to separate the elements
+ // of the keypath.
+ //
+ // For example, `location.address.city`
+ PathSeparator string = "."
+
+ // arrayAccesRegexString is the regex used to extract the array number
+ // from the access path
+ arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
+
+ // mapAccessRegexString is the regex used to extract the map key
+ // from the access path
+ mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$`
+)
// arrayAccesRegex is the compiled arrayAccesRegexString
var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
+// mapAccessRegex is the compiled mapAccessRegexString
+var mapAccessRegex = regexp.MustCompile(mapAccessRegexString)
+
// Get gets the value using the specified selector and
// returns it inside a new Obj object.
//
@@ -46,103 +62,118 @@ func (m Map) Set(selector string, value interface{}) Map {
return m
}
+// getIndex returns the index held in s between two brackets.
+// It also returns s without the index part, e.g. name[1] will return (1, name).
+// If no index is found, -1 is returned.
+func getIndex(s string) (int, string) {
+ arrayMatches := arrayAccesRegex.FindStringSubmatch(s)
+ if len(arrayMatches) > 0 {
+ // Get the key into the map
+ selector := arrayMatches[1]
+ // Get the index into the array at the key
+ // We know this cannot fail because arrayMatches[2] is an int for sure
+ index, _ := strconv.Atoi(arrayMatches[2])
+ return index, selector
+ }
+ return -1, s
+}
+
+// getKey returns the key which is held in s by two brackets.
+// It also returns the next selector.
+func getKey(s string) (string, string) {
+ selSegs := strings.SplitN(s, PathSeparator, 2)
+ thisSel := selSegs[0]
+ nextSel := ""
+
+ if len(selSegs) > 1 {
+ nextSel = selSegs[1]
+ }
+
+ mapMatches := mapAccessRegex.FindStringSubmatch(s)
+ if len(mapMatches) > 0 {
+ if _, err := strconv.Atoi(mapMatches[2]); err != nil {
+ thisSel = mapMatches[1]
+ nextSel = "[" + mapMatches[2] + "]" + mapMatches[3]
+
+ if thisSel == "" {
+ thisSel = mapMatches[2]
+ nextSel = mapMatches[3]
+ }
+
+ if nextSel == "" {
+ selSegs = []string{"", ""}
+ } else if nextSel[0] == '.' {
+ nextSel = nextSel[1:]
+ }
+ }
+ }
+
+ return thisSel, nextSel
+}
+
// access accesses the object using the selector and performs the
// appropriate action.
-func access(current, selector, value interface{}, isSet bool) interface{} {
- switch selector.(type) {
- case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
- if array, ok := current.([]interface{}); ok {
- index := intFromInterface(selector)
- if index >= len(array) {
- return nil
- }
- return array[index]
+func access(current interface{}, selector string, value interface{}, isSet bool) interface{} {
+ thisSel, nextSel := getKey(selector)
+
+ index := -1
+ if strings.Contains(thisSel, "[") {
+ index, thisSel = getIndex(thisSel)
+ }
+
+ if curMap, ok := current.(Map); ok {
+ current = map[string]interface{}(curMap)
+ }
+ // get the object in question
+ switch current.(type) {
+ case map[string]interface{}:
+ curMSI := current.(map[string]interface{})
+ if nextSel == "" && isSet {
+ curMSI[thisSel] = value
+ return nil
}
- return nil
- case string:
- selStr := selector.(string)
- selSegs := strings.SplitN(selStr, PathSeparator, 2)
- thisSel := selSegs[0]
- index := -1
- var err error
+ _, ok := curMSI[thisSel].(map[string]interface{})
+ if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet {
+ curMSI[thisSel] = map[string]interface{}{}
+ }
- if strings.Contains(thisSel, "[") {
- arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel)
- if len(arrayMatches) > 0 {
- // Get the key into the map
- thisSel = arrayMatches[1]
+ current = curMSI[thisSel]
+ default:
+ current = nil
+ }
- // Get the index into the array at the key
- index, err = strconv.Atoi(arrayMatches[2])
-
- if err != nil {
- // This should never happen. If it does, something has gone
- // seriously wrong. Panic.
- panic("objx: Array index is not an integer. Must use array[int].")
- }
+ // do we need to access the item of an array?
+ if index > -1 {
+ if array, ok := interSlice(current); ok {
+ if index < len(array) {
+ current = array[index]
+ } else {
+ current = nil
}
}
- if curMap, ok := current.(Map); ok {
- current = map[string]interface{}(curMap)
- }
- // get the object in question
- switch current.(type) {
- case map[string]interface{}:
- curMSI := current.(map[string]interface{})
- if len(selSegs) <= 1 && isSet {
- curMSI[thisSel] = value
- return nil
- }
- current = curMSI[thisSel]
- default:
- current = nil
- }
- // do we need to access the item of an array?
- if index > -1 {
- if array, ok := current.([]interface{}); ok {
- if index < len(array) {
- current = array[index]
- } else {
- current = nil
- }
- }
- }
- if len(selSegs) > 1 {
- current = access(current, selSegs[1], value, isSet)
- }
+ }
+ if nextSel != "" {
+ current = access(current, nextSel, value, isSet)
}
return current
}
-// intFromInterface converts an interface object to the largest
-// representation of an unsigned integer using a type switch and
-// assertions
-func intFromInterface(selector interface{}) int {
- var value int
- switch selector.(type) {
- case int:
- value = selector.(int)
- case int8:
- value = int(selector.(int8))
- case int16:
- value = int(selector.(int16))
- case int32:
- value = int(selector.(int32))
- case int64:
- value = int(selector.(int64))
- case uint:
- value = int(selector.(uint))
- case uint8:
- value = int(selector.(uint8))
- case uint16:
- value = int(selector.(uint16))
- case uint32:
- value = int(selector.(uint32))
- case uint64:
- value = int(selector.(uint64))
- default:
- return 0
+func interSlice(slice interface{}) ([]interface{}, bool) {
+ if array, ok := slice.([]interface{}); ok {
+ return array, ok
}
- return value
+
+ s := reflect.ValueOf(slice)
+ if s.Kind() != reflect.Slice {
+ return nil, false
+ }
+
+ ret := make([]interface{}, s.Len())
+
+ for i := 0; i < s.Len(); i++ {
+ ret[i] = s.Index(i).Interface()
+ }
+
+ return ret, true
}
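A quick, hedged sketch of the selector forms the rewritten accessor now handles (data and keys are invented; Get is shown in this file, Str/Bool are objx's existing typed getters):

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.Map{
		"users": []interface{}{
			map[string]interface{}{"name": "ada"},
			map[string]interface{}{"name": "grace"},
		},
		"settings": map[string]interface{}{
			"theme.dark": true, // keys containing dots need bracket access
		},
	}

	fmt.Println(m.Get("users[1].name").Str())         // grace: array index plus nested key
	fmt.Println(m.Get("settings[theme.dark]").Bool()) // true: map-key access via brackets
}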
diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go
deleted file mode 100644
index f9eb42a2..00000000
--- a/vendor/github.com/stretchr/objx/constants.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package objx
-
-const (
- // PathSeparator is the character used to separate the elements
- // of the keypath.
- //
- // For example, `location.address.city`
- PathSeparator string = "."
-
- // SignatureSeparator is the character that is used to
- // separate the Base64 string from the security signature.
- SignatureSeparator = "_"
-)
diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go
index 5e020f31..080aa46e 100644
--- a/vendor/github.com/stretchr/objx/conversions.go
+++ b/vendor/github.com/stretchr/objx/conversions.go
@@ -7,11 +7,51 @@ import (
"errors"
"fmt"
"net/url"
+ "strconv"
)
+// SignatureSeparator is the character that is used to
+// separate the Base64 string from the security signature.
+const SignatureSeparator = "_"
+
+// URLValuesSliceKeySuffix is the character that is used to
+// specify a suffix for slices parsed by URLValues.
+// If the suffix is set to "[i]", then the index of the slice
+// is used in place of i
+// Ex: Suffix "[]" would have the form a[]=b&a[]=c
+// OR Suffix "[i]" would have the form a[0]=b&a[1]=c
+// OR Suffix "" would have the form a=b&a=c
+var urlValuesSliceKeySuffix = "[]"
+
+const (
+ URLValuesSliceKeySuffixEmpty = ""
+ URLValuesSliceKeySuffixArray = "[]"
+ URLValuesSliceKeySuffixIndex = "[i]"
+)
+
+// SetURLValuesSliceKeySuffix sets the character that is used to
+// specify a suffix for slices parsed by URLValues.
+// If the suffix is set to "[i]", then the index of the slice
+// is used in place of i
+// Ex: Suffix "[]" would have the form a[]=b&a[]=c
+// OR Suffix "[i]" would have the form a[0]=b&a[1]=c
+// OR Suffix "" would have the form a=b&a=c
+func SetURLValuesSliceKeySuffix(s string) error {
+ if s == URLValuesSliceKeySuffixEmpty || s == URLValuesSliceKeySuffixArray || s == URLValuesSliceKeySuffixIndex {
+ urlValuesSliceKeySuffix = s
+ return nil
+ }
+
+ return errors.New("objx: Invalid URLValuesSliceKeySuffix provided.")
+}
+
// JSON converts the contained object to a JSON string
// representation
func (m Map) JSON() (string, error) {
+ for k, v := range m {
+ m[k] = cleanUp(v)
+ }
+
result, err := json.Marshal(m)
if err != nil {
err = errors.New("objx: JSON encode failed with: " + err.Error())
@@ -19,6 +59,63 @@ func (m Map) JSON() (string, error) {
return string(result), err
}
+func cleanUpInterfaceArray(in []interface{}) []interface{} {
+ result := make([]interface{}, len(in))
+ for i, v := range in {
+ result[i] = cleanUp(v)
+ }
+ return result
+}
+
+func cleanUpInterfaceMap(in map[interface{}]interface{}) Map {
+ result := Map{}
+ for k, v := range in {
+ result[fmt.Sprintf("%v", k)] = cleanUp(v)
+ }
+ return result
+}
+
+func cleanUpStringMap(in map[string]interface{}) Map {
+ result := Map{}
+ for k, v := range in {
+ result[k] = cleanUp(v)
+ }
+ return result
+}
+
+func cleanUpMSIArray(in []map[string]interface{}) []Map {
+ result := make([]Map, len(in))
+ for i, v := range in {
+ result[i] = cleanUpStringMap(v)
+ }
+ return result
+}
+
+func cleanUpMapArray(in []Map) []Map {
+ result := make([]Map, len(in))
+ for i, v := range in {
+ result[i] = cleanUpStringMap(v)
+ }
+ return result
+}
+
+func cleanUp(v interface{}) interface{} {
+ switch v := v.(type) {
+ case []interface{}:
+ return cleanUpInterfaceArray(v)
+ case []map[string]interface{}:
+ return cleanUpMSIArray(v)
+ case map[interface{}]interface{}:
+ return cleanUpInterfaceMap(v)
+ case Map:
+ return cleanUpStringMap(v)
+ case []Map:
+ return cleanUpMapArray(v)
+ default:
+ return v
+ }
+}
+
// MustJSON converts the contained object to a JSON string
// representation and panics if there is an error
func (m Map) MustJSON() string {
@@ -40,10 +137,7 @@ func (m Map) Base64() (string, error) {
}
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
- _, err = encoder.Write([]byte(jsonData))
- if err != nil {
- return "", err
- }
+ _, _ = encoder.Write([]byte(jsonData))
_ = encoder.Close()
return buf.String(), nil
@@ -93,13 +187,91 @@ func (m Map) MustSignedBase64(key string) string {
// function requires that the wrapped object be a map[string]interface{}
func (m Map) URLValues() url.Values {
vals := make(url.Values)
- for k, v := range m {
- //TODO: can this be done without sprintf?
- vals.Set(k, fmt.Sprintf("%v", v))
- }
+
+ m.parseURLValues(m, vals, "")
+
return vals
}
+func (m Map) parseURLValues(queryMap Map, vals url.Values, key string) {
+ useSliceIndex := false
+ if urlValuesSliceKeySuffix == "[i]" {
+ useSliceIndex = true
+ }
+
+ for k, v := range queryMap {
+ val := &Value{data: v}
+ switch {
+ case val.IsObjxMap():
+ if key == "" {
+ m.parseURLValues(val.ObjxMap(), vals, k)
+ } else {
+ m.parseURLValues(val.ObjxMap(), vals, key+"["+k+"]")
+ }
+ case val.IsObjxMapSlice():
+ sliceKey := k
+ if key != "" {
+ sliceKey = key + "[" + k + "]"
+ }
+
+ if useSliceIndex {
+ for i, sv := range val.MustObjxMapSlice() {
+ sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]"
+ m.parseURLValues(sv, vals, sk)
+ }
+ } else {
+ sliceKey = sliceKey + urlValuesSliceKeySuffix
+ for _, sv := range val.MustObjxMapSlice() {
+ m.parseURLValues(sv, vals, sliceKey)
+ }
+ }
+ case val.IsMSISlice():
+ sliceKey := k
+ if key != "" {
+ sliceKey = key + "[" + k + "]"
+ }
+
+ if useSliceIndex {
+ for i, sv := range val.MustMSISlice() {
+ sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]"
+ m.parseURLValues(New(sv), vals, sk)
+ }
+ } else {
+ sliceKey = sliceKey + urlValuesSliceKeySuffix
+ for _, sv := range val.MustMSISlice() {
+ m.parseURLValues(New(sv), vals, sliceKey)
+ }
+ }
+ case val.IsStrSlice(), val.IsBoolSlice(),
+ val.IsFloat32Slice(), val.IsFloat64Slice(),
+ val.IsIntSlice(), val.IsInt8Slice(), val.IsInt16Slice(), val.IsInt32Slice(), val.IsInt64Slice(),
+ val.IsUintSlice(), val.IsUint8Slice(), val.IsUint16Slice(), val.IsUint32Slice(), val.IsUint64Slice():
+
+ sliceKey := k
+ if key != "" {
+ sliceKey = key + "[" + k + "]"
+ }
+
+ if useSliceIndex {
+ for i, sv := range val.StringSlice() {
+ sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]"
+ vals.Set(sk, sv)
+ }
+ } else {
+ sliceKey = sliceKey + urlValuesSliceKeySuffix
+ vals[sliceKey] = val.StringSlice()
+ }
+
+ default:
+ if key == "" {
+ vals.Set(k, val.String())
+ } else {
+ vals.Set(key+"["+k+"]", val.String())
+ }
+ }
+ }
+}
+
// URLQuery gets an encoded URL query representing the given
// Obj. This function requires that the wrapped object be a
// map[string]interface{}
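A hedged usage sketch of the new URL-values encoding knobs (map contents invented; URLQuery is the existing helper referenced just above):

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.Map{
		"name": "demo",
		"tags": []string{"alpha", "beta"},
	}

	// Default suffix "[]" encodes slices roughly as tags[]=alpha&tags[]=beta (URL-escaped).
	fmt.Println(m.URLQuery())

	// "[i]" switches to indexed keys: tags[0]=alpha&tags[1]=beta.
	if err := objx.SetURLValuesSliceKeySuffix(objx.URLValuesSliceKeySuffixIndex); err != nil {
		panic(err)
	}
	fmt.Println(m.URLQuery())
}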
diff --git a/vendor/github.com/stretchr/objx/go.mod b/vendor/github.com/stretchr/objx/go.mod
new file mode 100644
index 00000000..31ec5a7d
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/go.mod
@@ -0,0 +1,8 @@
+module github.com/stretchr/objx
+
+go 1.12
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/stretchr/objx/go.sum b/vendor/github.com/stretchr/objx/go.sum
new file mode 100644
index 00000000..4f898415
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/go.sum
@@ -0,0 +1,8 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go
index 406bc892..95149c06 100644
--- a/vendor/github.com/stretchr/objx/map.go
+++ b/vendor/github.com/stretchr/objx/map.go
@@ -97,12 +97,50 @@ func MustFromJSON(jsonString string) Map {
//
// Returns an error if the JSON is invalid.
func FromJSON(jsonString string) (Map, error) {
- var data interface{}
- err := json.Unmarshal([]byte(jsonString), &data)
+ var m Map
+ err := json.Unmarshal([]byte(jsonString), &m)
if err != nil {
return Nil, err
}
- return New(data), nil
+ m.tryConvertFloat64()
+ return m, nil
+}
+
+func (m Map) tryConvertFloat64() {
+ for k, v := range m {
+ switch v.(type) {
+ case float64:
+ f := v.(float64)
+ if float64(int(f)) == f {
+ m[k] = int(f)
+ }
+ case map[string]interface{}:
+ t := New(v)
+ t.tryConvertFloat64()
+ m[k] = t
+ case []interface{}:
+ m[k] = tryConvertFloat64InSlice(v.([]interface{}))
+ }
+ }
+}
+
+func tryConvertFloat64InSlice(s []interface{}) []interface{} {
+ for k, v := range s {
+ switch v.(type) {
+ case float64:
+ f := v.(float64)
+ if float64(int(f)) == f {
+ s[k] = int(f)
+ }
+ case map[string]interface{}:
+ t := New(v)
+ t.tryConvertFloat64()
+ s[k] = t
+ case []interface{}:
+ s[k] = tryConvertFloat64InSlice(v.([]interface{}))
+ }
+ }
+ return s
}
// FromBase64 creates a new Obj containing the data specified
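The FromJSON change above post-processes decoded numbers: whole-valued float64s become ints, recursively through nested maps and slices. A small sketch of the observable effect:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m, err := objx.FromJSON(`{"port": 8080, "ratio": 0.75, "ids": [1, 2, 3]}`)
	if err != nil {
		panic(err)
	}

	// Whole numbers are converted from float64 to int after decoding...
	fmt.Printf("%T %T\n", m["port"], m["ratio"]) // int float64
	// ...including inside nested slices.
	fmt.Printf("%T\n", m["ids"].([]interface{})[0]) // int
}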
diff --git a/vendor/github.com/stretchr/objx/type_specific.go b/vendor/github.com/stretchr/objx/type_specific.go
new file mode 100644
index 00000000..80f88d9f
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/type_specific.go
@@ -0,0 +1,346 @@
+package objx
+
+/*
+ MSI (map[string]interface{} and []map[string]interface{})
+*/
+
+// MSI gets the value as a map[string]interface{}, returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} {
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ if s, ok := v.data.(Map); ok {
+ return map[string]interface{}(s)
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+}
+
+// MustMSI gets the value as a map[string]interface{}.
+//
+// Panics if the object is not a map[string]interface{}.
+func (v *Value) MustMSI() map[string]interface{} {
+ if s, ok := v.data.(Map); ok {
+ return map[string]interface{}(s)
+ }
+ return v.data.(map[string]interface{})
+}
+
+// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault
+// value or nil if the value is not a []map[string]interface{}.
+func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} {
+ if s, ok := v.data.([]map[string]interface{}); ok {
+ return s
+ }
+
+ s := v.ObjxMapSlice()
+ if s == nil {
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+ }
+
+ result := make([]map[string]interface{}, len(s))
+ for i := range s {
+ result[i] = s[i].Value().MSI()
+ }
+ return result
+}
+
+// MustMSISlice gets the value as a []map[string]interface{}.
+//
+// Panics if the object is not a []map[string]interface{}.
+func (v *Value) MustMSISlice() []map[string]interface{} {
+ if s := v.MSISlice(); s != nil {
+ return s
+ }
+
+ return v.data.([]map[string]interface{})
+}
+
+// IsMSI gets whether the object contained is a map[string]interface{} or not.
+func (v *Value) IsMSI() bool {
+ _, ok := v.data.(map[string]interface{})
+ if !ok {
+ _, ok = v.data.(Map)
+ }
+ return ok
+}
+
+// IsMSISlice gets whether the object contained is a []map[string]interface{} or not.
+func (v *Value) IsMSISlice() bool {
+ _, ok := v.data.([]map[string]interface{})
+ if !ok {
+ _, ok = v.data.([]Map)
+ if !ok {
+ s, ok := v.data.([]interface{})
+ if ok {
+ for i := range s {
+ switch s[i].(type) {
+ case Map:
+ case map[string]interface{}:
+ default:
+ return false
+ }
+ }
+ return true
+ }
+ }
+ }
+ return ok
+}
+
+// EachMSI calls the specified callback for each object
+// in the []map[string]interface{}.
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
+ for index, val := range v.MustMSISlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereMSI uses the specified decider function to select items
+// from the []map[string]interface{}. The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
+ var selected []map[string]interface{}
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupMSI uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][]map[string]interface{}.
+func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
+ groups := make(map[string][]map[string]interface{})
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([]map[string]interface{}, 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceMSI uses the specified function to replace each map[string]interface{}s
+// by iterating each item. The data in the returned result will be a
+// []map[string]interface{} containing the replaced items.
+func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
+ arr := v.MustMSISlice()
+ replaced := make([]map[string]interface{}, len(arr))
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectMSI uses the specified collector function to collect a value
+// for each of the map[string]interface{}s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
+ arr := v.MustMSISlice()
+ collected := make([]interface{}, len(arr))
+ v.EachMSI(func(index int, val map[string]interface{}) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
+
+/*
+ ObjxMap ((Map) and [](Map))
+*/
+
+// ObjxMap gets the value as a (Map), returns the optionalDefault
+// value or a system default object if the value is the wrong type.
+func (v *Value) ObjxMap(optionalDefault ...(Map)) Map {
+ if s, ok := v.data.((Map)); ok {
+ return s
+ }
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return New(nil)
+}
+
+// MustObjxMap gets the value as a (Map).
+//
+// Panics if the object is not a (Map).
+func (v *Value) MustObjxMap() Map {
+ if s, ok := v.data.(map[string]interface{}); ok {
+ return s
+ }
+ return v.data.((Map))
+}
+
+// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault
+// value or nil if the value is not a [](Map).
+func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) {
+ if s, ok := v.data.([]Map); ok {
+ return s
+ }
+
+ if s, ok := v.data.([]map[string]interface{}); ok {
+ result := make([]Map, len(s))
+ for i := range s {
+ result[i] = s[i]
+ }
+ return result
+ }
+
+ s, ok := v.data.([]interface{})
+ if !ok {
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+ return nil
+ }
+
+ result := make([]Map, len(s))
+ for i := range s {
+ switch s[i].(type) {
+ case Map:
+ result[i] = s[i].(Map)
+ case map[string]interface{}:
+ result[i] = New(s[i])
+ default:
+ return nil
+ }
+ }
+ return result
+}
+
+// MustObjxMapSlice gets the value as a [](Map).
+//
+// Panics if the object is not a [](Map).
+func (v *Value) MustObjxMapSlice() [](Map) {
+ if s := v.ObjxMapSlice(); s != nil {
+ return s
+ }
+ return v.data.([](Map))
+}
+
+// IsObjxMap gets whether the object contained is a (Map) or not.
+func (v *Value) IsObjxMap() bool {
+ _, ok := v.data.((Map))
+ if !ok {
+ _, ok = v.data.(map[string]interface{})
+ }
+ return ok
+}
+
+// IsObjxMapSlice gets whether the object contained is a [](Map) or not.
+func (v *Value) IsObjxMapSlice() bool {
+ _, ok := v.data.([](Map))
+ if !ok {
+ _, ok = v.data.([]map[string]interface{})
+ if !ok {
+ s, ok := v.data.([]interface{})
+ if ok {
+ for i := range s {
+ switch s[i].(type) {
+ case Map:
+ case map[string]interface{}:
+ default:
+ return false
+ }
+ }
+ return true
+ }
+ }
+ }
+
+ return ok
+}
+
+// EachObjxMap calls the specified callback for each object
+// in the [](Map).
+//
+// Panics if the object is the wrong type.
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
+ for index, val := range v.MustObjxMapSlice() {
+ carryon := callback(index, val)
+ if !carryon {
+ break
+ }
+ }
+ return v
+}
+
+// WhereObjxMap uses the specified decider function to select items
+// from the [](Map). The object contained in the result will contain
+// only the selected items.
+func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
+ var selected [](Map)
+ v.EachObjxMap(func(index int, val Map) bool {
+ shouldSelect := decider(index, val)
+ if !shouldSelect {
+ selected = append(selected, val)
+ }
+ return true
+ })
+ return &Value{data: selected}
+}
+
+// GroupObjxMap uses the specified grouper function to group the items
+// keyed by the return of the grouper. The object contained in the
+// result will contain a map[string][](Map).
+func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
+ groups := make(map[string][](Map))
+ v.EachObjxMap(func(index int, val Map) bool {
+ group := grouper(index, val)
+ if _, ok := groups[group]; !ok {
+ groups[group] = make([](Map), 0)
+ }
+ groups[group] = append(groups[group], val)
+ return true
+ })
+ return &Value{data: groups}
+}
+
+// ReplaceObjxMap uses the specified function to replace each (Map)s
+// by iterating each item. The data in the returned result will be a
+// [](Map) containing the replaced items.
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
+ arr := v.MustObjxMapSlice()
+ replaced := make([](Map), len(arr))
+ v.EachObjxMap(func(index int, val Map) bool {
+ replaced[index] = replacer(index, val)
+ return true
+ })
+ return &Value{data: replaced}
+}
+
+// CollectObjxMap uses the specified collector function to collect a value
+// for each of the (Map)s in the slice. The data returned will be a
+// []interface{}.
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
+ arr := v.MustObjxMapSlice()
+ collected := make([]interface{}, len(arr))
+ v.EachObjxMap(func(index int, val Map) bool {
+ collected[index] = collector(index, val)
+ return true
+ })
+ return &Value{data: collected}
+}
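
The additions above teach ObjxMapSlice (and the related Is/Must helpers) to coerce []map[string]interface{} and mixed []interface{} values into []objx.Map. A minimal usage sketch, assuming this vendored objx version; the map keys are illustrative only:

```go
package demo

import (
	"fmt"

	"github.com/stretchr/objx"
)

func Example() {
	// JSON decoding typically yields []interface{} elements, which the old
	// ObjxMapSlice returned as nil; the new coercion converts them instead.
	m := objx.New(map[string]interface{}{
		"users": []interface{}{
			map[string]interface{}{"name": "amy"},
			map[string]interface{}{"name": "bob"},
		},
	})

	for _, u := range m.Get("users").ObjxMapSlice() {
		fmt.Println(u.Get("name").Str())
	}
}
```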
diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go
index 202a91f8..9859b407 100644
--- a/vendor/github.com/stretchr/objx/type_specific_codegen.go
+++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go
@@ -1,7 +1,7 @@
package objx
/*
- Inter (interface{} and []interface{})
+ Inter (interface{} and []interface{})
*/
// Inter gets the value as a interface{}, returns the optionalDefault
@@ -126,257 +126,7 @@ func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Valu
}
/*
- MSI (map[string]interface{} and []map[string]interface{})
-*/
-
-// MSI gets the value as a map[string]interface{}, returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} {
- if s, ok := v.data.(map[string]interface{}); ok {
- return s
- }
- if len(optionalDefault) == 1 {
- return optionalDefault[0]
- }
- return nil
-}
-
-// MustMSI gets the value as a map[string]interface{}.
-//
-// Panics if the object is not a map[string]interface{}.
-func (v *Value) MustMSI() map[string]interface{} {
- return v.data.(map[string]interface{})
-}
-
-// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault
-// value or nil if the value is not a []map[string]interface{}.
-func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} {
- if s, ok := v.data.([]map[string]interface{}); ok {
- return s
- }
- if len(optionalDefault) == 1 {
- return optionalDefault[0]
- }
- return nil
-}
-
-// MustMSISlice gets the value as a []map[string]interface{}.
-//
-// Panics if the object is not a []map[string]interface{}.
-func (v *Value) MustMSISlice() []map[string]interface{} {
- return v.data.([]map[string]interface{})
-}
-
-// IsMSI gets whether the object contained is a map[string]interface{} or not.
-func (v *Value) IsMSI() bool {
- _, ok := v.data.(map[string]interface{})
- return ok
-}
-
-// IsMSISlice gets whether the object contained is a []map[string]interface{} or not.
-func (v *Value) IsMSISlice() bool {
- _, ok := v.data.([]map[string]interface{})
- return ok
-}
-
-// EachMSI calls the specified callback for each object
-// in the []map[string]interface{}.
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value {
- for index, val := range v.MustMSISlice() {
- carryon := callback(index, val)
- if !carryon {
- break
- }
- }
- return v
-}
-
-// WhereMSI uses the specified decider function to select items
-// from the []map[string]interface{}. The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value {
- var selected []map[string]interface{}
- v.EachMSI(func(index int, val map[string]interface{}) bool {
- shouldSelect := decider(index, val)
- if !shouldSelect {
- selected = append(selected, val)
- }
- return true
- })
- return &Value{data: selected}
-}
-
-// GroupMSI uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][]map[string]interface{}.
-func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value {
- groups := make(map[string][]map[string]interface{})
- v.EachMSI(func(index int, val map[string]interface{}) bool {
- group := grouper(index, val)
- if _, ok := groups[group]; !ok {
- groups[group] = make([]map[string]interface{}, 0)
- }
- groups[group] = append(groups[group], val)
- return true
- })
- return &Value{data: groups}
-}
-
-// ReplaceMSI uses the specified function to replace each map[string]interface{}s
-// by iterating each item. The data in the returned result will be a
-// []map[string]interface{} containing the replaced items.
-func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value {
- arr := v.MustMSISlice()
- replaced := make([]map[string]interface{}, len(arr))
- v.EachMSI(func(index int, val map[string]interface{}) bool {
- replaced[index] = replacer(index, val)
- return true
- })
- return &Value{data: replaced}
-}
-
-// CollectMSI uses the specified collector function to collect a value
-// for each of the map[string]interface{}s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value {
- arr := v.MustMSISlice()
- collected := make([]interface{}, len(arr))
- v.EachMSI(func(index int, val map[string]interface{}) bool {
- collected[index] = collector(index, val)
- return true
- })
- return &Value{data: collected}
-}
-
-/*
- ObjxMap ((Map) and [](Map))
-*/
-
-// ObjxMap gets the value as a (Map), returns the optionalDefault
-// value or a system default object if the value is the wrong type.
-func (v *Value) ObjxMap(optionalDefault ...(Map)) Map {
- if s, ok := v.data.((Map)); ok {
- return s
- }
- if len(optionalDefault) == 1 {
- return optionalDefault[0]
- }
- return New(nil)
-}
-
-// MustObjxMap gets the value as a (Map).
-//
-// Panics if the object is not a (Map).
-func (v *Value) MustObjxMap() Map {
- return v.data.((Map))
-}
-
-// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault
-// value or nil if the value is not a [](Map).
-func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) {
- if s, ok := v.data.([](Map)); ok {
- return s
- }
- if len(optionalDefault) == 1 {
- return optionalDefault[0]
- }
- return nil
-}
-
-// MustObjxMapSlice gets the value as a [](Map).
-//
-// Panics if the object is not a [](Map).
-func (v *Value) MustObjxMapSlice() [](Map) {
- return v.data.([](Map))
-}
-
-// IsObjxMap gets whether the object contained is a (Map) or not.
-func (v *Value) IsObjxMap() bool {
- _, ok := v.data.((Map))
- return ok
-}
-
-// IsObjxMapSlice gets whether the object contained is a [](Map) or not.
-func (v *Value) IsObjxMapSlice() bool {
- _, ok := v.data.([](Map))
- return ok
-}
-
-// EachObjxMap calls the specified callback for each object
-// in the [](Map).
-//
-// Panics if the object is the wrong type.
-func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value {
- for index, val := range v.MustObjxMapSlice() {
- carryon := callback(index, val)
- if !carryon {
- break
- }
- }
- return v
-}
-
-// WhereObjxMap uses the specified decider function to select items
-// from the [](Map). The object contained in the result will contain
-// only the selected items.
-func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value {
- var selected [](Map)
- v.EachObjxMap(func(index int, val Map) bool {
- shouldSelect := decider(index, val)
- if !shouldSelect {
- selected = append(selected, val)
- }
- return true
- })
- return &Value{data: selected}
-}
-
-// GroupObjxMap uses the specified grouper function to group the items
-// keyed by the return of the grouper. The object contained in the
-// result will contain a map[string][](Map).
-func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value {
- groups := make(map[string][](Map))
- v.EachObjxMap(func(index int, val Map) bool {
- group := grouper(index, val)
- if _, ok := groups[group]; !ok {
- groups[group] = make([](Map), 0)
- }
- groups[group] = append(groups[group], val)
- return true
- })
- return &Value{data: groups}
-}
-
-// ReplaceObjxMap uses the specified function to replace each (Map)s
-// by iterating each item. The data in the returned result will be a
-// [](Map) containing the replaced items.
-func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value {
- arr := v.MustObjxMapSlice()
- replaced := make([](Map), len(arr))
- v.EachObjxMap(func(index int, val Map) bool {
- replaced[index] = replacer(index, val)
- return true
- })
- return &Value{data: replaced}
-}
-
-// CollectObjxMap uses the specified collector function to collect a value
-// for each of the (Map)s in the slice. The data returned will be a
-// []interface{}.
-func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value {
- arr := v.MustObjxMapSlice()
- collected := make([]interface{}, len(arr))
- v.EachObjxMap(func(index int, val Map) bool {
- collected[index] = collector(index, val)
- return true
- })
- return &Value{data: collected}
-}
-
-/*
- Bool (bool and []bool)
+ Bool (bool and []bool)
*/
// Bool gets the value as a bool, returns the optionalDefault
@@ -501,7 +251,7 @@ func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value {
}
/*
- Str (string and []string)
+ Str (string and []string)
*/
// Str gets the value as a string, returns the optionalDefault
@@ -626,7 +376,7 @@ func (v *Value) CollectStr(collector func(int, string) interface{}) *Value {
}
/*
- Int (int and []int)
+ Int (int and []int)
*/
// Int gets the value as a int, returns the optionalDefault
@@ -751,7 +501,7 @@ func (v *Value) CollectInt(collector func(int, int) interface{}) *Value {
}
/*
- Int8 (int8 and []int8)
+ Int8 (int8 and []int8)
*/
// Int8 gets the value as a int8, returns the optionalDefault
@@ -876,7 +626,7 @@ func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value {
}
/*
- Int16 (int16 and []int16)
+ Int16 (int16 and []int16)
*/
// Int16 gets the value as a int16, returns the optionalDefault
@@ -1001,7 +751,7 @@ func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value {
}
/*
- Int32 (int32 and []int32)
+ Int32 (int32 and []int32)
*/
// Int32 gets the value as a int32, returns the optionalDefault
@@ -1126,7 +876,7 @@ func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value {
}
/*
- Int64 (int64 and []int64)
+ Int64 (int64 and []int64)
*/
// Int64 gets the value as a int64, returns the optionalDefault
@@ -1251,7 +1001,7 @@ func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value {
}
/*
- Uint (uint and []uint)
+ Uint (uint and []uint)
*/
// Uint gets the value as a uint, returns the optionalDefault
@@ -1376,7 +1126,7 @@ func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value {
}
/*
- Uint8 (uint8 and []uint8)
+ Uint8 (uint8 and []uint8)
*/
// Uint8 gets the value as a uint8, returns the optionalDefault
@@ -1501,7 +1251,7 @@ func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value {
}
/*
- Uint16 (uint16 and []uint16)
+ Uint16 (uint16 and []uint16)
*/
// Uint16 gets the value as a uint16, returns the optionalDefault
@@ -1626,7 +1376,7 @@ func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value {
}
/*
- Uint32 (uint32 and []uint32)
+ Uint32 (uint32 and []uint32)
*/
// Uint32 gets the value as a uint32, returns the optionalDefault
@@ -1751,7 +1501,7 @@ func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value {
}
/*
- Uint64 (uint64 and []uint64)
+ Uint64 (uint64 and []uint64)
*/
// Uint64 gets the value as a uint64, returns the optionalDefault
@@ -1876,7 +1626,7 @@ func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value {
}
/*
- Uintptr (uintptr and []uintptr)
+ Uintptr (uintptr and []uintptr)
*/
// Uintptr gets the value as a uintptr, returns the optionalDefault
@@ -2001,7 +1751,7 @@ func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value
}
/*
- Float32 (float32 and []float32)
+ Float32 (float32 and []float32)
*/
// Float32 gets the value as a float32, returns the optionalDefault
@@ -2126,7 +1876,7 @@ func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value
}
/*
- Float64 (float64 and []float64)
+ Float64 (float64 and []float64)
*/
// Float64 gets the value as a float64, returns the optionalDefault
@@ -2251,7 +2001,7 @@ func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value
}
/*
- Complex64 (complex64 and []complex64)
+ Complex64 (complex64 and []complex64)
*/
// Complex64 gets the value as a complex64, returns the optionalDefault
@@ -2376,7 +2126,7 @@ func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Va
}
/*
- Complex128 (complex128 and []complex128)
+ Complex128 (complex128 and []complex128)
*/
// Complex128 gets the value as a complex128, returns the optionalDefault
diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go
index e4b4a143..4e5f9b77 100644
--- a/vendor/github.com/stretchr/objx/value.go
+++ b/vendor/github.com/stretchr/objx/value.go
@@ -20,6 +20,8 @@ func (v *Value) Data() interface{} {
// String returns the value always as a string
func (v *Value) String() string {
switch {
+ case v.IsNil():
+ return ""
case v.IsStr():
return v.Str()
case v.IsBool():
@@ -51,3 +53,107 @@ func (v *Value) String() string {
}
return fmt.Sprintf("%#v", v.Data())
}
+
+// StringSlice returns the value always as a []string
+func (v *Value) StringSlice(optionalDefault ...[]string) []string {
+ switch {
+ case v.IsStrSlice():
+ return v.MustStrSlice()
+ case v.IsBoolSlice():
+ slice := v.MustBoolSlice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatBool(iv)
+ }
+ return vals
+ case v.IsFloat32Slice():
+ slice := v.MustFloat32Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatFloat(float64(iv), 'f', -1, 32)
+ }
+ return vals
+ case v.IsFloat64Slice():
+ slice := v.MustFloat64Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatFloat(iv, 'f', -1, 64)
+ }
+ return vals
+ case v.IsIntSlice():
+ slice := v.MustIntSlice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatInt(int64(iv), 10)
+ }
+ return vals
+ case v.IsInt8Slice():
+ slice := v.MustInt8Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatInt(int64(iv), 10)
+ }
+ return vals
+ case v.IsInt16Slice():
+ slice := v.MustInt16Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatInt(int64(iv), 10)
+ }
+ return vals
+ case v.IsInt32Slice():
+ slice := v.MustInt32Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatInt(int64(iv), 10)
+ }
+ return vals
+ case v.IsInt64Slice():
+ slice := v.MustInt64Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatInt(iv, 10)
+ }
+ return vals
+ case v.IsUintSlice():
+ slice := v.MustUintSlice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatUint(uint64(iv), 10)
+ }
+ return vals
+ case v.IsUint8Slice():
+ slice := v.MustUint8Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatUint(uint64(iv), 10)
+ }
+ return vals
+ case v.IsUint16Slice():
+ slice := v.MustUint16Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatUint(uint64(iv), 10)
+ }
+ return vals
+ case v.IsUint32Slice():
+ slice := v.MustUint32Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatUint(uint64(iv), 10)
+ }
+ return vals
+ case v.IsUint64Slice():
+ slice := v.MustUint64Slice()
+ vals := make([]string, len(slice))
+ for i, iv := range slice {
+ vals[i] = strconv.FormatUint(iv, 10)
+ }
+ return vals
+ }
+ if len(optionalDefault) == 1 {
+ return optionalDefault[0]
+ }
+
+ return []string{}
+}
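
The new Value.StringSlice formats any of the supported typed slices as []string and falls back to an optional default (or an empty slice). A small sketch under the same assumptions; the keys are illustrative:

```go
package demo

import (
	"fmt"

	"github.com/stretchr/objx"
)

func ExampleStringSlice() {
	v := objx.New(map[string]interface{}{
		"ports": []int{80, 443},
		"flags": []bool{true, false},
	})

	fmt.Println(v.Get("ports").StringSlice())                  // [80 443]
	fmt.Println(v.Get("flags").StringSlice())                  // [true false]
	fmt.Println(v.Get("missing").StringSlice([]string{"n/a"})) // fallback: [n/a]
}
```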
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
index f38ec595..4b0421cf 100644
--- a/vendor/github.com/stretchr/testify/LICENSE
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
similarity index 62%
rename from vendor/github.com/stretchr/testify/assert/assertion_order.go
rename to vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 15a486ca..dc200395 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -5,20 +5,28 @@ import (
"reflect"
)
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
+type CompareType int
+
+const (
+ compareLess CompareType = iota - 1
+ compareEqual
+ compareGreater
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
switch kind {
case reflect.Int:
{
intobj1 := obj1.(int)
intobj2 := obj2.(int)
if intobj1 > intobj2 {
- return -1, true
+ return compareGreater, true
}
if intobj1 == intobj2 {
- return 0, true
+ return compareEqual, true
}
if intobj1 < intobj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int8:
@@ -26,13 +34,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int8obj1 := obj1.(int8)
int8obj2 := obj2.(int8)
if int8obj1 > int8obj2 {
- return -1, true
+ return compareGreater, true
}
if int8obj1 == int8obj2 {
- return 0, true
+ return compareEqual, true
}
if int8obj1 < int8obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int16:
@@ -40,13 +48,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int16obj1 := obj1.(int16)
int16obj2 := obj2.(int16)
if int16obj1 > int16obj2 {
- return -1, true
+ return compareGreater, true
}
if int16obj1 == int16obj2 {
- return 0, true
+ return compareEqual, true
}
if int16obj1 < int16obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int32:
@@ -54,13 +62,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int32obj1 := obj1.(int32)
int32obj2 := obj2.(int32)
if int32obj1 > int32obj2 {
- return -1, true
+ return compareGreater, true
}
if int32obj1 == int32obj2 {
- return 0, true
+ return compareEqual, true
}
if int32obj1 < int32obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Int64:
@@ -68,13 +76,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
int64obj1 := obj1.(int64)
int64obj2 := obj2.(int64)
if int64obj1 > int64obj2 {
- return -1, true
+ return compareGreater, true
}
if int64obj1 == int64obj2 {
- return 0, true
+ return compareEqual, true
}
if int64obj1 < int64obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint:
@@ -82,13 +90,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uintobj1 := obj1.(uint)
uintobj2 := obj2.(uint)
if uintobj1 > uintobj2 {
- return -1, true
+ return compareGreater, true
}
if uintobj1 == uintobj2 {
- return 0, true
+ return compareEqual, true
}
if uintobj1 < uintobj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint8:
@@ -96,13 +104,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint8obj1 := obj1.(uint8)
uint8obj2 := obj2.(uint8)
if uint8obj1 > uint8obj2 {
- return -1, true
+ return compareGreater, true
}
if uint8obj1 == uint8obj2 {
- return 0, true
+ return compareEqual, true
}
if uint8obj1 < uint8obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint16:
@@ -110,13 +118,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint16obj1 := obj1.(uint16)
uint16obj2 := obj2.(uint16)
if uint16obj1 > uint16obj2 {
- return -1, true
+ return compareGreater, true
}
if uint16obj1 == uint16obj2 {
- return 0, true
+ return compareEqual, true
}
if uint16obj1 < uint16obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint32:
@@ -124,13 +132,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint32obj1 := obj1.(uint32)
uint32obj2 := obj2.(uint32)
if uint32obj1 > uint32obj2 {
- return -1, true
+ return compareGreater, true
}
if uint32obj1 == uint32obj2 {
- return 0, true
+ return compareEqual, true
}
if uint32obj1 < uint32obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Uint64:
@@ -138,13 +146,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
uint64obj1 := obj1.(uint64)
uint64obj2 := obj2.(uint64)
if uint64obj1 > uint64obj2 {
- return -1, true
+ return compareGreater, true
}
if uint64obj1 == uint64obj2 {
- return 0, true
+ return compareEqual, true
}
if uint64obj1 < uint64obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Float32:
@@ -152,13 +160,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
float32obj1 := obj1.(float32)
float32obj2 := obj2.(float32)
if float32obj1 > float32obj2 {
- return -1, true
+ return compareGreater, true
}
if float32obj1 == float32obj2 {
- return 0, true
+ return compareEqual, true
}
if float32obj1 < float32obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.Float64:
@@ -166,13 +174,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
float64obj1 := obj1.(float64)
float64obj2 := obj2.(float64)
if float64obj1 > float64obj2 {
- return -1, true
+ return compareGreater, true
}
if float64obj1 == float64obj2 {
- return 0, true
+ return compareEqual, true
}
if float64obj1 < float64obj2 {
- return 1, true
+ return compareLess, true
}
}
case reflect.String:
@@ -180,18 +188,18 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
stringobj1 := obj1.(string)
stringobj2 := obj2.(string)
if stringobj1 > stringobj2 {
- return -1, true
+ return compareGreater, true
}
if stringobj1 == stringobj2 {
- return 0, true
+ return compareEqual, true
}
if stringobj1 < stringobj2 {
- return 1, true
+ return compareLess, true
}
}
}
- return 0, false
+ return compareEqual, false
}
// Greater asserts that the first element is greater than the second
@@ -200,26 +208,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != -1 {
- return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -229,26 +218,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != -1 && res != 0 {
- return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
}
// Less asserts that the first element is less than the second
@@ -257,26 +227,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- e1Kind := reflect.ValueOf(e1).Kind()
- e2Kind := reflect.ValueOf(e2).Kind()
- if e1Kind != e2Kind {
- return Fail(t, "Elements should be the same type", msgAndArgs...)
- }
-
- res, isComparable := compare(e1, e2, e1Kind)
- if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
- }
-
- if res != 1 {
- return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
- }
-
- return true
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -286,6 +237,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+}
+
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -296,14 +251,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
- res, isComparable := compare(e1, e2, e1Kind)
+ compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
- if res != 1 && res != 0 {
- return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
}
return true
}
+
+func containsValue(values []CompareType, value CompareType) bool {
+ for _, v := range values {
+ if v == value {
+ return true
+ }
+ }
+
+ return false
+}
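
Greater, GreaterOrEqual, Less, and LessOrEqual now share compareTwoValues, which checks the compare result against a list of allowed CompareType values; their public behaviour is unchanged. A brief usage sketch:

```go
package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	// All four helpers funnel through compareTwoValues internally.
	assert.Greater(t, int64(2), int64(1))
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, float64(1.5), float64(2.5))
	assert.LessOrEqual(t, uint8(3), uint8(3))
}
```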
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index bf89ecd2..49370eb1 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -93,7 +93,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123))
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -127,7 +127,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123))
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -173,7 +173,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1))
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -225,7 +225,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -237,7 +237,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string,
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -245,6 +245,18 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
+}
+
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
@@ -259,7 +271,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implementsf asserts that an object is implemented by the specified interface.
//
-// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -341,7 +353,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2))
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -454,6 +466,16 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
}
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@@ -476,7 +498,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -552,7 +574,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
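
The doc-comment corrections in this file move the format arguments to the end of each example call, where the `...f` variants actually expect them. A short sketch of the corrected call shape; the values are illustrative:

```go
package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFormattedVariants(t *testing.T) {
	// Format arguments follow the values under test; they are not spliced
	// into the value expressions as the old doc comments suggested.
	assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
	assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
	assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
}
```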
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 75ecdcaa..9db88942 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -169,7 +169,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
-// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123))
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -251,7 +251,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg
// Exactlyf asserts that two objects are equal in value and type.
//
-// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123))
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -370,7 +370,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string,
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1))
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -447,7 +447,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
//
// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -471,7 +471,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
//
// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
-// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false).
+// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -479,6 +479,30 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url
return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -515,7 +539,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{},
// Implementsf asserts that an object is implemented by the specified interface.
//
-// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject))
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -706,7 +730,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1, "error message %s", "formatted"), float64(2))
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -884,6 +908,26 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
// NotEqualf asserts that the specified values are NOT equal.
//
// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
@@ -950,7 +994,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting")
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1102,7 +1146,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
// Regexpf asserts that a specified regexp matches a string.
//
-// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting")
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
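
The forwarders above expose the new NotEqualValues/NotEqualValuesf and HTTPStatusCode helpers on *Assertions as well. A minimal sketch of the method-style usage:

```go
package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotEqualValues(t *testing.T) {
	a := assert.New(t)

	// EqualValues converts before comparing; NotEqualValues asserts the opposite.
	a.EqualValues(uint32(123), int32(123))
	a.NotEqualValues(uint32(123), int32(124))
	a.NotEqualValuesf(uint32(123), int32(124), "should differ after %s", "conversion")
}
```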
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bdd81389..914a10d8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -19,7 +19,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- yaml "gopkg.in/yaml.v2"
+ yaml "gopkg.in/yaml.v3"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -45,7 +45,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
-// Comparison a custom function that returns true on success and false on failure
+// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
/*
@@ -104,11 +104,11 @@ the problem actually occurred in calling code.*/
// failed.
func CallerInfo() []string {
- pc := uintptr(0)
- file := ""
- line := 0
- ok := false
- name := ""
+ var pc uintptr
+ var ok bool
+ var file string
+ var line int
+ var name string
callers := []string{}
for i := 0; ; i++ {
@@ -429,14 +429,27 @@ func samePointers(first, second interface{}) bool {
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
- return fmt.Sprintf("%T(%#v)", expected, expected),
- fmt.Sprintf("%T(%#v)", actual, actual)
+ return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
+ fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
}
switch expected.(type) {
case time.Duration:
return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
}
- return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
+ return truncatingFormat(expected), truncatingFormat(actual)
+}
+
+// truncatingFormat formats the data and truncates it if it's too long.
+//
+// This helps keep formatted error messages lines from exceeding the
+// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
+func truncatingFormat(data interface{}) string {
+ value := fmt.Sprintf("%#v", data)
+ max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed.
+ if len(value) > max {
+ value = value[0:max] + "<... truncated>"
+ }
+ return value
}
// EqualValues asserts that two objects are equal or convertible to the same types
@@ -483,12 +496,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
//
// assert.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if !isNil(object) {
return true
}
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
@@ -529,12 +542,12 @@ func isNil(object interface{}) bool {
//
// assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if isNil(object) {
return true
}
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
}
@@ -571,12 +584,11 @@ func isEmpty(object interface{}) bool {
//
// assert.Empty(t, obj)
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
pass := isEmpty(object)
if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
}
@@ -591,12 +603,11 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// assert.Equal(t, "two", obj[1])
// }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
pass := !isEmpty(object)
if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
}
@@ -639,16 +650,10 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
//
// assert.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
- if h, ok := t.(interface {
- Helper()
- }); ok {
- h.Helper()
- }
-
- if value != true {
+ if !value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Should be true", msgAndArgs...)
}
@@ -660,11 +665,10 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
//
// assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
- if value != false {
+ if value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "Should be false", msgAndArgs...)
}
@@ -695,6 +699,21 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
}
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+}
+
// containsElement try loop over the list check if the list includes the element.
// return (false, false) if impossible.
// return (true, false) if element was not found.
@@ -747,10 +766,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
ok, found := includeElement(s, contains)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
}
return true
@@ -881,27 +900,39 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
return true
}
- aKind := reflect.TypeOf(listA).Kind()
- bKind := reflect.TypeOf(listB).Kind()
-
- if aKind != reflect.Array && aKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+ if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
+ return false
}
- if bKind != reflect.Array && bKind != reflect.Slice {
- return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+ extraA, extraB := diffLists(listA, listB)
+
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return true
}
+ return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
+}
+
+// isList checks that the provided value is array or slice.
+func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
+ kind := reflect.TypeOf(list).Kind()
+ if kind != reflect.Array && kind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
+ msgAndArgs...)
+ }
+ return true
+}
+
+// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
+// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
+// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
aValue := reflect.ValueOf(listA)
bValue := reflect.ValueOf(listB)
aLen := aValue.Len()
bLen := bValue.Len()
- if aLen != bLen {
- return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
- }
-
// Mark indexes in bValue that we already used
visited := make([]bool, bLen)
for i := 0; i < aLen; i++ {
@@ -918,11 +949,38 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface
}
}
if !found {
- return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+ extraA = append(extraA, element)
}
}
- return true
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ extraB = append(extraB, bValue.Index(j).Interface())
+ }
+
+ return
+}
+
+func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
+ var msg bytes.Buffer
+
+ msg.WriteString("elements differ")
+ if len(extraA) > 0 {
+ msg.WriteString("\n\nextra elements in list A:\n")
+ msg.WriteString(spewConfig.Sdump(extraA))
+ }
+ if len(extraB) > 0 {
+ msg.WriteString("\n\nextra elements in list B:\n")
+ msg.WriteString(spewConfig.Sdump(extraB))
+ }
+ msg.WriteString("\n\nlistA:\n")
+ msg.WriteString(spewConfig.Sdump(listA))
+ msg.WriteString("\n\nlistB:\n")
+ msg.WriteString(spewConfig.Sdump(listB))
+
+ return msg.String()
}
// Condition uses a Comparison to assert a complex condition.
@@ -1058,6 +1116,8 @@ func toFloat(x interface{}) (float64, bool) {
xok := true
switch xn := x.(type) {
+ case uint:
+ xf = float64(xn)
case uint8:
xf = float64(xn)
case uint16:
@@ -1079,7 +1139,7 @@ func toFloat(x interface{}) (float64, bool) {
case float32:
xf = float64(xn)
case float64:
- xf = float64(xn)
+ xf = xn
case time.Duration:
xf = float64(xn)
default:
@@ -1193,6 +1253,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !aok {
return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
}
+ if math.IsNaN(af) {
+ return 0, errors.New("expected value must not be NaN")
+ }
if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
}
@@ -1200,6 +1263,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if !bok {
return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
}
+ if math.IsNaN(bf) {
+ return 0, errors.New("actual value must not be NaN")
+ }
return math.Abs(af-bf) / math.Abs(af), nil
}
@@ -1209,6 +1275,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ if math.IsNaN(epsilon) {
+ return Fail(t, "epsilon must not be NaN")
+ }
actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
@@ -1256,10 +1325,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
// assert.Equal(t, expectedObj, actualObj)
// }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
if err != nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
}
@@ -1273,11 +1342,10 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// assert.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
- if h, ok := t.(tHelper); ok {
- h.Helper()
- }
-
if err == nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
return Fail(t, "An error is expected but got nil.", msgAndArgs...)
}
@@ -1553,6 +1621,7 @@ var spewConfig = spew.ConfigState{
DisablePointerAddresses: true,
DisableCapacities: true,
SortKeys: true,
+ DisableMethods: true,
}
type tHelper interface {
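
Two behavioural notes on the assertions.go changes: ElementsMatch no longer fails early on a length mismatch but reports the extra elements of each list, and toFloat now accepts plain uint so the delta/epsilon helpers take it directly. A small sketch:

```go
package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUpdatedAssertions(t *testing.T) {
	// Order is ignored and duplicates are counted; on failure the message
	// now lists the extra elements in each list instead of a bare length error.
	assert.ElementsMatch(t, []int{1, 2, 2, 3}, []int{2, 3, 1, 2})

	// uint is now handled by toFloat, so delta assertions accept it directly.
	assert.InDelta(t, uint(100), uint(101), 2)
}
```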
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index df46fa77..4ed341dd 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -33,7 +33,6 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
@@ -56,7 +55,6 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
@@ -79,7 +77,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
- return false
}
isErrorCode := code >= http.StatusBadRequest
@@ -90,6 +87,28 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
return isErrorCode
}
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ successful := code == statuscode
+ if !successful {
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ }
+
+ return successful
+}
+
// HTTPBody is a helper that returns HTTP body of the response. It returns
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
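
HTTPStatusCode asserts one exact status code, rather than the success/redirect/error ranges covered by the existing helpers. A sketch with a hypothetical handler:

```go
package demo

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// notImplemented is a stand-in handler for the example.
func notImplemented(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNotImplemented)
}

func TestStatusCode(t *testing.T) {
	assert.HTTPStatusCode(t, notImplemented, "GET", "/notImplemented", nil, 501)
}
```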
diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go
index 58e0798d..c6df4485 100644
--- a/vendor/github.com/stretchr/testify/mock/mock.go
+++ b/vendor/github.com/stretchr/testify/mock/mock.go
@@ -65,6 +65,11 @@ type Call struct {
// reference. It's useful when mocking methods such as unmarshalers or
// decoders.
RunFn func(Arguments)
+
+ // PanicMsg holds the message to be used to mock a panic on the function call;
+ // if PanicMsg is set to a non-nil string, the function call will panic
+ // irrespective of other settings.
+ PanicMsg *string
}
func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call {
@@ -77,6 +82,7 @@ func newCall(parent *Mock, methodName string, callerInfo []string, methodArgumen
Repeatability: 0,
WaitFor: nil,
RunFn: nil,
+ PanicMsg: nil,
}
}
@@ -100,6 +106,18 @@ func (c *Call) Return(returnArguments ...interface{}) *Call {
return c
}
+// Panic specifies that the function call should panic with the given message
+//
+// Mock.On("DoSomething").Panic("test panic")
+func (c *Call) Panic(msg string) *Call {
+ c.lock()
+ defer c.unlock()
+
+ c.PanicMsg = &msg
+
+ return c
+}
+
// Once indicates that the mock should only return the value once.
//
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
@@ -150,7 +168,7 @@ func (c *Call) After(d time.Duration) *Call {
// mocking a method (such as an unmarshaler) that takes a pointer to a struct and
// sets properties in such struct
//
-// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) {
+// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) {
// arg := args.Get(0).(*map[string]interface{})
// arg["foo"] = "bar"
// })
@@ -392,6 +410,13 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
time.Sleep(call.waitTime)
}
+ m.mutex.Lock()
+ panicMsg := call.PanicMsg
+ m.mutex.Unlock()
+ if panicMsg != nil {
+ panic(*panicMsg)
+ }
+
m.mutex.Lock()
runFn := call.RunFn
m.mutex.Unlock()
@@ -527,6 +552,45 @@ func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...inter
return true
}
+// IsMethodCallable checks whether the method can still be called.
+// It returns false if the method has already been called more times than its `Repeatability` allows.
+func (m *Mock) IsMethodCallable(t TestingT, methodName string, arguments ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ for _, v := range m.ExpectedCalls {
+ if v.Method != methodName {
+ continue
+ }
+ if len(arguments) != len(v.Arguments) {
+ continue
+ }
+ if v.Repeatability < v.totalCalls {
+ continue
+ }
+ if isArgsEqual(v.Arguments, arguments) {
+ return true
+ }
+ }
+ return false
+}
+
+// isArgsEqual compares arguments
+func isArgsEqual(expected Arguments, args []interface{}) bool {
+ if len(expected) != len(args) {
+ return false
+ }
+ for i, v := range args {
+ if !reflect.DeepEqual(expected[i], v) {
+ return false
+ }
+ }
+ return true
+}
+
func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool {
for _, call := range m.calls() {
if call.Method == methodName {
@@ -791,7 +855,7 @@ func (args Arguments) String(indexOrNil ...int) string {
// normal String() method - return a string representation of the args
var argsStr []string
for _, arg := range args {
- argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg)))
+ argsStr = append(argsStr, fmt.Sprintf("%T", arg)) // handles nil nicely
}
return strings.Join(argsStr, ",")
} else if len(indexOrNil) == 1 {
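A short sketch of how the new `Call.Panic` option above might be exercised; the `Downloader` mock and its `Fetch` method are hypothetical, not part of testify.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// Downloader is a hypothetical mock used only to illustrate Call.Panic.
type Downloader struct {
	mock.Mock
}

func (d *Downloader) Fetch(url string) string {
	args := d.Called(url)
	return args.String(0)
}

func TestFetchPanics(t *testing.T) {
	d := new(Downloader)
	// The configured call panics instead of returning, irrespective of other settings.
	d.On("Fetch", "https://example.com").Panic("backend unavailable")

	assert.PanicsWithValue(t, "backend unavailable", func() {
		d.Fetch("https://example.com")
	})
}
```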
diff --git a/vendor/github.com/tdakkota/asciicheck/README.md b/vendor/github.com/tdakkota/asciicheck/README.md
index fc62811b..a7ff5884 100644
--- a/vendor/github.com/tdakkota/asciicheck/README.md
+++ b/vendor/github.com/tdakkota/asciicheck/README.md
@@ -7,6 +7,34 @@ Simple linter to check that your code does not contain non-ASCII identifiers
go get -u github.com/tdakkota/asciicheck/cmd/asciicheck
```
+# Reason to use
+So, do you see this code? Looks correct, doesn't it?
+
+```go
+package main
+
+import "fmt"
+
+type TеstStruct struct{}
+
+func main() {
+ s := TestStruct{}
+ fmt.Println(s)
+}
+```
+But if you try to run it, you will get an error:
+```
+./prog.go:8:7: undefined: TestStruct
+```
+What? `TestStruct` is defined above, but the compiler thinks differently. Why?
+
+**Answer**:
+Because `TestStruct` is not `TеstStruct`.
+```
+type TеstStruct struct{}
+ ^ this 'е' (U+0435) is not 'e' (U+0065)
+```
+
# Usage
asciicheck uses [`singlechecker`](https://pkg.go.dev/golang.org/x/tools/go/analysis/singlechecker) package to run:
diff --git a/vendor/github.com/tetafro/godot/.golangci.yml b/vendor/github.com/tetafro/godot/.golangci.yml
new file mode 100644
index 00000000..2b799b26
--- /dev/null
+++ b/vendor/github.com/tetafro/godot/.golangci.yml
@@ -0,0 +1,67 @@
+run:
+ concurrency: 2
+ deadline: 5m
+
+skip-dirs:
+ - path: ./testdata/
+
+linters:
+ disable-all: true
+ enable:
+ - deadcode
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+ - bodyclose
+ - depguard
+ - dogsled
+ - dupl
+ - funlen
+ - gochecknoinits
+ - goconst
+ - gocritic
+ - gocyclo
+ - godot
+ - gofmt
+ - gofumpt
+ - goimports
+ - golint
+ - gomnd
+ - gomodguard
+ - goprintffuncname
+ - gosec
+ - lll
+ - maligned
+ - misspell
+ - nakedret
+ - nestif
+ - prealloc
+ - rowserrcheck
+ - scopelint
+ - stylecheck
+ - unconvert
+ - unparam
+ - whitespace
+
+linters-settings:
+ godot:
+ check-all: true
+
+issues:
+ exclude-use-default: false
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - dupl
+ - errcheck
+ - funlen
+ - gosec
+ - path: cmd/godot/main\.go
+ linters:
+ - gomnd
diff --git a/vendor/github.com/tetafro/godot/.goreleaser.yml b/vendor/github.com/tetafro/godot/.goreleaser.yml
index 87a05a2a..c0fc2b6b 100644
--- a/vendor/github.com/tetafro/godot/.goreleaser.yml
+++ b/vendor/github.com/tetafro/godot/.goreleaser.yml
@@ -8,5 +8,4 @@ changelog:
sort: asc
filters:
exclude:
- - '^docs:'
- - '^test:'
+ - '^Merge pull request'
diff --git a/vendor/github.com/tetafro/godot/Makefile b/vendor/github.com/tetafro/godot/Makefile
index fee7693a..98a691d7 100644
--- a/vendor/github.com/tetafro/godot/Makefile
+++ b/vendor/github.com/tetafro/godot/Makefile
@@ -2,6 +2,16 @@
test:
go test ./...
+.PHONY: cover
+cover:
+ go test -coverprofile cover.out ./...
+ go tool cover -html=cover.out
+ rm -f cover.out
+
+.PHONY: lint
+lint:
+ golangci-lint run
+
.PHONY: build
build:
go build -o godot ./cmd/godot
diff --git a/vendor/github.com/tetafro/godot/README.md b/vendor/github.com/tetafro/godot/README.md
index e2d2f6b0..864767e3 100644
--- a/vendor/github.com/tetafro/godot/README.md
+++ b/vendor/github.com/tetafro/godot/README.md
@@ -13,23 +13,32 @@ end of the last sentence if needed.
> Comments should begin with the name of the thing being described
> and end in a period
-## Install and run
+## Install
*NOTE: Godot is available as a part of [GolangCI Lint](https://github.com/golangci/golangci-lint)
(disabled by default).*
Build from source
+
```sh
go get -u github.com/tetafro/godot/cmd/godot
```
or download binary from [releases page](https://github.com/tetafro/godot/releases).
-Run
+## Run
+
```sh
godot ./myproject
```
+Autofix flags are also available
+
+```sh
+godot -f ./myproject # fix issues and print the result
+godot -w ./myproject # fix issues and replace the original file
+```
+
## Examples
Code
@@ -50,5 +59,5 @@ Top level comment should end in a period: math/math.go:3:1
```
See more examples in test files:
-- [for default mode](testdata/example_default.go)
-- [for using --all flag](testdata/example_checkall.go)
+- [for default mode](testdata/default/in/main.go)
+- [for using --all flag](testdata/checkall/in/main.go)
diff --git a/vendor/github.com/tetafro/godot/godot.go b/vendor/github.com/tetafro/godot/godot.go
index 5eec8357..81211d72 100644
--- a/vendor/github.com/tetafro/godot/godot.go
+++ b/vendor/github.com/tetafro/godot/godot.go
@@ -3,19 +3,25 @@
package godot
import (
+ "fmt"
"go/ast"
"go/token"
+ "io/ioutil"
+ "os"
"regexp"
+ "sort"
"strings"
)
-const noPeriodMessage = "Top level comment should end in a period"
-
-// Message contains a message of linting error.
-type Message struct {
- Pos token.Position
- Message string
-}
+const (
+ // noPeriodMessage is an error message to return.
+ noPeriodMessage = "Top level comment should end in a period"
+ // topLevelColumn is the leftmost column of the file.
+ topLevelColumn = 1
+ // topLevelGroupColumn is the leftmost column inside a group declaration
+ // at the top level.
+ topLevelGroupColumn = 2
+)
// Settings contains linter settings.
type Settings struct {
@@ -23,12 +29,25 @@ type Settings struct {
CheckAll bool
}
+// Issue contains a description of linting error and a possible replacement.
+type Issue struct {
+ Pos token.Position
+ Message string
+ Replacement string
+}
+
+// position is a position inside a comment (which might be a multiline comment).
+type position struct {
+ line int
+ column int
+}
+
var (
// List of valid last characters.
lastChars = []string{".", "?", "!"}
- // Special tags in comments like "nolint" or "build".
- tags = regexp.MustCompile("^[a-z]+:")
+ // Special tags in comments like "// nolint:", or "// +k8s:".
+ tags = regexp.MustCompile(`^\+?[a-z0-9]+:`)
// Special hashtags in comments like "#nosec".
hashtags = regexp.MustCompile("^#[a-z]+ ")
@@ -38,43 +57,150 @@ var (
)
// Run runs this linter on the provided code.
-func Run(file *ast.File, fset *token.FileSet, settings Settings) []Message {
- msgs := []Message{}
+func Run(file *ast.File, fset *token.FileSet, settings Settings) []Issue {
+ issues := checkBlocks(file, fset)
// Check all top-level comments
if settings.CheckAll {
- for _, group := range file.Comments {
- if ok, msg := check(fset, group); !ok {
- msgs = append(msgs, msg)
- }
- }
- return msgs
+ issues = append(issues, checkTopLevel(file, fset)...)
+ sortIssues(issues)
+ return issues
}
// Check only declaration comments
+ issues = append(issues, checkDeclarations(file, fset)...)
+ sortIssues(issues)
+ return issues
+}
+
+// Fix fixes all issues and return new version of file content.
+func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([]byte, error) {
+ // Read file
+ content, err := ioutil.ReadFile(path) // nolint: gosec
+ if err != nil {
+ return nil, fmt.Errorf("read file: %v", err)
+ }
+ if len(content) == 0 {
+ return nil, nil
+ }
+
+ issues := Run(file, fset, settings)
+
+ // slice -> map
+ m := map[int]Issue{}
+ for _, iss := range issues {
+ m[iss.Pos.Line] = iss
+ }
+
+ // Replace lines from issues
+ fixed := make([]byte, 0, len(content))
+ for i, line := range strings.Split(string(content), "\n") {
+ newline := line
+ if iss, ok := m[i+1]; ok {
+ newline = iss.Replacement
+ }
+ fixed = append(fixed, []byte(newline+"\n")...)
+ }
+ fixed = fixed[:len(fixed)-1] // trim last "\n"
+
+ return fixed, nil
+}
+
+// Replace rewrites the original file with its fixed version.
+func Replace(path string, file *ast.File, fset *token.FileSet, settings Settings) error {
+ info, err := os.Stat(path)
+ if err != nil {
+ return fmt.Errorf("check file: %v", err)
+ }
+ mode := info.Mode()
+
+ fixed, err := Fix(path, file, fset, settings)
+ if err != nil {
+ return fmt.Errorf("fix issues: %v", err)
+ }
+
+ if err := ioutil.WriteFile(path, fixed, mode); err != nil {
+ return fmt.Errorf("write file: %v", err)
+ }
+ return nil
+}
+
+// sortIssues sorts by filename, line and column.
+func sortIssues(iss []Issue) {
+ sort.Slice(iss, func(i, j int) bool {
+ if iss[i].Pos.Filename != iss[j].Pos.Filename {
+ return iss[i].Pos.Filename < iss[j].Pos.Filename
+ }
+ if iss[i].Pos.Line != iss[j].Pos.Line {
+ return iss[i].Pos.Line < iss[j].Pos.Line
+ }
+ return iss[i].Pos.Column < iss[j].Pos.Column
+ })
+}
+
+// checkTopLevel checks all top-level comments.
+func checkTopLevel(file *ast.File, fset *token.FileSet) (issues []Issue) {
+ for _, group := range file.Comments {
+ if iss, ok := check(fset, group, topLevelColumn); !ok {
+ issues = append(issues, iss)
+ }
+ }
+ return issues
+}
+
+// checkDeclarations checks top level declaration comments.
+func checkDeclarations(file *ast.File, fset *token.FileSet) (issues []Issue) {
for _, decl := range file.Decls {
switch d := decl.(type) {
case *ast.GenDecl:
- if ok, msg := check(fset, d.Doc); !ok {
- msgs = append(msgs, msg)
+ if iss, ok := check(fset, d.Doc, topLevelColumn); !ok {
+ issues = append(issues, iss)
}
case *ast.FuncDecl:
- if ok, msg := check(fset, d.Doc); !ok {
- msgs = append(msgs, msg)
+ if iss, ok := check(fset, d.Doc, topLevelColumn); !ok {
+ issues = append(issues, iss)
}
}
}
- return msgs
+ return issues
}
-func check(fset *token.FileSet, group *ast.CommentGroup) (ok bool, msg Message) {
+// checkBlocks checks comments inside top level blocks (var (...), const (...), etc).
+func checkBlocks(file *ast.File, fset *token.FileSet) (issues []Issue) {
+ for _, decl := range file.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ // No parenthesis == no block
+ if d.Lparen == 0 {
+ continue
+ }
+ for _, group := range file.Comments {
+ // Skip comments outside this block
+ if d.Lparen > group.Pos() || group.Pos() > d.Rparen {
+ continue
+ }
+ // Skip comments that are not top-level for this block
+ if fset.Position(group.Pos()).Column != topLevelGroupColumn {
+ continue
+ }
+ if iss, ok := check(fset, group, topLevelGroupColumn); !ok {
+ issues = append(issues, iss)
+ }
+ }
+ }
+ return issues
+}
+
+func check(fset *token.FileSet, group *ast.CommentGroup, level int) (iss Issue, ok bool) {
if group == nil || len(group.List) == 0 {
- return true, Message{}
+ return Issue{}, true
}
// Check only top-level comments
- if fset.Position(group.Pos()).Column > 1 {
- return true, Message{}
+ if fset.Position(group.Pos()).Column > level {
+ return Issue{}, true
}
// Get last element from comment group - it can be either
@@ -82,29 +208,40 @@ func check(fset *token.FileSet, group *ast.CommentGroup) (ok bool, msg Message)
// for "/*"-comment
last := group.List[len(group.List)-1]
- line, ok := checkComment(last.Text)
+ p, ok := checkComment(last.Text)
if ok {
- return true, Message{}
+ return Issue{}, true
}
+
pos := fset.Position(last.Slash)
- pos.Line += line
- return false, Message{
- Pos: pos,
- Message: noPeriodMessage,
+ pos.Line += p.line
+ pos.Column = p.column + level - 1
+
+ indent := strings.Repeat("\t", level-1)
+
+ iss = Issue{
+ Pos: pos,
+ Message: noPeriodMessage,
+ Replacement: indent + makeReplacement(last.Text, p),
}
+ return iss, false
}
-func checkComment(comment string) (line int, ok bool) {
+func checkComment(comment string) (pos position, ok bool) {
// Check last line of "//"-comment
if strings.HasPrefix(comment, "//") {
+ pos.column = len([]rune(comment)) // runes for non-latin chars
comment = strings.TrimPrefix(comment, "//")
- return 0, checkLastChar(comment)
+ if checkLastChar(comment) {
+ return position{}, true
+ }
+ return pos, false
}
// Skip cgo code blocks
- // TODO: Find a better way to detect cgo code.
+ // TODO: Find a better way to detect cgo code
if strings.Contains(comment, "#include") || strings.Contains(comment, "#define") {
- return 0, true
+ return position{}, true
}
// Check last non-empty line in multiline "/*"-comment block
@@ -116,9 +253,19 @@ func checkComment(comment string) (line int, ok bool) {
}
break
}
- comment = strings.TrimPrefix(lines[i], "/*")
+ pos.line = i
+ comment = lines[i]
comment = strings.TrimSuffix(comment, "*/")
- return i, checkLastChar(comment)
+ comment = strings.TrimRight(comment, " ")
+ // Get position of the last non-space char in comment line, use runes
+ // in case of non-latin chars
+ pos.column = len([]rune(comment))
+ comment = strings.TrimPrefix(comment, "/*")
+
+ if checkLastChar(comment) {
+ return position{}, true
+ }
+ return pos, false
}
func checkLastChar(s string) bool {
@@ -142,6 +289,8 @@ func checkLastChar(s string) bool {
if s == "" {
return true
}
+ // Trim closing parentheses for cases when the whole sentence is inside parentheses
+ s = strings.TrimRight(s, ")")
for _, ch := range lastChars {
if string(s[len(s)-1]) == ch {
return true
@@ -149,3 +298,24 @@ func checkLastChar(s string) bool {
}
return false
}
+
+// makeReplacement inserts a period into the comment at
+// the given position.
+func makeReplacement(s string, pos position) string {
+ lines := strings.Split(s, "\n")
+ if len(lines) < pos.line {
+ // This should never happen
+ return s
+ }
+ line := []rune(lines[pos.line])
+ if len(line) < pos.column {
+ // This should never happen
+ return s
+ }
+ // Insert a period
+ newline := append(
+ line[:pos.column],
+ append([]rune{'.'}, line[pos.column:]...)...,
+ )
+ return string(newline)
+}
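Since the hunk above reworks godot's public API (`Message` becomes `Issue`, and `Fix`/`Replace` are added), here is a minimal sketch, based only on the signatures shown, of how a consumer might call `Run` and print the issues. The file path is a placeholder.

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"github.com/tetafro/godot"
)

func main() {
	const path = "example.go" // placeholder path, for illustration only

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	// Report every comment that does not end in a period.
	issues := godot.Run(file, fset, godot.Settings{CheckAll: true})
	for _, iss := range issues {
		fmt.Printf("%s: %s\n", iss.Pos, iss.Message)
	}
}
```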
diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go
index 3c702ab5..145d5409 100644
--- a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go
+++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go
@@ -250,6 +250,20 @@ func (r *runner) isCloseCall(ccall ssa.Instruction) bool {
}
}
}
+
+ if returnOp, ok := cs.(*ssa.Return); ok {
+ for _, resultValue := range returnOp.Results {
+ if resultValue.Type().String() == "io.Closer" {
+ return true
+ }
+ }
+ }
+ }
+ }
+ case *ssa.Return:
+ for _, resultValue := range ccall.Results {
+ if resultValue.Type().String() == "io.ReadCloser" {
+ return true
}
}
}
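As far as this hunk suggests, bodyclose now treats returning the body (or an `io.Closer`) to the caller as a legitimate close path. A hedged example of the pattern that this appears to cover:

```go
package example

import (
	"io"
	"net/http"
)

// openBody hands the response body to the caller, which becomes responsible
// for closing it. Because the body escapes via the return statement, the
// added *ssa.Return handling should keep bodyclose from flagging this.
func openBody(url string) (io.ReadCloser, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
```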
diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md
index ca7e9a0f..aaf34852 100644
--- a/vendor/github.com/ultraware/funlen/README.md
+++ b/vendor/github.com/ultraware/funlen/README.md
@@ -2,68 +2,8 @@
Funlen is a linter that checks for long functions. It can check both the number of lines and the number of statements.
-The default limits are 50 lines and 35 statements. You can configure these with the `-l` and `-s` flags.
-
-Example code:
-
-```go
-package main
-
-import "fmt"
-
-func fiveStatements() {
- fmt.Println(1)
- fmt.Println(2)
- fmt.Println(3)
- fmt.Println(4)
- fmt.Println(5)
-}
-
-func sevenLines() {
- fmt.Println(1)
-
- fmt.Println(2)
-
- fmt.Println(3)
-
- fmt.Println(4)
-}
-```
-
-Reults in:
-
-```
-$ funlen -l=6 -s=4 .
-main.go:5:6:Function 'fiveStatements' has too many statements (5 > 4)
-main.go:13:6:Function 'sevenLines' is too long (7 > 6)
-```
+The default limits are 60 lines and 40 statements. You can configure these.
## Installation guide
-```bash
-go get git.ultraware.nl/NiseVoid/funlen
-```
-
-### Gometalinter
-
-You can add funlen to gometalinter and enable it.
-
-`.gometalinter.json`:
-
-```json
-{
- "Linters": {
- "funlen": "funlen -l=50 -s=35:PATH:LINE:COL:MESSAGE"
- },
-
- "Enable": [
- "funlen"
- ]
-}
-```
-
-commandline:
-
-```bash
-gometalinter --linter "funlen:funlen -l=50 -s=35:PATH:LINE:COL:MESSAGE" --enable "funlen"
-```
+Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen.
diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go
index 19e48e2f..2ba35300 100644
--- a/vendor/github.com/ultraware/funlen/main.go
+++ b/vendor/github.com/ultraware/funlen/main.go
@@ -7,8 +7,10 @@ import (
"reflect"
)
-const defaultLineLimit = 60
-const defaultStmtLimit = 40
+const (
+ defaultLineLimit = 60
+ defaultStmtLimit = 40
+)
// Run runs this linter on the provided code
func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Message {
@@ -26,13 +28,17 @@ func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Messag
continue
}
- if stmts := parseStmts(decl.Body.List); stmts > stmtLimit {
- msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit))
- continue
+ if stmtLimit > 0 {
+ if stmts := parseStmts(decl.Body.List); stmts > stmtLimit {
+ msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit))
+ continue
+ }
}
- if lines := getLines(fset, decl); lines > lineLimit {
- msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit))
+ if lineLimit > 0 {
+ if lines := getLines(fset, decl); lines > lineLimit {
+ msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit))
+ }
}
}
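With the guards above, a limit of 0 now disables that particular check. A minimal sketch of calling `funlen.Run` directly with the statement check disabled; the file path is a placeholder.

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"github.com/ultraware/funlen"
)

func main() {
	const path = "example.go" // placeholder path, for illustration only

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, path, nil, 0)
	if err != nil {
		log.Fatal(err)
	}

	// A statement limit of 0 skips the statement check entirely;
	// only the 60-line limit is enforced here.
	for _, msg := range funlen.Run(file, fset, 60, 0) {
		fmt.Println(msg)
	}
}
```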
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 76a92e0c..2482f7bf 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -2525,6 +2525,7 @@ func strSliceContains(ss []string, s string) bool {
type erringRoundTripper struct{ err error }
+func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
// gzipReader wraps a response body so it can lazily
diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
new file mode 100644
index 00000000..567a4763
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for mips64, OpenBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go
index 3559e5dc..56416786 100644
--- a/vendor/golang.org/x/sys/unix/ioctl.go
+++ b/vendor/golang.org/x/sys/unix/ioctl.go
@@ -20,6 +20,15 @@ func IoctlSetInt(fd int, req uint, value int) error {
return ioctl(fd, req, uintptr(value))
}
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req uint, value int) error {
+ v := int32(value)
+ return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
+}
+
// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
//
// To change fd's window size, the req argument should be TIOCSWINSZ.
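A brief sketch of the calling convention `IoctlSetPointerInt` wraps. The choice of `TIOCSPGRP` here is an assumption for illustration, on the understanding that this request takes a pointer to its integer argument.

```go
package example

import "golang.org/x/sys/unix"

// setForegroundProcessGroup makes pgid the foreground process group of the
// terminal referred to by fd. The request's argument is passed by pointer,
// which is exactly the convention IoctlSetPointerInt provides.
func setForegroundProcessGroup(fd, pgid int) error {
	return unix.IoctlSetPointerInt(fd, unix.TIOCSPGRP, pgid)
}
```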
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index ece31e9d..d257fac5 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -73,26 +73,22 @@ aix_ppc64)
darwin_386)
mkerrors="$mkerrors -m32"
mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
;;
darwin_amd64)
mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
;;
darwin_arm)
mkerrors="$mkerrors"
mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
;;
@@ -184,6 +180,15 @@ openbsd_arm64)
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
+openbsd_mips64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd"
+ mksysctl="go run mksysctl_openbsd.go"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
solaris_amd64)
mksyscall="go run mksyscall_solaris.go"
mkerrors="$mkerrors -m64"
@@ -217,8 +222,6 @@ esac
# aix/ppc64 script generates files instead of writing to stdin.
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
elif [ "$GOOS" == "darwin" ]; then
- # pre-1.12, direct syscalls
- echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go";
# 1.12 and later, syscalls via libSystem
echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
# 1.13 and later, syscalls via libSystem (including syscallPtr)
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 08f8230d..cab45604 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -107,6 +107,7 @@ includes_FreeBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -192,9 +193,12 @@ struct ltchars {
#include
#include
#include
+#include
+#include
#include
#include
#include
+#include
#include
#include
#include
@@ -297,6 +301,7 @@ includes_NetBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -325,6 +330,7 @@ includes_OpenBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -507,11 +513,14 @@ ccflags="$@"
$2 ~ /^(CLOCK|TIMER)_/ ||
$2 ~ /^CAN_/ ||
$2 ~ /^CAP_/ ||
+ $2 ~ /^CP_/ ||
+ $2 ~ /^CPUSTATES$/ ||
$2 ~ /^ALG_/ ||
$2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ ||
$2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ ||
$2 ~ /^FS_VERITY_/ ||
$2 ~ /^FSCRYPT_/ ||
+ $2 ~ /^DM_/ ||
$2 ~ /^GRND_/ ||
$2 ~ /^RND/ ||
$2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 68605db6..60bbe10a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -527,6 +527,23 @@ func SysctlClockinfo(name string) (*Clockinfo, error) {
return &ci, nil
}
+func SysctlTimeval(name string) (*Timeval, error) {
+ mib, err := sysctlmib(name)
+ if err != nil {
+ return nil, err
+ }
+
+ var tv Timeval
+ n := uintptr(unsafe.Sizeof(tv))
+ if err := sysctl(mib, (*byte)(unsafe.Pointer(&tv)), &n, nil, 0); err != nil {
+ return nil, err
+ }
+ if n != unsafe.Sizeof(tv) {
+ return nil, EIO
+ }
+ return &tv, nil
+}
+
//sys utimes(path string, timeval *[2]Timeval) (err error)
func Utimes(path string, tv []Timeval) error {
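A sketch of the new `SysctlTimeval` helper, assuming a BSD target where the `kern.boottime` node reports a `struct timeval` (the sysctl name is an assumption for illustration).

```go
package example

import (
	"time"

	"golang.org/x/sys/unix"
)

// bootTime returns the system boot time by reading the kern.boottime sysctl
// through the new SysctlTimeval helper.
func bootTime() (time.Time, error) {
	tv, err := unix.SysctlTimeval("kern.boottime")
	if err != nil {
		return time.Time{}, err
	}
	sec, nsec := tv.Unix()
	return time.Unix(sec, nsec), nil
}
```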
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
index 6a15cba6..b31ef035 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
@@ -10,6 +10,8 @@ import (
"unsafe"
)
+const _SYS_GETDIRENTRIES64 = 344
+
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// To implement this using libSystem we'd need syscall_syscallPtr for
// fdopendir. However, syscallPtr was only added in Go 1.13, so we fall
@@ -20,7 +22,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
} else {
p = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
+ r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
n = int(r0)
if e1 != 0 {
return n, errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 0cf31acf..e2a05eed 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -49,6 +49,11 @@ type SockaddrDatalink struct {
raw RawSockaddrDatalink
}
+// Some external packages rely on SYS___SYSCTL being defined to implement their
+// own sysctl wrappers. Provide it here, even though direct syscalls are no
+// longer supported on darwin.
+const SYS___SYSCTL = 202
+
// Translate "kern.hostname" to []_C_int{0,1,2,3}.
func nametomib(name string) (mib []_C_int, err error) {
const siz = unsafe.Sizeof(mib[0])
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go
deleted file mode 100644
index 6b223f91..00000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin,386,!go1.12
-
-package unix
-
-//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
index 2724e3a5..ea0be1e9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
@@ -44,10 +44,6 @@ func (cmsg *Cmsghdr) SetLen(length int) {
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
-// of darwin/386 the syscall is called sysctl instead of __sysctl.
-const SYS___SYSCTL = SYS_SYSCTL
-
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go
deleted file mode 100644
index 68ebd6fa..00000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin,amd64,!go1.12
-
-package unix
-
-//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
index ce2e0d24..58624044 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -44,10 +44,6 @@ func (cmsg *Cmsghdr) SetLen(length int) {
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
-// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
-// of darwin/amd64 the syscall is called sysctl instead of __sysctl.
-const SYS___SYSCTL = SYS_SYSCTL
-
//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64
//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go
deleted file mode 100644
index 0e3f25ac..00000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin,arm,!go1.12
-
-package unix
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- return 0, ENOSYS
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
index fc17a3f2..b8b31418 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -44,10 +44,6 @@ func (cmsg *Cmsghdr) SetLen(length int) {
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
-// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
-// of darwin/arm the syscall is called sysctl instead of __sysctl.
-const SYS___SYSCTL = SYS_SYSCTL
-
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys Fstatfs(fd int, stat *Statfs_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go
deleted file mode 100644
index 01d45040..00000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin,arm64,!go1.12
-
-package unix
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- return 0, ENOSYS
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
index 1e91ddf3..67413983 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -46,10 +46,6 @@ func (cmsg *Cmsghdr) SetLen(length int) {
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
-// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
-// of darwin/arm64 the syscall is called sysctl instead of __sysctl.
-const SYS___SYSCTL = SYS_SYSCTL
-
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys Fstatfs(fd int, stat *Statfs_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go
index 99e62dcd..f3d5149f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_illumos.go
+++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go
@@ -24,7 +24,7 @@ func bytes2iovec(bs [][]byte) []Iovec {
return iovecs
}
-//sys readv(fd int, iovs []Iovec) (n int, err error)
+//sys readv(fd int, iovs []Iovec) (n int, err error)
func Readv(fd int, iovs [][]byte) (n int, err error) {
iovecs := bytes2iovec(iovs)
@@ -32,7 +32,7 @@ func Readv(fd int, iovs [][]byte) (n int, err error) {
return n, err
}
-//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error)
+//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error)
func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) {
iovecs := bytes2iovec(iovs)
@@ -40,7 +40,7 @@ func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) {
return n, err
}
-//sys writev(fd int, iovs []Iovec) (n int, err error)
+//sys writev(fd int, iovs []Iovec) (n int, err error)
func Writev(fd int, iovs [][]byte) (n int, err error) {
iovecs := bytes2iovec(iovs)
@@ -48,10 +48,30 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
return n, err
}
-//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error)
+//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error)
func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) {
iovecs := bytes2iovec(iovs)
n, err = pwritev(fd, iovecs, off)
return n, err
}
+
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+
+func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(fd, &rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index e50e4cb2..ec7e4c4d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -82,15 +82,6 @@ func IoctlRetInt(fd int, req uint) (int, error) {
return int(ret), nil
}
-// IoctlSetPointerInt performs an ioctl operation which sets an
-// integer value on fd, using the specified request number. The ioctl
-// argument is called with a pointer to the integer value, rather than
-// passing the integer value directly.
-func IoctlSetPointerInt(fd int, req uint, value int) error {
- v := int32(value)
- return ioctl(fd, req, uintptr(unsafe.Pointer(&v)))
-}
-
func IoctlSetRTCTime(fd int, value *RTCTime) error {
err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value)))
runtime.KeepAlive(value)
@@ -145,6 +136,12 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
return openat(dirfd, path, flags|O_LARGEFILE, mode)
}
+//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)
+
+func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) {
+ return openat2(dirfd, path, how, SizeofOpenHow)
+}
+
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
@@ -885,6 +882,35 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil
}
+// SockaddrIUCV implements the Sockaddr interface for AF_IUCV sockets.
+type SockaddrIUCV struct {
+ UserID string
+ Name string
+ raw RawSockaddrIUCV
+}
+
+func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_IUCV
+ // These are EBCDIC encoded by the kernel, but we still need to pad them
+ // with blanks. Initializing with blanks allows the caller to feed in either
+ // a padded or an unpadded string.
+ for i := 0; i < 8; i++ {
+ sa.raw.Nodeid[i] = ' '
+ sa.raw.User_id[i] = ' '
+ sa.raw.Name[i] = ' '
+ }
+ if len(sa.UserID) > 8 || len(sa.Name) > 8 {
+ return nil, 0, EINVAL
+ }
+ for i, b := range []byte(sa.UserID[:]) {
+ sa.raw.User_id[i] = int8(b)
+ }
+ for i, b := range []byte(sa.Name[:]) {
+ sa.raw.Name[i] = int8(b)
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil
+}
+
func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
switch rsa.Addr.Family {
case AF_NETLINK:
@@ -1065,6 +1091,38 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
}
return sa, nil
+ case AF_IUCV:
+ pp := (*RawSockaddrIUCV)(unsafe.Pointer(rsa))
+
+ var user [8]byte
+ var name [8]byte
+
+ for i := 0; i < 8; i++ {
+ user[i] = byte(pp.User_id[i])
+ name[i] = byte(pp.Name[i])
+ }
+
+ sa := &SockaddrIUCV{
+ UserID: string(user[:]),
+ Name: string(name[:]),
+ }
+ return sa, nil
+
+ case AF_CAN:
+ pp := (*RawSockaddrCAN)(unsafe.Pointer(rsa))
+ sa := &SockaddrCAN{
+ Ifindex: int(pp.Ifindex),
+ }
+ rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
+ for i := 0; i < 4; i++ {
+ rx[i] = pp.Addr[i]
+ }
+ tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
+ for i := 0; i < 4; i++ {
+ tx[i] = pp.Addr[i+4]
+ }
+ return sa, nil
+
}
return nil, EAFNOSUPPORT
}
@@ -1965,10 +2023,15 @@ func isGroupMember(gid int) bool {
}
//sys faccessat(dirfd int, path string, mode uint32) (err error)
+//sys Faccessat2(dirfd int, path string, mode uint32, flags int) (err error)
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
- if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
- return EINVAL
+ if flags == 0 {
+ return faccessat(dirfd, path, mode)
+ }
+
+ if err := Faccessat2(dirfd, path, mode, flags); err != ENOSYS && err != EPERM {
+ return err
}
// The Linux kernel faccessat system call does not take any flags.
@@ -1977,8 +2040,8 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
// Because people naturally expect syscall.Faccessat to act
// like C faccessat, we do the same.
- if flags == 0 {
- return faccessat(dirfd, path, mode)
+ if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 {
+ return EINVAL
}
var st Stat_t
@@ -2122,6 +2185,18 @@ func Klogset(typ int, arg int) (err error) {
return nil
}
+// RemoteIovec is Iovec with the pointer replaced with an integer.
+// It is used for ProcessVMReadv and ProcessVMWritev, where the pointer
+// refers to a location in a different process' address space, which
+// would confuse the Go garbage collector.
+type RemoteIovec struct {
+ Base uintptr
+ Len int
+}
+
+//sys ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_READV
+//sys ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) = SYS_PROCESS_VM_WRITEV
+
/*
* Unimplemented
*/
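Given the `RemoteIovec` rationale above, here is a minimal sketch of reading memory from another process with the new `ProcessVMReadv` wrapper. The pid and remote address are caller-supplied placeholders, and the call needs the usual ptrace-style permission (for example CAP_SYS_PTRACE) over the target.

```go
package example

import "golang.org/x/sys/unix"

// peekRemote copies len(buf) bytes from addr in the address space of pid into
// buf. The remote side is described with RemoteIovec, whose Base is a plain
// uintptr, so the Go garbage collector never sees the foreign pointer.
// buf must be non-empty.
func peekRemote(pid int, addr uintptr, buf []byte) (int, error) {
	local := []unix.Iovec{{Base: &buf[0]}}
	local[0].SetLen(len(buf)) // portable across Iovec.Len field widths

	remote := []unix.RemoteIovec{{Base: addr, Len: len(buf)}}

	return unix.ProcessVMReadv(pid, local, remote, 0)
}
```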
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index e1913e2c..496837b1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -7,7 +7,6 @@
package unix
import (
- "syscall"
"unsafe"
)
@@ -49,10 +48,6 @@ func Pipe2(p []int, flags int) (err error) {
return
}
-// Underlying system call writes to newoffset via pointer.
-// Implemented in assembly to avoid allocation.
-func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno)
-
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
newoffset, errno := seek(fd, offset, whence)
if errno != 0 {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
new file mode 100644
index 00000000..8c514c95
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go
@@ -0,0 +1,13 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,linux
+
+package unix
+
+import "syscall"
+
+// Underlying system call writes to newoffset via pointer.
+// Implemented in assembly to avoid allocation.
+func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go
new file mode 100644
index 00000000..30f28534
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+func setTimespec(sec, nsec int64) Timespec {
+ return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+ return Timeval{Sec: sec, Usec: usec}
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+ k.Ident = uint64(fd)
+ k.Filter = int16(mode)
+ k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of OpenBSD the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
index 84824587..3689c808 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
index 4acd101c..b8f7c3c9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
index e4719873..be14bb1a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
index 5e49769d..7ce9c008 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go
@@ -339,6 +339,12 @@ const (
CLOCK_UPTIME_FAST = 0x8
CLOCK_UPTIME_PRECISE = 0x7
CLOCK_VIRTUAL = 0x1
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x30000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index f8bd50c1..388050a0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -244,8 +244,66 @@ const (
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_ACK = 0x20
+ CAN_ERR_BUSERROR = 0x80
+ CAN_ERR_BUSOFF = 0x40
+ CAN_ERR_CRTL = 0x4
+ CAN_ERR_CRTL_ACTIVE = 0x40
+ CAN_ERR_CRTL_RX_OVERFLOW = 0x1
+ CAN_ERR_CRTL_RX_PASSIVE = 0x10
+ CAN_ERR_CRTL_RX_WARNING = 0x4
+ CAN_ERR_CRTL_TX_OVERFLOW = 0x2
+ CAN_ERR_CRTL_TX_PASSIVE = 0x20
+ CAN_ERR_CRTL_TX_WARNING = 0x8
+ CAN_ERR_CRTL_UNSPEC = 0x0
+ CAN_ERR_DLC = 0x8
CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_LOSTARB = 0x2
+ CAN_ERR_LOSTARB_UNSPEC = 0x0
CAN_ERR_MASK = 0x1fffffff
+ CAN_ERR_PROT = 0x8
+ CAN_ERR_PROT_ACTIVE = 0x40
+ CAN_ERR_PROT_BIT = 0x1
+ CAN_ERR_PROT_BIT0 = 0x8
+ CAN_ERR_PROT_BIT1 = 0x10
+ CAN_ERR_PROT_FORM = 0x2
+ CAN_ERR_PROT_LOC_ACK = 0x19
+ CAN_ERR_PROT_LOC_ACK_DEL = 0x1b
+ CAN_ERR_PROT_LOC_CRC_DEL = 0x18
+ CAN_ERR_PROT_LOC_CRC_SEQ = 0x8
+ CAN_ERR_PROT_LOC_DATA = 0xa
+ CAN_ERR_PROT_LOC_DLC = 0xb
+ CAN_ERR_PROT_LOC_EOF = 0x1a
+ CAN_ERR_PROT_LOC_ID04_00 = 0xe
+ CAN_ERR_PROT_LOC_ID12_05 = 0xf
+ CAN_ERR_PROT_LOC_ID17_13 = 0x7
+ CAN_ERR_PROT_LOC_ID20_18 = 0x6
+ CAN_ERR_PROT_LOC_ID28_21 = 0x2
+ CAN_ERR_PROT_LOC_IDE = 0x5
+ CAN_ERR_PROT_LOC_INTERM = 0x12
+ CAN_ERR_PROT_LOC_RES0 = 0x9
+ CAN_ERR_PROT_LOC_RES1 = 0xd
+ CAN_ERR_PROT_LOC_RTR = 0xc
+ CAN_ERR_PROT_LOC_SOF = 0x3
+ CAN_ERR_PROT_LOC_SRTR = 0x4
+ CAN_ERR_PROT_LOC_UNSPEC = 0x0
+ CAN_ERR_PROT_OVERLOAD = 0x20
+ CAN_ERR_PROT_STUFF = 0x4
+ CAN_ERR_PROT_TX = 0x80
+ CAN_ERR_PROT_UNSPEC = 0x0
+ CAN_ERR_RESTARTED = 0x100
+ CAN_ERR_TRX = 0x10
+ CAN_ERR_TRX_CANH_NO_WIRE = 0x4
+ CAN_ERR_TRX_CANH_SHORT_TO_BAT = 0x5
+ CAN_ERR_TRX_CANH_SHORT_TO_GND = 0x7
+ CAN_ERR_TRX_CANH_SHORT_TO_VCC = 0x6
+ CAN_ERR_TRX_CANL_NO_WIRE = 0x40
+ CAN_ERR_TRX_CANL_SHORT_TO_BAT = 0x50
+ CAN_ERR_TRX_CANL_SHORT_TO_CANH = 0x80
+ CAN_ERR_TRX_CANL_SHORT_TO_GND = 0x70
+ CAN_ERR_TRX_CANL_SHORT_TO_VCC = 0x60
+ CAN_ERR_TRX_UNSPEC = 0x0
+ CAN_ERR_TX_TIMEOUT = 0x1
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_J1939 = 0x7
@@ -265,6 +323,7 @@ const (
CAP_AUDIT_READ = 0x25
CAP_AUDIT_WRITE = 0x1d
CAP_BLOCK_SUSPEND = 0x24
+ CAP_BPF = 0x27
CAP_CHOWN = 0x0
CAP_DAC_OVERRIDE = 0x1
CAP_DAC_READ_SEARCH = 0x2
@@ -273,7 +332,7 @@ const (
CAP_IPC_LOCK = 0xe
CAP_IPC_OWNER = 0xf
CAP_KILL = 0x5
- CAP_LAST_CAP = 0x25
+ CAP_LAST_CAP = 0x27
CAP_LEASE = 0x1c
CAP_LINUX_IMMUTABLE = 0x9
CAP_MAC_ADMIN = 0x21
@@ -283,6 +342,7 @@ const (
CAP_NET_BIND_SERVICE = 0xa
CAP_NET_BROADCAST = 0xb
CAP_NET_RAW = 0xd
+ CAP_PERFMON = 0x26
CAP_SETFCAP = 0x1f
CAP_SETGID = 0x6
CAP_SETPCAP = 0x8
@@ -372,8 +432,54 @@ const (
DEVLINK_GENL_NAME = "devlink"
DEVLINK_GENL_VERSION = 0x1
DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14
+ DEVMEM_MAGIC = 0x454d444d
DEVPTS_SUPER_MAGIC = 0x1cd1
DMA_BUF_MAGIC = 0x444d4142
+ DM_ACTIVE_PRESENT_FLAG = 0x20
+ DM_BUFFER_FULL_FLAG = 0x100
+ DM_CONTROL_NODE = "control"
+ DM_DATA_OUT_FLAG = 0x10000
+ DM_DEFERRED_REMOVE = 0x20000
+ DM_DEV_ARM_POLL = 0xc138fd10
+ DM_DEV_CREATE = 0xc138fd03
+ DM_DEV_REMOVE = 0xc138fd04
+ DM_DEV_RENAME = 0xc138fd05
+ DM_DEV_SET_GEOMETRY = 0xc138fd0f
+ DM_DEV_STATUS = 0xc138fd07
+ DM_DEV_SUSPEND = 0xc138fd06
+ DM_DEV_WAIT = 0xc138fd08
+ DM_DIR = "mapper"
+ DM_GET_TARGET_VERSION = 0xc138fd11
+ DM_INACTIVE_PRESENT_FLAG = 0x40
+ DM_INTERNAL_SUSPEND_FLAG = 0x40000
+ DM_IOCTL = 0xfd
+ DM_LIST_DEVICES = 0xc138fd02
+ DM_LIST_VERSIONS = 0xc138fd0d
+ DM_MAX_TYPE_NAME = 0x10
+ DM_NAME_LEN = 0x80
+ DM_NOFLUSH_FLAG = 0x800
+ DM_PERSISTENT_DEV_FLAG = 0x8
+ DM_QUERY_INACTIVE_TABLE_FLAG = 0x1000
+ DM_READONLY_FLAG = 0x1
+ DM_REMOVE_ALL = 0xc138fd01
+ DM_SECURE_DATA_FLAG = 0x8000
+ DM_SKIP_BDGET_FLAG = 0x200
+ DM_SKIP_LOCKFS_FLAG = 0x400
+ DM_STATUS_TABLE_FLAG = 0x10
+ DM_SUSPEND_FLAG = 0x2
+ DM_TABLE_CLEAR = 0xc138fd0a
+ DM_TABLE_DEPS = 0xc138fd0b
+ DM_TABLE_LOAD = 0xc138fd09
+ DM_TABLE_STATUS = 0xc138fd0c
+ DM_TARGET_MSG = 0xc138fd0e
+ DM_UEVENT_GENERATED_FLAG = 0x2000
+ DM_UUID_FLAG = 0x4000
+ DM_UUID_LEN = 0x81
+ DM_VERSION = 0xc138fd00
+ DM_VERSION_EXTRA = "-ioctl (2020-02-27)"
+ DM_VERSION_MAJOR = 0x4
+ DM_VERSION_MINOR = 0x2a
+ DM_VERSION_PATCHLEVEL = 0x0
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
@@ -475,6 +581,7 @@ const (
ETH_P_MOBITEX = 0x15
ETH_P_MPLS_MC = 0x8848
ETH_P_MPLS_UC = 0x8847
+ ETH_P_MRP = 0x88e3
ETH_P_MVRP = 0x88f5
ETH_P_NCSI = 0x88f8
ETH_P_NSH = 0x894f
@@ -602,8 +709,9 @@ const (
FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0
FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1
FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3
- FSCRYPT_POLICY_FLAGS_VALID = 0xf
+ FSCRYPT_POLICY_FLAGS_VALID = 0x1f
FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 = 0x10
FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8
FSCRYPT_POLICY_V1 = 0x0
FSCRYPT_POLICY_V2 = 0x2
@@ -632,7 +740,7 @@ const (
FS_POLICY_FLAGS_PAD_4 = 0x0
FS_POLICY_FLAGS_PAD_8 = 0x1
FS_POLICY_FLAGS_PAD_MASK = 0x3
- FS_POLICY_FLAGS_VALID = 0xf
+ FS_POLICY_FLAGS_VALID = 0x1f
FS_VERITY_FL = 0x100000
FS_VERITY_HASH_ALG_SHA256 = 0x1
FS_VERITY_HASH_ALG_SHA512 = 0x2
@@ -834,6 +942,7 @@ const (
IPPROTO_EGP = 0x8
IPPROTO_ENCAP = 0x62
IPPROTO_ESP = 0x32
+ IPPROTO_ETHERNET = 0x8f
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GRE = 0x2f
IPPROTO_HOPOPTS = 0x0
@@ -847,6 +956,7 @@ const (
IPPROTO_L2TP = 0x73
IPPROTO_MH = 0x87
IPPROTO_MPLS = 0x89
+ IPPROTO_MPTCP = 0x106
IPPROTO_MTP = 0x5c
IPPROTO_NONE = 0x3b
IPPROTO_PIM = 0x67
@@ -1016,6 +1126,7 @@ const (
KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2
KEYCTL_CAPS0_PUBLIC_KEY = 0x8
KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40
+ KEYCTL_CAPS1_NOTIFICATIONS = 0x4
KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1
KEYCTL_CAPS1_NS_KEY_TAG = 0x2
KEYCTL_CHOWN = 0x4
@@ -1053,6 +1164,7 @@ const (
KEYCTL_SUPPORTS_VERIFY = 0x8
KEYCTL_UNLINK = 0x9
KEYCTL_UPDATE = 0x2
+ KEYCTL_WATCH_KEY = 0x20
KEY_REQKEY_DEFL_DEFAULT = 0x0
KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6
KEY_REQKEY_DEFL_NO_CHANGE = -0x1
@@ -1096,6 +1208,8 @@ const (
LOOP_SET_FD = 0x4c00
LOOP_SET_STATUS = 0x4c02
LOOP_SET_STATUS64 = 0x4c04
+ LOOP_SET_STATUS_CLEARABLE_FLAGS = 0x4
+ LOOP_SET_STATUS_SETTABLE_FLAGS = 0xc
LO_KEY_SIZE = 0x20
LO_NAME_SIZE = 0x40
MADV_COLD = 0x14
@@ -1929,6 +2043,7 @@ const (
SOL_ATM = 0x108
SOL_CAIF = 0x116
SOL_CAN_BASE = 0x64
+ SOL_CAN_RAW = 0x65
SOL_DCCP = 0x10d
SOL_DECNET = 0x105
SOL_ICMPV6 = 0x3a
@@ -1992,8 +2107,10 @@ const (
STATX_ATTR_APPEND = 0x20
STATX_ATTR_AUTOMOUNT = 0x1000
STATX_ATTR_COMPRESSED = 0x4
+ STATX_ATTR_DAX = 0x2000
STATX_ATTR_ENCRYPTED = 0x800
STATX_ATTR_IMMUTABLE = 0x10
+ STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000
STATX_BASIC_STATS = 0x7ff
@@ -2002,6 +2119,7 @@ const (
STATX_CTIME = 0x80
STATX_GID = 0x10
STATX_INO = 0x100
+ STATX_MNT_ID = 0x1000
STATX_MODE = 0x2
STATX_MTIME = 0x40
STATX_NLINK = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 8b0e024b..84f71e99 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -192,6 +192,7 @@ const (
PPPIOCSRASYNCMAP = 0x40047454
PPPIOCSXASYNCMAP = 0x4020744f
PPPIOCXFERUNIT = 0x744e
+ PROT_BTI = 0x10
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
index 96b9b8ab..20f3a579 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go
@@ -158,6 +158,12 @@ const (
CLONE_SIGHAND = 0x800
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
index ed522a84..90b8fcd2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go
@@ -158,6 +158,12 @@ const (
CLONE_SIGHAND = 0x800
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
index c8d36fe9..c5c03993 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go
@@ -150,6 +150,12 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
index f1c146a7..14dd3c1d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go
@@ -158,6 +158,12 @@ const (
CLONE_SIGHAND = 0x800
CLONE_VFORK = 0x4000
CLONE_VM = 0x100
+ CPUSTATES = 0x5
+ CP_IDLE = 0x4
+ CP_INTR = 0x3
+ CP_NICE = 0x1
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
index 5402bd55..c865a10d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go
@@ -146,6 +146,13 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
index ffaf2d2f..9db6b2fb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go
@@ -153,6 +153,13 @@ const (
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x4
CLOCK_UPTIME = 0x5
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
index 7aa796a6..7072526a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go
@@ -146,6 +146,13 @@ const (
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
index 1792d3f1..ac5efbe5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go
@@ -156,6 +156,13 @@ const (
CLOCK_REALTIME = 0x0
CLOCK_THREAD_CPUTIME_ID = 0x4
CLOCK_UPTIME = 0x5
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
CREAD = 0x800
CRTSCTS = 0x10000
CS5 = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
new file mode 100644
index 00000000..a74639a4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go
@@ -0,0 +1,1862 @@
+// mkerrors.sh -m64
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build mips64,openbsd
+
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
+// cgo -godefs -- -m64 _const.go
+
+package unix
+
+import "syscall"
+
+const (
+ AF_APPLETALK = 0x10
+ AF_BLUETOOTH = 0x20
+ AF_CCITT = 0xa
+ AF_CHAOS = 0x5
+ AF_CNT = 0x15
+ AF_COIP = 0x14
+ AF_DATAKIT = 0x9
+ AF_DECnet = 0xc
+ AF_DLI = 0xd
+ AF_E164 = 0x1a
+ AF_ECMA = 0x8
+ AF_ENCAP = 0x1c
+ AF_HYLINK = 0xf
+ AF_IMPLINK = 0x3
+ AF_INET = 0x2
+ AF_INET6 = 0x18
+ AF_IPX = 0x17
+ AF_ISDN = 0x1a
+ AF_ISO = 0x7
+ AF_KEY = 0x1e
+ AF_LAT = 0xe
+ AF_LINK = 0x12
+ AF_LOCAL = 0x1
+ AF_MAX = 0x24
+ AF_MPLS = 0x21
+ AF_NATM = 0x1b
+ AF_NS = 0x6
+ AF_OSI = 0x7
+ AF_PUP = 0x4
+ AF_ROUTE = 0x11
+ AF_SIP = 0x1d
+ AF_SNA = 0xb
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ ALTWERASE = 0x200
+ ARPHRD_ETHER = 0x1
+ ARPHRD_FRELAY = 0xf
+ ARPHRD_IEEE1394 = 0x18
+ ARPHRD_IEEE802 = 0x6
+ B0 = 0x0
+ B110 = 0x6e
+ B115200 = 0x1c200
+ B1200 = 0x4b0
+ B134 = 0x86
+ B14400 = 0x3840
+ B150 = 0x96
+ B1800 = 0x708
+ B19200 = 0x4b00
+ B200 = 0xc8
+ B230400 = 0x38400
+ B2400 = 0x960
+ B28800 = 0x7080
+ B300 = 0x12c
+ B38400 = 0x9600
+ B4800 = 0x12c0
+ B50 = 0x32
+ B57600 = 0xe100
+ B600 = 0x258
+ B7200 = 0x1c20
+ B75 = 0x4b
+ B76800 = 0x12c00
+ B9600 = 0x2580
+ BIOCFLUSH = 0x20004268
+ BIOCGBLEN = 0x40044266
+ BIOCGDIRFILT = 0x4004427c
+ BIOCGDLT = 0x4004426a
+ BIOCGDLTLIST = 0xc010427b
+ BIOCGETIF = 0x4020426b
+ BIOCGFILDROP = 0x40044278
+ BIOCGHDRCMPLT = 0x40044274
+ BIOCGRSIG = 0x40044273
+ BIOCGRTIMEOUT = 0x4010426e
+ BIOCGSTATS = 0x4008426f
+ BIOCIMMEDIATE = 0x80044270
+ BIOCLOCK = 0x20004276
+ BIOCPROMISC = 0x20004269
+ BIOCSBLEN = 0xc0044266
+ BIOCSDIRFILT = 0x8004427d
+ BIOCSDLT = 0x8004427a
+ BIOCSETF = 0x80104267
+ BIOCSETIF = 0x8020426c
+ BIOCSETWF = 0x80104277
+ BIOCSFILDROP = 0x80044279
+ BIOCSHDRCMPLT = 0x80044275
+ BIOCSRSIG = 0x80044272
+ BIOCSRTIMEOUT = 0x8010426d
+ BIOCVERSION = 0x40044271
+ BPF_A = 0x10
+ BPF_ABS = 0x20
+ BPF_ADD = 0x0
+ BPF_ALIGNMENT = 0x4
+ BPF_ALU = 0x4
+ BPF_AND = 0x50
+ BPF_B = 0x10
+ BPF_DIRECTION_IN = 0x1
+ BPF_DIRECTION_OUT = 0x2
+ BPF_DIV = 0x30
+ BPF_FILDROP_CAPTURE = 0x1
+ BPF_FILDROP_DROP = 0x2
+ BPF_FILDROP_PASS = 0x0
+ BPF_H = 0x8
+ BPF_IMM = 0x0
+ BPF_IND = 0x40
+ BPF_JA = 0x0
+ BPF_JEQ = 0x10
+ BPF_JGE = 0x30
+ BPF_JGT = 0x20
+ BPF_JMP = 0x5
+ BPF_JSET = 0x40
+ BPF_K = 0x0
+ BPF_LD = 0x0
+ BPF_LDX = 0x1
+ BPF_LEN = 0x80
+ BPF_LSH = 0x60
+ BPF_MAJOR_VERSION = 0x1
+ BPF_MAXBUFSIZE = 0x200000
+ BPF_MAXINSNS = 0x200
+ BPF_MEM = 0x60
+ BPF_MEMWORDS = 0x10
+ BPF_MINBUFSIZE = 0x20
+ BPF_MINOR_VERSION = 0x1
+ BPF_MISC = 0x7
+ BPF_MSH = 0xa0
+ BPF_MUL = 0x20
+ BPF_NEG = 0x80
+ BPF_OR = 0x40
+ BPF_RELEASE = 0x30bb6
+ BPF_RET = 0x6
+ BPF_RSH = 0x70
+ BPF_ST = 0x2
+ BPF_STX = 0x3
+ BPF_SUB = 0x10
+ BPF_TAX = 0x0
+ BPF_TXA = 0x80
+ BPF_W = 0x0
+ BPF_X = 0x8
+ BRKINT = 0x2
+ CFLUSH = 0xf
+ CLOCAL = 0x8000
+ CLOCK_BOOTTIME = 0x6
+ CLOCK_MONOTONIC = 0x3
+ CLOCK_PROCESS_CPUTIME_ID = 0x2
+ CLOCK_REALTIME = 0x0
+ CLOCK_THREAD_CPUTIME_ID = 0x4
+ CLOCK_UPTIME = 0x5
+ CPUSTATES = 0x6
+ CP_IDLE = 0x5
+ CP_INTR = 0x4
+ CP_NICE = 0x1
+ CP_SPIN = 0x3
+ CP_SYS = 0x2
+ CP_USER = 0x0
+ CREAD = 0x800
+ CRTSCTS = 0x10000
+ CS5 = 0x0
+ CS6 = 0x100
+ CS7 = 0x200
+ CS8 = 0x300
+ CSIZE = 0x300
+ CSTART = 0x11
+ CSTATUS = 0xff
+ CSTOP = 0x13
+ CSTOPB = 0x400
+ CSUSP = 0x1a
+ CTL_HW = 0x6
+ CTL_KERN = 0x1
+ CTL_MAXNAME = 0xc
+ CTL_NET = 0x4
+ DIOCADDQUEUE = 0xc110445d
+ DIOCADDRULE = 0xcd604404
+ DIOCADDSTATE = 0xc1084425
+ DIOCCHANGERULE = 0xcd60441a
+ DIOCCLRIFFLAG = 0xc028445a
+ DIOCCLRSRCNODES = 0x20004455
+ DIOCCLRSTATES = 0xc0e04412
+ DIOCCLRSTATUS = 0xc0284416
+ DIOCGETLIMIT = 0xc0084427
+ DIOCGETQSTATS = 0xc1204460
+ DIOCGETQUEUE = 0xc110445f
+ DIOCGETQUEUES = 0xc110445e
+ DIOCGETRULE = 0xcd604407
+ DIOCGETRULES = 0xcd604406
+ DIOCGETRULESET = 0xc444443b
+ DIOCGETRULESETS = 0xc444443a
+ DIOCGETSRCNODES = 0xc0104454
+ DIOCGETSTATE = 0xc1084413
+ DIOCGETSTATES = 0xc0104419
+ DIOCGETSTATUS = 0xc1e84415
+ DIOCGETSYNFLWATS = 0xc0084463
+ DIOCGETTIMEOUT = 0xc008441e
+ DIOCIGETIFACES = 0xc0284457
+ DIOCKILLSRCNODES = 0xc080445b
+ DIOCKILLSTATES = 0xc0e04429
+ DIOCNATLOOK = 0xc0504417
+ DIOCOSFPADD = 0xc088444f
+ DIOCOSFPFLUSH = 0x2000444e
+ DIOCOSFPGET = 0xc0884450
+ DIOCRADDADDRS = 0xc4504443
+ DIOCRADDTABLES = 0xc450443d
+ DIOCRCLRADDRS = 0xc4504442
+ DIOCRCLRASTATS = 0xc4504448
+ DIOCRCLRTABLES = 0xc450443c
+ DIOCRCLRTSTATS = 0xc4504441
+ DIOCRDELADDRS = 0xc4504444
+ DIOCRDELTABLES = 0xc450443e
+ DIOCRGETADDRS = 0xc4504446
+ DIOCRGETASTATS = 0xc4504447
+ DIOCRGETTABLES = 0xc450443f
+ DIOCRGETTSTATS = 0xc4504440
+ DIOCRINADEFINE = 0xc450444d
+ DIOCRSETADDRS = 0xc4504445
+ DIOCRSETTFLAGS = 0xc450444a
+ DIOCRTSTADDRS = 0xc4504449
+ DIOCSETDEBUG = 0xc0044418
+ DIOCSETHOSTID = 0xc0044456
+ DIOCSETIFFLAG = 0xc0284459
+ DIOCSETLIMIT = 0xc0084428
+ DIOCSETREASS = 0xc004445c
+ DIOCSETSTATUSIF = 0xc0284414
+ DIOCSETSYNCOOKIES = 0xc0014462
+ DIOCSETSYNFLWATS = 0xc0084461
+ DIOCSETTIMEOUT = 0xc008441d
+ DIOCSTART = 0x20004401
+ DIOCSTOP = 0x20004402
+ DIOCXBEGIN = 0xc0104451
+ DIOCXCOMMIT = 0xc0104452
+ DIOCXROLLBACK = 0xc0104453
+ DLT_ARCNET = 0x7
+ DLT_ATM_RFC1483 = 0xb
+ DLT_AX25 = 0x3
+ DLT_CHAOS = 0x5
+ DLT_C_HDLC = 0x68
+ DLT_EN10MB = 0x1
+ DLT_EN3MB = 0x2
+ DLT_ENC = 0xd
+ DLT_FDDI = 0xa
+ DLT_IEEE802 = 0x6
+ DLT_IEEE802_11 = 0x69
+ DLT_IEEE802_11_RADIO = 0x7f
+ DLT_LOOP = 0xc
+ DLT_MPLS = 0xdb
+ DLT_NULL = 0x0
+ DLT_OPENFLOW = 0x10b
+ DLT_PFLOG = 0x75
+ DLT_PFSYNC = 0x12
+ DLT_PPP = 0x9
+ DLT_PPP_BSDOS = 0x10
+ DLT_PPP_ETHER = 0x33
+ DLT_PPP_SERIAL = 0x32
+ DLT_PRONET = 0x4
+ DLT_RAW = 0xe
+ DLT_SLIP = 0x8
+ DLT_SLIP_BSDOS = 0xf
+ DLT_USBPCAP = 0xf9
+ DLT_USER0 = 0x93
+ DLT_USER1 = 0x94
+ DLT_USER10 = 0x9d
+ DLT_USER11 = 0x9e
+ DLT_USER12 = 0x9f
+ DLT_USER13 = 0xa0
+ DLT_USER14 = 0xa1
+ DLT_USER15 = 0xa2
+ DLT_USER2 = 0x95
+ DLT_USER3 = 0x96
+ DLT_USER4 = 0x97
+ DLT_USER5 = 0x98
+ DLT_USER6 = 0x99
+ DLT_USER7 = 0x9a
+ DLT_USER8 = 0x9b
+ DLT_USER9 = 0x9c
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0x0
+ ECHO = 0x8
+ ECHOCTL = 0x40
+ ECHOE = 0x2
+ ECHOK = 0x4
+ ECHOKE = 0x1
+ ECHONL = 0x10
+ ECHOPRT = 0x20
+ EMT_TAGOVF = 0x1
+ EMUL_ENABLED = 0x1
+ EMUL_NATIVE = 0x2
+ ENDRUNDISC = 0x9
+ ETHERMIN = 0x2e
+ ETHERMTU = 0x5dc
+ ETHERTYPE_8023 = 0x4
+ ETHERTYPE_AARP = 0x80f3
+ ETHERTYPE_ACCTON = 0x8390
+ ETHERTYPE_AEONIC = 0x8036
+ ETHERTYPE_ALPHA = 0x814a
+ ETHERTYPE_AMBER = 0x6008
+ ETHERTYPE_AMOEBA = 0x8145
+ ETHERTYPE_AOE = 0x88a2
+ ETHERTYPE_APOLLO = 0x80f7
+ ETHERTYPE_APOLLODOMAIN = 0x8019
+ ETHERTYPE_APPLETALK = 0x809b
+ ETHERTYPE_APPLITEK = 0x80c7
+ ETHERTYPE_ARGONAUT = 0x803a
+ ETHERTYPE_ARP = 0x806
+ ETHERTYPE_AT = 0x809b
+ ETHERTYPE_ATALK = 0x809b
+ ETHERTYPE_ATOMIC = 0x86df
+ ETHERTYPE_ATT = 0x8069
+ ETHERTYPE_ATTSTANFORD = 0x8008
+ ETHERTYPE_AUTOPHON = 0x806a
+ ETHERTYPE_AXIS = 0x8856
+ ETHERTYPE_BCLOOP = 0x9003
+ ETHERTYPE_BOFL = 0x8102
+ ETHERTYPE_CABLETRON = 0x7034
+ ETHERTYPE_CHAOS = 0x804
+ ETHERTYPE_COMDESIGN = 0x806c
+ ETHERTYPE_COMPUGRAPHIC = 0x806d
+ ETHERTYPE_COUNTERPOINT = 0x8062
+ ETHERTYPE_CRONUS = 0x8004
+ ETHERTYPE_CRONUSVLN = 0x8003
+ ETHERTYPE_DCA = 0x1234
+ ETHERTYPE_DDE = 0x807b
+ ETHERTYPE_DEBNI = 0xaaaa
+ ETHERTYPE_DECAM = 0x8048
+ ETHERTYPE_DECCUST = 0x6006
+ ETHERTYPE_DECDIAG = 0x6005
+ ETHERTYPE_DECDNS = 0x803c
+ ETHERTYPE_DECDTS = 0x803e
+ ETHERTYPE_DECEXPER = 0x6000
+ ETHERTYPE_DECLAST = 0x8041
+ ETHERTYPE_DECLTM = 0x803f
+ ETHERTYPE_DECMUMPS = 0x6009
+ ETHERTYPE_DECNETBIOS = 0x8040
+ ETHERTYPE_DELTACON = 0x86de
+ ETHERTYPE_DIDDLE = 0x4321
+ ETHERTYPE_DLOG1 = 0x660
+ ETHERTYPE_DLOG2 = 0x661
+ ETHERTYPE_DN = 0x6003
+ ETHERTYPE_DOGFIGHT = 0x1989
+ ETHERTYPE_DSMD = 0x8039
+ ETHERTYPE_ECMA = 0x803
+ ETHERTYPE_ENCRYPT = 0x803d
+ ETHERTYPE_ES = 0x805d
+ ETHERTYPE_EXCELAN = 0x8010
+ ETHERTYPE_EXPERDATA = 0x8049
+ ETHERTYPE_FLIP = 0x8146
+ ETHERTYPE_FLOWCONTROL = 0x8808
+ ETHERTYPE_FRARP = 0x808
+ ETHERTYPE_GENDYN = 0x8068
+ ETHERTYPE_HAYES = 0x8130
+ ETHERTYPE_HIPPI_FP = 0x8180
+ ETHERTYPE_HITACHI = 0x8820
+ ETHERTYPE_HP = 0x8005
+ ETHERTYPE_IEEEPUP = 0xa00
+ ETHERTYPE_IEEEPUPAT = 0xa01
+ ETHERTYPE_IMLBL = 0x4c42
+ ETHERTYPE_IMLBLDIAG = 0x424c
+ ETHERTYPE_IP = 0x800
+ ETHERTYPE_IPAS = 0x876c
+ ETHERTYPE_IPV6 = 0x86dd
+ ETHERTYPE_IPX = 0x8137
+ ETHERTYPE_IPXNEW = 0x8037
+ ETHERTYPE_KALPANA = 0x8582
+ ETHERTYPE_LANBRIDGE = 0x8038
+ ETHERTYPE_LANPROBE = 0x8888
+ ETHERTYPE_LAT = 0x6004
+ ETHERTYPE_LBACK = 0x9000
+ ETHERTYPE_LITTLE = 0x8060
+ ETHERTYPE_LLDP = 0x88cc
+ ETHERTYPE_LOGICRAFT = 0x8148
+ ETHERTYPE_LOOPBACK = 0x9000
+ ETHERTYPE_MACSEC = 0x88e5
+ ETHERTYPE_MATRA = 0x807a
+ ETHERTYPE_MAX = 0xffff
+ ETHERTYPE_MERIT = 0x807c
+ ETHERTYPE_MICP = 0x873a
+ ETHERTYPE_MOPDL = 0x6001
+ ETHERTYPE_MOPRC = 0x6002
+ ETHERTYPE_MOTOROLA = 0x818d
+ ETHERTYPE_MPLS = 0x8847
+ ETHERTYPE_MPLS_MCAST = 0x8848
+ ETHERTYPE_MUMPS = 0x813f
+ ETHERTYPE_NBPCC = 0x3c04
+ ETHERTYPE_NBPCLAIM = 0x3c09
+ ETHERTYPE_NBPCLREQ = 0x3c05
+ ETHERTYPE_NBPCLRSP = 0x3c06
+ ETHERTYPE_NBPCREQ = 0x3c02
+ ETHERTYPE_NBPCRSP = 0x3c03
+ ETHERTYPE_NBPDG = 0x3c07
+ ETHERTYPE_NBPDGB = 0x3c08
+ ETHERTYPE_NBPDLTE = 0x3c0a
+ ETHERTYPE_NBPRAR = 0x3c0c
+ ETHERTYPE_NBPRAS = 0x3c0b
+ ETHERTYPE_NBPRST = 0x3c0d
+ ETHERTYPE_NBPSCD = 0x3c01
+ ETHERTYPE_NBPVCD = 0x3c00
+ ETHERTYPE_NBS = 0x802
+ ETHERTYPE_NCD = 0x8149
+ ETHERTYPE_NESTAR = 0x8006
+ ETHERTYPE_NETBEUI = 0x8191
+ ETHERTYPE_NOVELL = 0x8138
+ ETHERTYPE_NS = 0x600
+ ETHERTYPE_NSAT = 0x601
+ ETHERTYPE_NSCOMPAT = 0x807
+ ETHERTYPE_NTRAILER = 0x10
+ ETHERTYPE_OS9 = 0x7007
+ ETHERTYPE_OS9NET = 0x7009
+ ETHERTYPE_PACER = 0x80c6
+ ETHERTYPE_PAE = 0x888e
+ ETHERTYPE_PBB = 0x88e7
+ ETHERTYPE_PCS = 0x4242
+ ETHERTYPE_PLANNING = 0x8044
+ ETHERTYPE_PPP = 0x880b
+ ETHERTYPE_PPPOE = 0x8864
+ ETHERTYPE_PPPOEDISC = 0x8863
+ ETHERTYPE_PRIMENTS = 0x7031
+ ETHERTYPE_PUP = 0x200
+ ETHERTYPE_PUPAT = 0x200
+ ETHERTYPE_QINQ = 0x88a8
+ ETHERTYPE_RACAL = 0x7030
+ ETHERTYPE_RATIONAL = 0x8150
+ ETHERTYPE_RAWFR = 0x6559
+ ETHERTYPE_RCL = 0x1995
+ ETHERTYPE_RDP = 0x8739
+ ETHERTYPE_RETIX = 0x80f2
+ ETHERTYPE_REVARP = 0x8035
+ ETHERTYPE_SCA = 0x6007
+ ETHERTYPE_SECTRA = 0x86db
+ ETHERTYPE_SECUREDATA = 0x876d
+ ETHERTYPE_SGITW = 0x817e
+ ETHERTYPE_SG_BOUNCE = 0x8016
+ ETHERTYPE_SG_DIAG = 0x8013
+ ETHERTYPE_SG_NETGAMES = 0x8014
+ ETHERTYPE_SG_RESV = 0x8015
+ ETHERTYPE_SIMNET = 0x5208
+ ETHERTYPE_SLOW = 0x8809
+ ETHERTYPE_SNA = 0x80d5
+ ETHERTYPE_SNMP = 0x814c
+ ETHERTYPE_SONIX = 0xfaf5
+ ETHERTYPE_SPIDER = 0x809f
+ ETHERTYPE_SPRITE = 0x500
+ ETHERTYPE_STP = 0x8181
+ ETHERTYPE_TALARIS = 0x812b
+ ETHERTYPE_TALARISMC = 0x852b
+ ETHERTYPE_TCPCOMP = 0x876b
+ ETHERTYPE_TCPSM = 0x9002
+ ETHERTYPE_TEC = 0x814f
+ ETHERTYPE_TIGAN = 0x802f
+ ETHERTYPE_TRAIL = 0x1000
+ ETHERTYPE_TRANSETHER = 0x6558
+ ETHERTYPE_TYMSHARE = 0x802e
+ ETHERTYPE_UBBST = 0x7005
+ ETHERTYPE_UBDEBUG = 0x900
+ ETHERTYPE_UBDIAGLOOP = 0x7002
+ ETHERTYPE_UBDL = 0x7000
+ ETHERTYPE_UBNIU = 0x7001
+ ETHERTYPE_UBNMC = 0x7003
+ ETHERTYPE_VALID = 0x1600
+ ETHERTYPE_VARIAN = 0x80dd
+ ETHERTYPE_VAXELN = 0x803b
+ ETHERTYPE_VEECO = 0x8067
+ ETHERTYPE_VEXP = 0x805b
+ ETHERTYPE_VGLAB = 0x8131
+ ETHERTYPE_VINES = 0xbad
+ ETHERTYPE_VINESECHO = 0xbaf
+ ETHERTYPE_VINESLOOP = 0xbae
+ ETHERTYPE_VITAL = 0xff00
+ ETHERTYPE_VLAN = 0x8100
+ ETHERTYPE_VLTLMAN = 0x8080
+ ETHERTYPE_VPROD = 0x805c
+ ETHERTYPE_VURESERVED = 0x8147
+ ETHERTYPE_WATERLOO = 0x8130
+ ETHERTYPE_WELLFLEET = 0x8103
+ ETHERTYPE_X25 = 0x805
+ ETHERTYPE_X75 = 0x801
+ ETHERTYPE_XNSSM = 0x9001
+ ETHERTYPE_XTP = 0x817d
+ ETHER_ADDR_LEN = 0x6
+ ETHER_ALIGN = 0x2
+ ETHER_CRC_LEN = 0x4
+ ETHER_CRC_POLY_BE = 0x4c11db6
+ ETHER_CRC_POLY_LE = 0xedb88320
+ ETHER_HDR_LEN = 0xe
+ ETHER_MAX_DIX_LEN = 0x600
+ ETHER_MAX_HARDMTU_LEN = 0xff9b
+ ETHER_MAX_LEN = 0x5ee
+ ETHER_MIN_LEN = 0x40
+ ETHER_TYPE_LEN = 0x2
+ ETHER_VLAN_ENCAP_LEN = 0x4
+ EVFILT_AIO = -0x3
+ EVFILT_DEVICE = -0x8
+ EVFILT_PROC = -0x5
+ EVFILT_READ = -0x1
+ EVFILT_SIGNAL = -0x6
+ EVFILT_SYSCOUNT = 0x8
+ EVFILT_TIMER = -0x7
+ EVFILT_VNODE = -0x4
+ EVFILT_WRITE = -0x2
+ EVL_ENCAPLEN = 0x4
+ EVL_PRIO_BITS = 0xd
+ EVL_PRIO_MAX = 0x7
+ EVL_VLID_MASK = 0xfff
+ EVL_VLID_MAX = 0xffe
+ EVL_VLID_MIN = 0x1
+ EVL_VLID_NULL = 0x0
+ EV_ADD = 0x1
+ EV_CLEAR = 0x20
+ EV_DELETE = 0x2
+ EV_DISABLE = 0x8
+ EV_DISPATCH = 0x80
+ EV_ENABLE = 0x4
+ EV_EOF = 0x8000
+ EV_ERROR = 0x4000
+ EV_FLAG1 = 0x2000
+ EV_ONESHOT = 0x10
+ EV_RECEIPT = 0x40
+ EV_SYSFLAGS = 0xf000
+ EXTA = 0x4b00
+ EXTB = 0x9600
+ EXTPROC = 0x800
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0x400
+ FLUSHO = 0x800000
+ F_DUPFD = 0x0
+ F_DUPFD_CLOEXEC = 0xa
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLK = 0x7
+ F_GETOWN = 0x5
+ F_ISATTY = 0xb
+ F_OK = 0x0
+ F_RDLCK = 0x1
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLK = 0x8
+ F_SETLKW = 0x9
+ F_SETOWN = 0x6
+ F_UNLCK = 0x2
+ F_WRLCK = 0x3
+ HUPCL = 0x4000
+ HW_MACHINE = 0x1
+ ICANON = 0x100
+ ICMP6_FILTER = 0x12
+ ICRNL = 0x100
+ IEXTEN = 0x400
+ IFAN_ARRIVAL = 0x0
+ IFAN_DEPARTURE = 0x1
+ IFF_ALLMULTI = 0x200
+ IFF_BROADCAST = 0x2
+ IFF_CANTCHANGE = 0x8e52
+ IFF_DEBUG = 0x4
+ IFF_LINK0 = 0x1000
+ IFF_LINK1 = 0x2000
+ IFF_LINK2 = 0x4000
+ IFF_LOOPBACK = 0x8
+ IFF_MULTICAST = 0x8000
+ IFF_NOARP = 0x80
+ IFF_OACTIVE = 0x400
+ IFF_POINTOPOINT = 0x10
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SIMPLEX = 0x800
+ IFF_STATICARP = 0x20
+ IFF_UP = 0x1
+ IFNAMSIZ = 0x10
+ IFT_1822 = 0x2
+ IFT_A12MPPSWITCH = 0x82
+ IFT_AAL2 = 0xbb
+ IFT_AAL5 = 0x31
+ IFT_ADSL = 0x5e
+ IFT_AFLANE8023 = 0x3b
+ IFT_AFLANE8025 = 0x3c
+ IFT_ARAP = 0x58
+ IFT_ARCNET = 0x23
+ IFT_ARCNETPLUS = 0x24
+ IFT_ASYNC = 0x54
+ IFT_ATM = 0x25
+ IFT_ATMDXI = 0x69
+ IFT_ATMFUNI = 0x6a
+ IFT_ATMIMA = 0x6b
+ IFT_ATMLOGICAL = 0x50
+ IFT_ATMRADIO = 0xbd
+ IFT_ATMSUBINTERFACE = 0x86
+ IFT_ATMVCIENDPT = 0xc2
+ IFT_ATMVIRTUAL = 0x95
+ IFT_BGPPOLICYACCOUNTING = 0xa2
+ IFT_BLUETOOTH = 0xf8
+ IFT_BRIDGE = 0xd1
+ IFT_BSC = 0x53
+ IFT_CARP = 0xf7
+ IFT_CCTEMUL = 0x3d
+ IFT_CEPT = 0x13
+ IFT_CES = 0x85
+ IFT_CHANNEL = 0x46
+ IFT_CNR = 0x55
+ IFT_COFFEE = 0x84
+ IFT_COMPOSITELINK = 0x9b
+ IFT_DCN = 0x8d
+ IFT_DIGITALPOWERLINE = 0x8a
+ IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
+ IFT_DLSW = 0x4a
+ IFT_DOCSCABLEDOWNSTREAM = 0x80
+ IFT_DOCSCABLEMACLAYER = 0x7f
+ IFT_DOCSCABLEUPSTREAM = 0x81
+ IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd
+ IFT_DS0 = 0x51
+ IFT_DS0BUNDLE = 0x52
+ IFT_DS1FDL = 0xaa
+ IFT_DS3 = 0x1e
+ IFT_DTM = 0x8c
+ IFT_DUMMY = 0xf1
+ IFT_DVBASILN = 0xac
+ IFT_DVBASIOUT = 0xad
+ IFT_DVBRCCDOWNSTREAM = 0x93
+ IFT_DVBRCCMACLAYER = 0x92
+ IFT_DVBRCCUPSTREAM = 0x94
+ IFT_ECONET = 0xce
+ IFT_ENC = 0xf4
+ IFT_EON = 0x19
+ IFT_EPLRS = 0x57
+ IFT_ESCON = 0x49
+ IFT_ETHER = 0x6
+ IFT_FAITH = 0xf3
+ IFT_FAST = 0x7d
+ IFT_FASTETHER = 0x3e
+ IFT_FASTETHERFX = 0x45
+ IFT_FDDI = 0xf
+ IFT_FIBRECHANNEL = 0x38
+ IFT_FRAMERELAYINTERCONNECT = 0x3a
+ IFT_FRAMERELAYMPI = 0x5c
+ IFT_FRDLCIENDPT = 0xc1
+ IFT_FRELAY = 0x20
+ IFT_FRELAYDCE = 0x2c
+ IFT_FRF16MFRBUNDLE = 0xa3
+ IFT_FRFORWARD = 0x9e
+ IFT_G703AT2MB = 0x43
+ IFT_G703AT64K = 0x42
+ IFT_GIF = 0xf0
+ IFT_GIGABITETHERNET = 0x75
+ IFT_GR303IDT = 0xb2
+ IFT_GR303RDT = 0xb1
+ IFT_H323GATEKEEPER = 0xa4
+ IFT_H323PROXY = 0xa5
+ IFT_HDH1822 = 0x3
+ IFT_HDLC = 0x76
+ IFT_HDSL2 = 0xa8
+ IFT_HIPERLAN2 = 0xb7
+ IFT_HIPPI = 0x2f
+ IFT_HIPPIINTERFACE = 0x39
+ IFT_HOSTPAD = 0x5a
+ IFT_HSSI = 0x2e
+ IFT_HY = 0xe
+ IFT_IBM370PARCHAN = 0x48
+ IFT_IDSL = 0x9a
+ IFT_IEEE1394 = 0x90
+ IFT_IEEE80211 = 0x47
+ IFT_IEEE80212 = 0x37
+ IFT_IEEE8023ADLAG = 0xa1
+ IFT_IFGSN = 0x91
+ IFT_IMT = 0xbe
+ IFT_INFINIBAND = 0xc7
+ IFT_INTERLEAVE = 0x7c
+ IFT_IP = 0x7e
+ IFT_IPFORWARD = 0x8e
+ IFT_IPOVERATM = 0x72
+ IFT_IPOVERCDLC = 0x6d
+ IFT_IPOVERCLAW = 0x6e
+ IFT_IPSWITCH = 0x4e
+ IFT_ISDN = 0x3f
+ IFT_ISDNBASIC = 0x14
+ IFT_ISDNPRIMARY = 0x15
+ IFT_ISDNS = 0x4b
+ IFT_ISDNU = 0x4c
+ IFT_ISO88022LLC = 0x29
+ IFT_ISO88023 = 0x7
+ IFT_ISO88024 = 0x8
+ IFT_ISO88025 = 0x9
+ IFT_ISO88025CRFPINT = 0x62
+ IFT_ISO88025DTR = 0x56
+ IFT_ISO88025FIBER = 0x73
+ IFT_ISO88026 = 0xa
+ IFT_ISUP = 0xb3
+ IFT_L2VLAN = 0x87
+ IFT_L3IPVLAN = 0x88
+ IFT_L3IPXVLAN = 0x89
+ IFT_LAPB = 0x10
+ IFT_LAPD = 0x4d
+ IFT_LAPF = 0x77
+ IFT_LINEGROUP = 0xd2
+ IFT_LOCALTALK = 0x2a
+ IFT_LOOP = 0x18
+ IFT_MBIM = 0xfa
+ IFT_MEDIAMAILOVERIP = 0x8b
+ IFT_MFSIGLINK = 0xa7
+ IFT_MIOX25 = 0x26
+ IFT_MODEM = 0x30
+ IFT_MPC = 0x71
+ IFT_MPLS = 0xa6
+ IFT_MPLSTUNNEL = 0x96
+ IFT_MSDSL = 0x8f
+ IFT_MVL = 0xbf
+ IFT_MYRINET = 0x63
+ IFT_NFAS = 0xaf
+ IFT_NSIP = 0x1b
+ IFT_OPTICALCHANNEL = 0xc3
+ IFT_OPTICALTRANSPORT = 0xc4
+ IFT_OTHER = 0x1
+ IFT_P10 = 0xc
+ IFT_P80 = 0xd
+ IFT_PARA = 0x22
+ IFT_PFLOG = 0xf5
+ IFT_PFLOW = 0xf9
+ IFT_PFSYNC = 0xf6
+ IFT_PLC = 0xae
+ IFT_PON155 = 0xcf
+ IFT_PON622 = 0xd0
+ IFT_POS = 0xab
+ IFT_PPP = 0x17
+ IFT_PPPMULTILINKBUNDLE = 0x6c
+ IFT_PROPATM = 0xc5
+ IFT_PROPBWAP2MP = 0xb8
+ IFT_PROPCNLS = 0x59
+ IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
+ IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
+ IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
+ IFT_PROPMUX = 0x36
+ IFT_PROPVIRTUAL = 0x35
+ IFT_PROPWIRELESSP2P = 0x9d
+ IFT_PTPSERIAL = 0x16
+ IFT_PVC = 0xf2
+ IFT_Q2931 = 0xc9
+ IFT_QLLC = 0x44
+ IFT_RADIOMAC = 0xbc
+ IFT_RADSL = 0x5f
+ IFT_REACHDSL = 0xc0
+ IFT_RFC1483 = 0x9f
+ IFT_RS232 = 0x21
+ IFT_RSRB = 0x4f
+ IFT_SDLC = 0x11
+ IFT_SDSL = 0x60
+ IFT_SHDSL = 0xa9
+ IFT_SIP = 0x1f
+ IFT_SIPSIG = 0xcc
+ IFT_SIPTG = 0xcb
+ IFT_SLIP = 0x1c
+ IFT_SMDSDXI = 0x2b
+ IFT_SMDSICIP = 0x34
+ IFT_SONET = 0x27
+ IFT_SONETOVERHEADCHANNEL = 0xb9
+ IFT_SONETPATH = 0x32
+ IFT_SONETVT = 0x33
+ IFT_SRP = 0x97
+ IFT_SS7SIGLINK = 0x9c
+ IFT_STACKTOSTACK = 0x6f
+ IFT_STARLAN = 0xb
+ IFT_T1 = 0x12
+ IFT_TDLC = 0x74
+ IFT_TELINK = 0xc8
+ IFT_TERMPAD = 0x5b
+ IFT_TR008 = 0xb0
+ IFT_TRANSPHDLC = 0x7b
+ IFT_TUNNEL = 0x83
+ IFT_ULTRA = 0x1d
+ IFT_USB = 0xa0
+ IFT_V11 = 0x40
+ IFT_V35 = 0x2d
+ IFT_V36 = 0x41
+ IFT_V37 = 0x78
+ IFT_VDSL = 0x61
+ IFT_VIRTUALIPADDRESS = 0x70
+ IFT_VIRTUALTG = 0xca
+ IFT_VOICEDID = 0xd5
+ IFT_VOICEEM = 0x64
+ IFT_VOICEEMFGD = 0xd3
+ IFT_VOICEENCAP = 0x67
+ IFT_VOICEFGDEANA = 0xd4
+ IFT_VOICEFXO = 0x65
+ IFT_VOICEFXS = 0x66
+ IFT_VOICEOVERATM = 0x98
+ IFT_VOICEOVERCABLE = 0xc6
+ IFT_VOICEOVERFRAMERELAY = 0x99
+ IFT_VOICEOVERIP = 0x68
+ IFT_X213 = 0x5d
+ IFT_X25 = 0x5
+ IFT_X25DDN = 0x4
+ IFT_X25HUNTGROUP = 0x7a
+ IFT_X25MLP = 0x79
+ IFT_X25PLE = 0x28
+ IFT_XETHER = 0x1a
+ IGNBRK = 0x1
+ IGNCR = 0x80
+ IGNPAR = 0x4
+ IMAXBEL = 0x2000
+ INLCR = 0x40
+ INPCK = 0x10
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLASSD_HOST = 0xfffffff
+ IN_CLASSD_NET = 0xf0000000
+ IN_CLASSD_NSHIFT = 0x1c
+ IN_LOOPBACKNET = 0x7f
+ IN_RFC3021_HOST = 0x1
+ IN_RFC3021_NET = 0xfffffffe
+ IN_RFC3021_NSHIFT = 0x1f
+ IPPROTO_AH = 0x33
+ IPPROTO_CARP = 0x70
+ IPPROTO_DIVERT = 0x102
+ IPPROTO_DONE = 0x101
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_ENCAP = 0x62
+ IPPROTO_EON = 0x50
+ IPPROTO_ESP = 0x32
+ IPPROTO_ETHERIP = 0x61
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GGP = 0x3
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPCOMP = 0x6c
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV4 = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_MAX = 0x100
+ IPPROTO_MAXID = 0x103
+ IPPROTO_MOBILE = 0x37
+ IPPROTO_MPLS = 0x89
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PFSYNC = 0xf0
+ IPPROTO_PIM = 0x67
+ IPPROTO_PUP = 0xc
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
+ IPV6_AUTH_LEVEL = 0x35
+ IPV6_AUTOFLOWLABEL = 0x3b
+ IPV6_CHECKSUM = 0x1a
+ IPV6_DEFAULT_MULTICAST_HOPS = 0x1
+ IPV6_DEFAULT_MULTICAST_LOOP = 0x1
+ IPV6_DEFHLIM = 0x40
+ IPV6_DONTFRAG = 0x3e
+ IPV6_DSTOPTS = 0x32
+ IPV6_ESP_NETWORK_LEVEL = 0x37
+ IPV6_ESP_TRANS_LEVEL = 0x36
+ IPV6_FAITH = 0x1d
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
+ IPV6_FRAGTTL = 0x78
+ IPV6_HLIMDEC = 0x1
+ IPV6_HOPLIMIT = 0x2f
+ IPV6_HOPOPTS = 0x31
+ IPV6_IPCOMP_LEVEL = 0x3c
+ IPV6_JOIN_GROUP = 0xc
+ IPV6_LEAVE_GROUP = 0xd
+ IPV6_MAXHLIM = 0xff
+ IPV6_MAXPACKET = 0xffff
+ IPV6_MINHOPCOUNT = 0x41
+ IPV6_MMTU = 0x500
+ IPV6_MULTICAST_HOPS = 0xa
+ IPV6_MULTICAST_IF = 0x9
+ IPV6_MULTICAST_LOOP = 0xb
+ IPV6_NEXTHOP = 0x30
+ IPV6_OPTIONS = 0x1
+ IPV6_PATHMTU = 0x2c
+ IPV6_PIPEX = 0x3f
+ IPV6_PKTINFO = 0x2e
+ IPV6_PORTRANGE = 0xe
+ IPV6_PORTRANGE_DEFAULT = 0x0
+ IPV6_PORTRANGE_HIGH = 0x1
+ IPV6_PORTRANGE_LOW = 0x2
+ IPV6_RECVDSTOPTS = 0x28
+ IPV6_RECVDSTPORT = 0x40
+ IPV6_RECVHOPLIMIT = 0x25
+ IPV6_RECVHOPOPTS = 0x27
+ IPV6_RECVPATHMTU = 0x2b
+ IPV6_RECVPKTINFO = 0x24
+ IPV6_RECVRTHDR = 0x26
+ IPV6_RECVTCLASS = 0x39
+ IPV6_RTABLE = 0x1021
+ IPV6_RTHDR = 0x33
+ IPV6_RTHDRDSTOPTS = 0x23
+ IPV6_RTHDR_LOOSE = 0x0
+ IPV6_RTHDR_STRICT = 0x1
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_SOCKOPT_RESERVED1 = 0x3
+ IPV6_TCLASS = 0x3d
+ IPV6_UNICAST_HOPS = 0x4
+ IPV6_USE_MIN_MTU = 0x2a
+ IPV6_V6ONLY = 0x1b
+ IPV6_VERSION = 0x60
+ IPV6_VERSION_MASK = 0xf0
+ IP_ADD_MEMBERSHIP = 0xc
+ IP_AUTH_LEVEL = 0x14
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DROP_MEMBERSHIP = 0xd
+ IP_ESP_NETWORK_LEVEL = 0x16
+ IP_ESP_TRANS_LEVEL = 0x15
+ IP_HDRINCL = 0x2
+ IP_IPCOMP_LEVEL = 0x1d
+ IP_IPDEFTTL = 0x25
+ IP_IPSECFLOWINFO = 0x24
+ IP_IPSEC_LOCAL_AUTH = 0x1b
+ IP_IPSEC_LOCAL_CRED = 0x19
+ IP_IPSEC_LOCAL_ID = 0x17
+ IP_IPSEC_REMOTE_AUTH = 0x1c
+ IP_IPSEC_REMOTE_CRED = 0x1a
+ IP_IPSEC_REMOTE_ID = 0x18
+ IP_MAXPACKET = 0xffff
+ IP_MAX_MEMBERSHIPS = 0xfff
+ IP_MF = 0x2000
+ IP_MINTTL = 0x20
+ IP_MIN_MEMBERSHIPS = 0xf
+ IP_MSS = 0x240
+ IP_MULTICAST_IF = 0x9
+ IP_MULTICAST_LOOP = 0xb
+ IP_MULTICAST_TTL = 0xa
+ IP_OFFMASK = 0x1fff
+ IP_OPTIONS = 0x1
+ IP_PIPEX = 0x22
+ IP_PORTRANGE = 0x13
+ IP_PORTRANGE_DEFAULT = 0x0
+ IP_PORTRANGE_HIGH = 0x1
+ IP_PORTRANGE_LOW = 0x2
+ IP_RECVDSTADDR = 0x7
+ IP_RECVDSTPORT = 0x21
+ IP_RECVIF = 0x1e
+ IP_RECVOPTS = 0x5
+ IP_RECVRETOPTS = 0x6
+ IP_RECVRTABLE = 0x23
+ IP_RECVTTL = 0x1f
+ IP_RETOPTS = 0x8
+ IP_RF = 0x8000
+ IP_RTABLE = 0x1021
+ IP_SENDSRCADDR = 0x7
+ IP_TOS = 0x3
+ IP_TTL = 0x4
+ ISIG = 0x80
+ ISTRIP = 0x20
+ IUCLC = 0x1000
+ IXANY = 0x800
+ IXOFF = 0x400
+ IXON = 0x200
+ KERN_HOSTNAME = 0xa
+ KERN_OSRELEASE = 0x2
+ KERN_OSTYPE = 0x1
+ KERN_VERSION = 0x4
+ LCNT_OVERLOAD_FLUSH = 0x6
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DONTNEED = 0x4
+ MADV_FREE = 0x6
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_SEQUENTIAL = 0x2
+ MADV_SPACEAVAIL = 0x5
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x1000
+ MAP_ANONYMOUS = 0x1000
+ MAP_CONCEAL = 0x8000
+ MAP_COPY = 0x2
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x10
+ MAP_FLAGMASK = 0xfff7
+ MAP_HASSEMAPHORE = 0x0
+ MAP_INHERIT = 0x0
+ MAP_INHERIT_COPY = 0x1
+ MAP_INHERIT_NONE = 0x2
+ MAP_INHERIT_SHARE = 0x0
+ MAP_INHERIT_ZERO = 0x3
+ MAP_NOEXTEND = 0x0
+ MAP_NORESERVE = 0x0
+ MAP_PRIVATE = 0x2
+ MAP_RENAME = 0x0
+ MAP_SHARED = 0x1
+ MAP_STACK = 0x4000
+ MAP_TRYFIXED = 0x0
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MNT_ASYNC = 0x40
+ MNT_DEFEXPORTED = 0x200
+ MNT_DELEXPORT = 0x20000
+ MNT_DOOMED = 0x8000000
+ MNT_EXPORTANON = 0x400
+ MNT_EXPORTED = 0x100
+ MNT_EXRDONLY = 0x80
+ MNT_FORCE = 0x80000
+ MNT_LAZY = 0x3
+ MNT_LOCAL = 0x1000
+ MNT_NOATIME = 0x8000
+ MNT_NODEV = 0x10
+ MNT_NOEXEC = 0x4
+ MNT_NOPERM = 0x20
+ MNT_NOSUID = 0x8
+ MNT_NOWAIT = 0x2
+ MNT_QUOTA = 0x2000
+ MNT_RDONLY = 0x1
+ MNT_RELOAD = 0x40000
+ MNT_ROOTFS = 0x4000
+ MNT_SOFTDEP = 0x4000000
+ MNT_STALLED = 0x100000
+ MNT_SWAPPABLE = 0x200000
+ MNT_SYNCHRONOUS = 0x2
+ MNT_UPDATE = 0x10000
+ MNT_VISFLAGMASK = 0x400ffff
+ MNT_WAIT = 0x1
+ MNT_WANTRDWR = 0x2000000
+ MNT_WXALLOWED = 0x800
+ MSG_BCAST = 0x100
+ MSG_CMSG_CLOEXEC = 0x800
+ MSG_CTRUNC = 0x20
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x80
+ MSG_EOR = 0x8
+ MSG_MCAST = 0x200
+ MSG_NOSIGNAL = 0x400
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_TRUNC = 0x10
+ MSG_WAITALL = 0x40
+ MS_ASYNC = 0x1
+ MS_INVALIDATE = 0x4
+ MS_SYNC = 0x2
+ NAME_MAX = 0xff
+ NET_RT_DUMP = 0x1
+ NET_RT_FLAGS = 0x2
+ NET_RT_IFLIST = 0x3
+ NET_RT_IFNAMES = 0x6
+ NET_RT_MAXID = 0x7
+ NET_RT_STATS = 0x4
+ NET_RT_TABLE = 0x5
+ NFDBITS = 0x20
+ NOFLSH = 0x80000000
+ NOKERNINFO = 0x2000000
+ NOTE_ATTRIB = 0x8
+ NOTE_CHANGE = 0x1
+ NOTE_CHILD = 0x4
+ NOTE_DELETE = 0x1
+ NOTE_EOF = 0x2
+ NOTE_EXEC = 0x20000000
+ NOTE_EXIT = 0x80000000
+ NOTE_EXTEND = 0x4
+ NOTE_FORK = 0x40000000
+ NOTE_LINK = 0x10
+ NOTE_LOWAT = 0x1
+ NOTE_PCTRLMASK = 0xf0000000
+ NOTE_PDATAMASK = 0xfffff
+ NOTE_RENAME = 0x20
+ NOTE_REVOKE = 0x40
+ NOTE_TRACK = 0x1
+ NOTE_TRACKERR = 0x2
+ NOTE_TRUNCATE = 0x80
+ NOTE_WRITE = 0x2
+ OCRNL = 0x10
+ OLCUC = 0x20
+ ONLCR = 0x2
+ ONLRET = 0x80
+ ONOCR = 0x40
+ ONOEOT = 0x8
+ OPOST = 0x1
+ OXTABS = 0x4
+ O_ACCMODE = 0x3
+ O_APPEND = 0x8
+ O_ASYNC = 0x40
+ O_CLOEXEC = 0x10000
+ O_CREAT = 0x200
+ O_DIRECTORY = 0x20000
+ O_DSYNC = 0x80
+ O_EXCL = 0x800
+ O_EXLOCK = 0x20
+ O_FSYNC = 0x80
+ O_NDELAY = 0x4
+ O_NOCTTY = 0x8000
+ O_NOFOLLOW = 0x100
+ O_NONBLOCK = 0x4
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSYNC = 0x80
+ O_SHLOCK = 0x10
+ O_SYNC = 0x80
+ O_TRUNC = 0x400
+ O_WRONLY = 0x1
+ PARENB = 0x1000
+ PARMRK = 0x8
+ PARODD = 0x2000
+ PENDIN = 0x20000000
+ PF_FLUSH = 0x1
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PROT_EXEC = 0x4
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_MEMLOCK = 0x6
+ RLIMIT_NOFILE = 0x8
+ RLIMIT_NPROC = 0x7
+ RLIMIT_RSS = 0x5
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = 0x7fffffffffffffff
+ RTAX_AUTHOR = 0x6
+ RTAX_BFD = 0xb
+ RTAX_BRD = 0x7
+ RTAX_DNS = 0xc
+ RTAX_DST = 0x0
+ RTAX_GATEWAY = 0x1
+ RTAX_GENMASK = 0x3
+ RTAX_IFA = 0x5
+ RTAX_IFP = 0x4
+ RTAX_LABEL = 0xa
+ RTAX_MAX = 0xf
+ RTAX_NETMASK = 0x2
+ RTAX_SEARCH = 0xe
+ RTAX_SRC = 0x8
+ RTAX_SRCMASK = 0x9
+ RTAX_STATIC = 0xd
+ RTA_AUTHOR = 0x40
+ RTA_BFD = 0x800
+ RTA_BRD = 0x80
+ RTA_DNS = 0x1000
+ RTA_DST = 0x1
+ RTA_GATEWAY = 0x2
+ RTA_GENMASK = 0x8
+ RTA_IFA = 0x20
+ RTA_IFP = 0x10
+ RTA_LABEL = 0x400
+ RTA_NETMASK = 0x4
+ RTA_SEARCH = 0x4000
+ RTA_SRC = 0x100
+ RTA_SRCMASK = 0x200
+ RTA_STATIC = 0x2000
+ RTF_ANNOUNCE = 0x4000
+ RTF_BFD = 0x1000000
+ RTF_BLACKHOLE = 0x1000
+ RTF_BROADCAST = 0x400000
+ RTF_CACHED = 0x20000
+ RTF_CLONED = 0x10000
+ RTF_CLONING = 0x100
+ RTF_CONNECTED = 0x800000
+ RTF_DONE = 0x40
+ RTF_DYNAMIC = 0x10
+ RTF_FMASK = 0x110fc08
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_LLINFO = 0x400
+ RTF_LOCAL = 0x200000
+ RTF_MODIFIED = 0x20
+ RTF_MPATH = 0x40000
+ RTF_MPLS = 0x100000
+ RTF_MULTICAST = 0x200
+ RTF_PERMANENT_ARP = 0x2000
+ RTF_PROTO1 = 0x8000
+ RTF_PROTO2 = 0x4000
+ RTF_PROTO3 = 0x2000
+ RTF_REJECT = 0x8
+ RTF_STATIC = 0x800
+ RTF_UP = 0x1
+ RTF_USETRAILERS = 0x8000
+ RTM_80211INFO = 0x15
+ RTM_ADD = 0x1
+ RTM_BFD = 0x12
+ RTM_CHANGE = 0x3
+ RTM_CHGADDRATTR = 0x14
+ RTM_DELADDR = 0xd
+ RTM_DELETE = 0x2
+ RTM_DESYNC = 0x10
+ RTM_GET = 0x4
+ RTM_IFANNOUNCE = 0xf
+ RTM_IFINFO = 0xe
+ RTM_INVALIDATE = 0x11
+ RTM_LOSING = 0x5
+ RTM_MAXSIZE = 0x800
+ RTM_MISS = 0x7
+ RTM_NEWADDR = 0xc
+ RTM_PROPOSAL = 0x13
+ RTM_REDIRECT = 0x6
+ RTM_RESOLVE = 0xb
+ RTM_RTTUNIT = 0xf4240
+ RTM_VERSION = 0x5
+ RTV_EXPIRE = 0x4
+ RTV_HOPCOUNT = 0x2
+ RTV_MTU = 0x1
+ RTV_RPIPE = 0x8
+ RTV_RTT = 0x40
+ RTV_RTTVAR = 0x80
+ RTV_SPIPE = 0x10
+ RTV_SSTHRESH = 0x20
+ RT_TABLEID_BITS = 0x8
+ RT_TABLEID_MASK = 0xff
+ RT_TABLEID_MAX = 0xff
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ RUSAGE_THREAD = 0x1
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x4
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIOCADDMULTI = 0x80206931
+ SIOCAIFADDR = 0x8040691a
+ SIOCAIFGROUP = 0x80286987
+ SIOCATMARK = 0x40047307
+ SIOCBRDGADD = 0x8060693c
+ SIOCBRDGADDL = 0x80606949
+ SIOCBRDGADDS = 0x80606941
+ SIOCBRDGARL = 0x808c694d
+ SIOCBRDGDADDR = 0x81286947
+ SIOCBRDGDEL = 0x8060693d
+ SIOCBRDGDELS = 0x80606942
+ SIOCBRDGFLUSH = 0x80606948
+ SIOCBRDGFRL = 0x808c694e
+ SIOCBRDGGCACHE = 0xc0186941
+ SIOCBRDGGFD = 0xc0186952
+ SIOCBRDGGHT = 0xc0186951
+ SIOCBRDGGIFFLGS = 0xc060693e
+ SIOCBRDGGMA = 0xc0186953
+ SIOCBRDGGPARAM = 0xc0406958
+ SIOCBRDGGPRI = 0xc0186950
+ SIOCBRDGGRL = 0xc030694f
+ SIOCBRDGGTO = 0xc0186946
+ SIOCBRDGIFS = 0xc0606942
+ SIOCBRDGRTS = 0xc0206943
+ SIOCBRDGSADDR = 0xc1286944
+ SIOCBRDGSCACHE = 0x80186940
+ SIOCBRDGSFD = 0x80186952
+ SIOCBRDGSHT = 0x80186951
+ SIOCBRDGSIFCOST = 0x80606955
+ SIOCBRDGSIFFLGS = 0x8060693f
+ SIOCBRDGSIFPRIO = 0x80606954
+ SIOCBRDGSIFPROT = 0x8060694a
+ SIOCBRDGSMA = 0x80186953
+ SIOCBRDGSPRI = 0x80186950
+ SIOCBRDGSPROTO = 0x8018695a
+ SIOCBRDGSTO = 0x80186945
+ SIOCBRDGSTXHC = 0x80186959
+ SIOCDELLABEL = 0x80206997
+ SIOCDELMULTI = 0x80206932
+ SIOCDIFADDR = 0x80206919
+ SIOCDIFGROUP = 0x80286989
+ SIOCDIFPARENT = 0x802069b4
+ SIOCDIFPHYADDR = 0x80206949
+ SIOCDPWE3NEIGHBOR = 0x802069de
+ SIOCDVNETID = 0x802069af
+ SIOCGETKALIVE = 0xc01869a4
+ SIOCGETLABEL = 0x8020699a
+ SIOCGETMPWCFG = 0xc02069ae
+ SIOCGETPFLOW = 0xc02069fe
+ SIOCGETPFSYNC = 0xc02069f8
+ SIOCGETSGCNT = 0xc0207534
+ SIOCGETVIFCNT = 0xc0287533
+ SIOCGETVLAN = 0xc0206990
+ SIOCGIFADDR = 0xc0206921
+ SIOCGIFBRDADDR = 0xc0206923
+ SIOCGIFCONF = 0xc0106924
+ SIOCGIFDATA = 0xc020691b
+ SIOCGIFDESCR = 0xc0206981
+ SIOCGIFDSTADDR = 0xc0206922
+ SIOCGIFFLAGS = 0xc0206911
+ SIOCGIFGATTR = 0xc028698b
+ SIOCGIFGENERIC = 0xc020693a
+ SIOCGIFGLIST = 0xc028698d
+ SIOCGIFGMEMB = 0xc028698a
+ SIOCGIFGROUP = 0xc0286988
+ SIOCGIFHARDMTU = 0xc02069a5
+ SIOCGIFLLPRIO = 0xc02069b6
+ SIOCGIFMEDIA = 0xc0406938
+ SIOCGIFMETRIC = 0xc0206917
+ SIOCGIFMTU = 0xc020697e
+ SIOCGIFNETMASK = 0xc0206925
+ SIOCGIFPAIR = 0xc02069b1
+ SIOCGIFPARENT = 0xc02069b3
+ SIOCGIFPRIORITY = 0xc020699c
+ SIOCGIFRDOMAIN = 0xc02069a0
+ SIOCGIFRTLABEL = 0xc0206983
+ SIOCGIFRXR = 0x802069aa
+ SIOCGIFSFFPAGE = 0xc1126939
+ SIOCGIFXFLAGS = 0xc020699e
+ SIOCGLIFPHYADDR = 0xc218694b
+ SIOCGLIFPHYDF = 0xc02069c2
+ SIOCGLIFPHYECN = 0xc02069c8
+ SIOCGLIFPHYRTABLE = 0xc02069a2
+ SIOCGLIFPHYTTL = 0xc02069a9
+ SIOCGPGRP = 0x40047309
+ SIOCGPWE3 = 0xc0206998
+ SIOCGPWE3CTRLWORD = 0xc02069dc
+ SIOCGPWE3FAT = 0xc02069dd
+ SIOCGPWE3NEIGHBOR = 0xc21869de
+ SIOCGRXHPRIO = 0xc02069db
+ SIOCGSPPPPARAMS = 0xc0206994
+ SIOCGTXHPRIO = 0xc02069c6
+ SIOCGUMBINFO = 0xc02069be
+ SIOCGUMBPARAM = 0xc02069c0
+ SIOCGVH = 0xc02069f6
+ SIOCGVNETFLOWID = 0xc02069c4
+ SIOCGVNETID = 0xc02069a7
+ SIOCIFAFATTACH = 0x801169ab
+ SIOCIFAFDETACH = 0x801169ac
+ SIOCIFCREATE = 0x8020697a
+ SIOCIFDESTROY = 0x80206979
+ SIOCIFGCLONERS = 0xc0106978
+ SIOCSETKALIVE = 0x801869a3
+ SIOCSETLABEL = 0x80206999
+ SIOCSETMPWCFG = 0x802069ad
+ SIOCSETPFLOW = 0x802069fd
+ SIOCSETPFSYNC = 0x802069f7
+ SIOCSETVLAN = 0x8020698f
+ SIOCSIFADDR = 0x8020690c
+ SIOCSIFBRDADDR = 0x80206913
+ SIOCSIFDESCR = 0x80206980
+ SIOCSIFDSTADDR = 0x8020690e
+ SIOCSIFFLAGS = 0x80206910
+ SIOCSIFGATTR = 0x8028698c
+ SIOCSIFGENERIC = 0x80206939
+ SIOCSIFLLADDR = 0x8020691f
+ SIOCSIFLLPRIO = 0x802069b5
+ SIOCSIFMEDIA = 0xc0206937
+ SIOCSIFMETRIC = 0x80206918
+ SIOCSIFMTU = 0x8020697f
+ SIOCSIFNETMASK = 0x80206916
+ SIOCSIFPAIR = 0x802069b0
+ SIOCSIFPARENT = 0x802069b2
+ SIOCSIFPRIORITY = 0x8020699b
+ SIOCSIFRDOMAIN = 0x8020699f
+ SIOCSIFRTLABEL = 0x80206982
+ SIOCSIFXFLAGS = 0x8020699d
+ SIOCSLIFPHYADDR = 0x8218694a
+ SIOCSLIFPHYDF = 0x802069c1
+ SIOCSLIFPHYECN = 0x802069c7
+ SIOCSLIFPHYRTABLE = 0x802069a1
+ SIOCSLIFPHYTTL = 0x802069a8
+ SIOCSPGRP = 0x80047308
+ SIOCSPWE3CTRLWORD = 0x802069dc
+ SIOCSPWE3FAT = 0x802069dd
+ SIOCSPWE3NEIGHBOR = 0x821869de
+ SIOCSRXHPRIO = 0x802069db
+ SIOCSSPPPPARAMS = 0x80206993
+ SIOCSTXHPRIO = 0x802069c5
+ SIOCSUMBPARAM = 0x802069bf
+ SIOCSVH = 0xc02069f5
+ SIOCSVNETFLOWID = 0x802069c3
+ SIOCSVNETID = 0x802069a6
+ SIOCSWGDPID = 0xc018695b
+ SIOCSWGMAXFLOW = 0xc0186960
+ SIOCSWGMAXGROUP = 0xc018695d
+ SIOCSWSDPID = 0x8018695c
+ SIOCSWSPORTNO = 0xc060695f
+ SOCK_CLOEXEC = 0x8000
+ SOCK_DGRAM = 0x2
+ SOCK_DNS = 0x1000
+ SOCK_NONBLOCK = 0x4000
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x1
+ SOL_SOCKET = 0xffff
+ SOMAXCONN = 0x80
+ SO_ACCEPTCONN = 0x2
+ SO_BINDANY = 0x1000
+ SO_BROADCAST = 0x20
+ SO_DEBUG = 0x1
+ SO_DOMAIN = 0x1024
+ SO_DONTROUTE = 0x10
+ SO_ERROR = 0x1007
+ SO_KEEPALIVE = 0x8
+ SO_LINGER = 0x80
+ SO_NETPROC = 0x1020
+ SO_OOBINLINE = 0x100
+ SO_PEERCRED = 0x1022
+ SO_PROTOCOL = 0x1025
+ SO_RCVBUF = 0x1002
+ SO_RCVLOWAT = 0x1004
+ SO_RCVTIMEO = 0x1006
+ SO_REUSEADDR = 0x4
+ SO_REUSEPORT = 0x200
+ SO_RTABLE = 0x1021
+ SO_SNDBUF = 0x1001
+ SO_SNDLOWAT = 0x1003
+ SO_SNDTIMEO = 0x1005
+ SO_SPLICE = 0x1023
+ SO_TIMESTAMP = 0x800
+ SO_TYPE = 0x1008
+ SO_USELOOPBACK = 0x40
+ SO_ZEROIZE = 0x2000
+ S_BLKSIZE = 0x200
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFLNK = 0xa000
+ S_IFMT = 0xf000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISTXT = 0x200
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXGRP = 0x8
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ TCIFLUSH = 0x1
+ TCIOFF = 0x3
+ TCIOFLUSH = 0x3
+ TCION = 0x4
+ TCOFLUSH = 0x2
+ TCOOFF = 0x1
+ TCOON = 0x2
+ TCP_MAXBURST = 0x4
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAX_SACK = 0x3
+ TCP_MAX_WINSHIFT = 0xe
+ TCP_MD5SIG = 0x4
+ TCP_MSS = 0x200
+ TCP_NODELAY = 0x1
+ TCP_NOPUSH = 0x10
+ TCP_SACKHOLE_LIMIT = 0x80
+ TCP_SACK_ENABLE = 0x8
+ TCSAFLUSH = 0x2
+ TIMER_ABSTIME = 0x1
+ TIMER_RELTIME = 0x0
+ TIOCCBRK = 0x2000747a
+ TIOCCDTR = 0x20007478
+ TIOCCHKVERAUTH = 0x2000741e
+ TIOCCLRVERAUTH = 0x2000741d
+ TIOCCONS = 0x80047462
+ TIOCDRAIN = 0x2000745e
+ TIOCEXCL = 0x2000740d
+ TIOCEXT = 0x80047460
+ TIOCFLAG_CLOCAL = 0x2
+ TIOCFLAG_CRTSCTS = 0x4
+ TIOCFLAG_MDMBUF = 0x8
+ TIOCFLAG_PPS = 0x10
+ TIOCFLAG_SOFTCAR = 0x1
+ TIOCFLUSH = 0x80047410
+ TIOCGETA = 0x402c7413
+ TIOCGETD = 0x4004741a
+ TIOCGFLAGS = 0x4004745d
+ TIOCGPGRP = 0x40047477
+ TIOCGSID = 0x40047463
+ TIOCGTSTAMP = 0x4010745b
+ TIOCGWINSZ = 0x40087468
+ TIOCMBIC = 0x8004746b
+ TIOCMBIS = 0x8004746c
+ TIOCMGET = 0x4004746a
+ TIOCMODG = 0x4004746a
+ TIOCMODS = 0x8004746d
+ TIOCMSET = 0x8004746d
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x20007471
+ TIOCNXCL = 0x2000740e
+ TIOCOUTQ = 0x40047473
+ TIOCPKT = 0x80047470
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_IOCTL = 0x40
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCREMOTE = 0x80047469
+ TIOCSBRK = 0x2000747b
+ TIOCSCTTY = 0x20007461
+ TIOCSDTR = 0x20007479
+ TIOCSETA = 0x802c7414
+ TIOCSETAF = 0x802c7416
+ TIOCSETAW = 0x802c7415
+ TIOCSETD = 0x8004741b
+ TIOCSETVERAUTH = 0x8004741c
+ TIOCSFLAGS = 0x8004745c
+ TIOCSIG = 0x8004745f
+ TIOCSPGRP = 0x80047476
+ TIOCSTART = 0x2000746e
+ TIOCSTAT = 0x20007465
+ TIOCSTOP = 0x2000746f
+ TIOCSTSTAMP = 0x8008745a
+ TIOCSWINSZ = 0x80087467
+ TIOCUCNTL = 0x80047466
+ TIOCUCNTL_CBRK = 0x7a
+ TIOCUCNTL_SBRK = 0x7b
+ TOSTOP = 0x400000
+ UTIME_NOW = -0x2
+ UTIME_OMIT = -0x1
+ VDISCARD = 0xf
+ VDSUSP = 0xb
+ VEOF = 0x0
+ VEOL = 0x1
+ VEOL2 = 0x2
+ VERASE = 0x3
+ VINTR = 0x8
+ VKILL = 0x5
+ VLNEXT = 0xe
+ VMIN = 0x10
+ VM_ANONMIN = 0x7
+ VM_LOADAVG = 0x2
+ VM_MALLOC_CONF = 0xc
+ VM_MAXID = 0xd
+ VM_MAXSLP = 0xa
+ VM_METER = 0x1
+ VM_NKMEMPAGES = 0x6
+ VM_PSSTRINGS = 0x3
+ VM_SWAPENCRYPT = 0x5
+ VM_USPACE = 0xb
+ VM_UVMEXP = 0x4
+ VM_VNODEMIN = 0x9
+ VM_VTEXTMIN = 0x8
+ VQUIT = 0x9
+ VREPRINT = 0x6
+ VSTART = 0xc
+ VSTATUS = 0x12
+ VSTOP = 0xd
+ VSUSP = 0xa
+ VTIME = 0x11
+ VWERASE = 0x4
+ WALTSIG = 0x4
+ WCONTINUED = 0x8
+ WCOREFLAG = 0x80
+ WNOHANG = 0x1
+ WUNTRACED = 0x2
+ XCASE = 0x1000000
+)
+
+// Errors
+const (
+ E2BIG = syscall.Errno(0x7)
+ EACCES = syscall.Errno(0xd)
+ EADDRINUSE = syscall.Errno(0x30)
+ EADDRNOTAVAIL = syscall.Errno(0x31)
+ EAFNOSUPPORT = syscall.Errno(0x2f)
+ EAGAIN = syscall.Errno(0x23)
+ EALREADY = syscall.Errno(0x25)
+ EAUTH = syscall.Errno(0x50)
+ EBADF = syscall.Errno(0x9)
+ EBADMSG = syscall.Errno(0x5c)
+ EBADRPC = syscall.Errno(0x48)
+ EBUSY = syscall.Errno(0x10)
+ ECANCELED = syscall.Errno(0x58)
+ ECHILD = syscall.Errno(0xa)
+ ECONNABORTED = syscall.Errno(0x35)
+ ECONNREFUSED = syscall.Errno(0x3d)
+ ECONNRESET = syscall.Errno(0x36)
+ EDEADLK = syscall.Errno(0xb)
+ EDESTADDRREQ = syscall.Errno(0x27)
+ EDOM = syscall.Errno(0x21)
+ EDQUOT = syscall.Errno(0x45)
+ EEXIST = syscall.Errno(0x11)
+ EFAULT = syscall.Errno(0xe)
+ EFBIG = syscall.Errno(0x1b)
+ EFTYPE = syscall.Errno(0x4f)
+ EHOSTDOWN = syscall.Errno(0x40)
+ EHOSTUNREACH = syscall.Errno(0x41)
+ EIDRM = syscall.Errno(0x59)
+ EILSEQ = syscall.Errno(0x54)
+ EINPROGRESS = syscall.Errno(0x24)
+ EINTR = syscall.Errno(0x4)
+ EINVAL = syscall.Errno(0x16)
+ EIO = syscall.Errno(0x5)
+ EIPSEC = syscall.Errno(0x52)
+ EISCONN = syscall.Errno(0x38)
+ EISDIR = syscall.Errno(0x15)
+ ELAST = syscall.Errno(0x5f)
+ ELOOP = syscall.Errno(0x3e)
+ EMEDIUMTYPE = syscall.Errno(0x56)
+ EMFILE = syscall.Errno(0x18)
+ EMLINK = syscall.Errno(0x1f)
+ EMSGSIZE = syscall.Errno(0x28)
+ ENAMETOOLONG = syscall.Errno(0x3f)
+ ENEEDAUTH = syscall.Errno(0x51)
+ ENETDOWN = syscall.Errno(0x32)
+ ENETRESET = syscall.Errno(0x34)
+ ENETUNREACH = syscall.Errno(0x33)
+ ENFILE = syscall.Errno(0x17)
+ ENOATTR = syscall.Errno(0x53)
+ ENOBUFS = syscall.Errno(0x37)
+ ENODEV = syscall.Errno(0x13)
+ ENOENT = syscall.Errno(0x2)
+ ENOEXEC = syscall.Errno(0x8)
+ ENOLCK = syscall.Errno(0x4d)
+ ENOMEDIUM = syscall.Errno(0x55)
+ ENOMEM = syscall.Errno(0xc)
+ ENOMSG = syscall.Errno(0x5a)
+ ENOPROTOOPT = syscall.Errno(0x2a)
+ ENOSPC = syscall.Errno(0x1c)
+ ENOSYS = syscall.Errno(0x4e)
+ ENOTBLK = syscall.Errno(0xf)
+ ENOTCONN = syscall.Errno(0x39)
+ ENOTDIR = syscall.Errno(0x14)
+ ENOTEMPTY = syscall.Errno(0x42)
+ ENOTRECOVERABLE = syscall.Errno(0x5d)
+ ENOTSOCK = syscall.Errno(0x26)
+ ENOTSUP = syscall.Errno(0x5b)
+ ENOTTY = syscall.Errno(0x19)
+ ENXIO = syscall.Errno(0x6)
+ EOPNOTSUPP = syscall.Errno(0x2d)
+ EOVERFLOW = syscall.Errno(0x57)
+ EOWNERDEAD = syscall.Errno(0x5e)
+ EPERM = syscall.Errno(0x1)
+ EPFNOSUPPORT = syscall.Errno(0x2e)
+ EPIPE = syscall.Errno(0x20)
+ EPROCLIM = syscall.Errno(0x43)
+ EPROCUNAVAIL = syscall.Errno(0x4c)
+ EPROGMISMATCH = syscall.Errno(0x4b)
+ EPROGUNAVAIL = syscall.Errno(0x4a)
+ EPROTO = syscall.Errno(0x5f)
+ EPROTONOSUPPORT = syscall.Errno(0x2b)
+ EPROTOTYPE = syscall.Errno(0x29)
+ ERANGE = syscall.Errno(0x22)
+ EREMOTE = syscall.Errno(0x47)
+ EROFS = syscall.Errno(0x1e)
+ ERPCMISMATCH = syscall.Errno(0x49)
+ ESHUTDOWN = syscall.Errno(0x3a)
+ ESOCKTNOSUPPORT = syscall.Errno(0x2c)
+ ESPIPE = syscall.Errno(0x1d)
+ ESRCH = syscall.Errno(0x3)
+ ESTALE = syscall.Errno(0x46)
+ ETIMEDOUT = syscall.Errno(0x3c)
+ ETOOMANYREFS = syscall.Errno(0x3b)
+ ETXTBSY = syscall.Errno(0x1a)
+ EUSERS = syscall.Errno(0x44)
+ EWOULDBLOCK = syscall.Errno(0x23)
+ EXDEV = syscall.Errno(0x12)
+)
+
+// Signals
+const (
+ SIGABRT = syscall.Signal(0x6)
+ SIGALRM = syscall.Signal(0xe)
+ SIGBUS = syscall.Signal(0xa)
+ SIGCHLD = syscall.Signal(0x14)
+ SIGCONT = syscall.Signal(0x13)
+ SIGEMT = syscall.Signal(0x7)
+ SIGFPE = syscall.Signal(0x8)
+ SIGHUP = syscall.Signal(0x1)
+ SIGILL = syscall.Signal(0x4)
+ SIGINFO = syscall.Signal(0x1d)
+ SIGINT = syscall.Signal(0x2)
+ SIGIO = syscall.Signal(0x17)
+ SIGIOT = syscall.Signal(0x6)
+ SIGKILL = syscall.Signal(0x9)
+ SIGPIPE = syscall.Signal(0xd)
+ SIGPROF = syscall.Signal(0x1b)
+ SIGQUIT = syscall.Signal(0x3)
+ SIGSEGV = syscall.Signal(0xb)
+ SIGSTOP = syscall.Signal(0x11)
+ SIGSYS = syscall.Signal(0xc)
+ SIGTERM = syscall.Signal(0xf)
+ SIGTHR = syscall.Signal(0x20)
+ SIGTRAP = syscall.Signal(0x5)
+ SIGTSTP = syscall.Signal(0x12)
+ SIGTTIN = syscall.Signal(0x15)
+ SIGTTOU = syscall.Signal(0x16)
+ SIGURG = syscall.Signal(0x10)
+ SIGUSR1 = syscall.Signal(0x1e)
+ SIGUSR2 = syscall.Signal(0x1f)
+ SIGVTALRM = syscall.Signal(0x1a)
+ SIGWINCH = syscall.Signal(0x1c)
+ SIGXCPU = syscall.Signal(0x18)
+ SIGXFSZ = syscall.Signal(0x19)
+)
+
+// Error table
+var errorList = [...]struct {
+ num syscall.Errno
+ name string
+ desc string
+}{
+ {1, "EPERM", "operation not permitted"},
+ {2, "ENOENT", "no such file or directory"},
+ {3, "ESRCH", "no such process"},
+ {4, "EINTR", "interrupted system call"},
+ {5, "EIO", "input/output error"},
+ {6, "ENXIO", "device not configured"},
+ {7, "E2BIG", "argument list too long"},
+ {8, "ENOEXEC", "exec format error"},
+ {9, "EBADF", "bad file descriptor"},
+ {10, "ECHILD", "no child processes"},
+ {11, "EDEADLK", "resource deadlock avoided"},
+ {12, "ENOMEM", "cannot allocate memory"},
+ {13, "EACCES", "permission denied"},
+ {14, "EFAULT", "bad address"},
+ {15, "ENOTBLK", "block device required"},
+ {16, "EBUSY", "device busy"},
+ {17, "EEXIST", "file exists"},
+ {18, "EXDEV", "cross-device link"},
+ {19, "ENODEV", "operation not supported by device"},
+ {20, "ENOTDIR", "not a directory"},
+ {21, "EISDIR", "is a directory"},
+ {22, "EINVAL", "invalid argument"},
+ {23, "ENFILE", "too many open files in system"},
+ {24, "EMFILE", "too many open files"},
+ {25, "ENOTTY", "inappropriate ioctl for device"},
+ {26, "ETXTBSY", "text file busy"},
+ {27, "EFBIG", "file too large"},
+ {28, "ENOSPC", "no space left on device"},
+ {29, "ESPIPE", "illegal seek"},
+ {30, "EROFS", "read-only file system"},
+ {31, "EMLINK", "too many links"},
+ {32, "EPIPE", "broken pipe"},
+ {33, "EDOM", "numerical argument out of domain"},
+ {34, "ERANGE", "result too large"},
+ {35, "EAGAIN", "resource temporarily unavailable"},
+ {36, "EINPROGRESS", "operation now in progress"},
+ {37, "EALREADY", "operation already in progress"},
+ {38, "ENOTSOCK", "socket operation on non-socket"},
+ {39, "EDESTADDRREQ", "destination address required"},
+ {40, "EMSGSIZE", "message too long"},
+ {41, "EPROTOTYPE", "protocol wrong type for socket"},
+ {42, "ENOPROTOOPT", "protocol not available"},
+ {43, "EPROTONOSUPPORT", "protocol not supported"},
+ {44, "ESOCKTNOSUPPORT", "socket type not supported"},
+ {45, "EOPNOTSUPP", "operation not supported"},
+ {46, "EPFNOSUPPORT", "protocol family not supported"},
+ {47, "EAFNOSUPPORT", "address family not supported by protocol family"},
+ {48, "EADDRINUSE", "address already in use"},
+ {49, "EADDRNOTAVAIL", "can't assign requested address"},
+ {50, "ENETDOWN", "network is down"},
+ {51, "ENETUNREACH", "network is unreachable"},
+ {52, "ENETRESET", "network dropped connection on reset"},
+ {53, "ECONNABORTED", "software caused connection abort"},
+ {54, "ECONNRESET", "connection reset by peer"},
+ {55, "ENOBUFS", "no buffer space available"},
+ {56, "EISCONN", "socket is already connected"},
+ {57, "ENOTCONN", "socket is not connected"},
+ {58, "ESHUTDOWN", "can't send after socket shutdown"},
+ {59, "ETOOMANYREFS", "too many references: can't splice"},
+ {60, "ETIMEDOUT", "operation timed out"},
+ {61, "ECONNREFUSED", "connection refused"},
+ {62, "ELOOP", "too many levels of symbolic links"},
+ {63, "ENAMETOOLONG", "file name too long"},
+ {64, "EHOSTDOWN", "host is down"},
+ {65, "EHOSTUNREACH", "no route to host"},
+ {66, "ENOTEMPTY", "directory not empty"},
+ {67, "EPROCLIM", "too many processes"},
+ {68, "EUSERS", "too many users"},
+ {69, "EDQUOT", "disk quota exceeded"},
+ {70, "ESTALE", "stale NFS file handle"},
+ {71, "EREMOTE", "too many levels of remote in path"},
+ {72, "EBADRPC", "RPC struct is bad"},
+ {73, "ERPCMISMATCH", "RPC version wrong"},
+ {74, "EPROGUNAVAIL", "RPC program not available"},
+ {75, "EPROGMISMATCH", "program version wrong"},
+ {76, "EPROCUNAVAIL", "bad procedure for program"},
+ {77, "ENOLCK", "no locks available"},
+ {78, "ENOSYS", "function not implemented"},
+ {79, "EFTYPE", "inappropriate file type or format"},
+ {80, "EAUTH", "authentication error"},
+ {81, "ENEEDAUTH", "need authenticator"},
+ {82, "EIPSEC", "IPsec processing failure"},
+ {83, "ENOATTR", "attribute not found"},
+ {84, "EILSEQ", "illegal byte sequence"},
+ {85, "ENOMEDIUM", "no medium found"},
+ {86, "EMEDIUMTYPE", "wrong medium type"},
+ {87, "EOVERFLOW", "value too large to be stored in data type"},
+ {88, "ECANCELED", "operation canceled"},
+ {89, "EIDRM", "identifier removed"},
+ {90, "ENOMSG", "no message of desired type"},
+ {91, "ENOTSUP", "not supported"},
+ {92, "EBADMSG", "bad message"},
+ {93, "ENOTRECOVERABLE", "state not recoverable"},
+ {94, "EOWNERDEAD", "previous owner died"},
+ {95, "ELAST", "protocol error"},
+}
+
+// Signal table
+var signalList = [...]struct {
+ num syscall.Signal
+ name string
+ desc string
+}{
+ {1, "SIGHUP", "hangup"},
+ {2, "SIGINT", "interrupt"},
+ {3, "SIGQUIT", "quit"},
+ {4, "SIGILL", "illegal instruction"},
+ {5, "SIGTRAP", "trace/BPT trap"},
+ {6, "SIGABRT", "abort trap"},
+ {7, "SIGEMT", "EMT trap"},
+ {8, "SIGFPE", "floating point exception"},
+ {9, "SIGKILL", "killed"},
+ {10, "SIGBUS", "bus error"},
+ {11, "SIGSEGV", "segmentation fault"},
+ {12, "SIGSYS", "bad system call"},
+ {13, "SIGPIPE", "broken pipe"},
+ {14, "SIGALRM", "alarm clock"},
+ {15, "SIGTERM", "terminated"},
+ {16, "SIGURG", "urgent I/O condition"},
+ {17, "SIGSTOP", "suspended (signal)"},
+ {18, "SIGTSTP", "suspended"},
+ {19, "SIGCONT", "continued"},
+ {20, "SIGCHLD", "child exited"},
+ {21, "SIGTTIN", "stopped (tty input)"},
+ {22, "SIGTTOU", "stopped (tty output)"},
+ {23, "SIGIO", "I/O possible"},
+ {24, "SIGXCPU", "cputime limit exceeded"},
+ {25, "SIGXFSZ", "filesize limit exceeded"},
+ {26, "SIGVTALRM", "virtual timer expired"},
+ {27, "SIGPROF", "profiling timer expired"},
+ {28, "SIGWINCH", "window size changes"},
+ {29, "SIGINFO", "information request"},
+ {30, "SIGUSR1", "user defined signal 1"},
+ {31, "SIGUSR2", "user defined signal 2"},
+ {32, "SIGTHR", "thread AST"},
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
deleted file mode 100644
index 23e94d36..00000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go
+++ /dev/null
@@ -1,1809 +0,0 @@
-// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.1_11.go syscall_darwin_386.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build darwin,386,!go1.12
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setgroups(ngid int, gid *_Gid_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
- wpid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Shutdown(s int, how int) (err error) {
- _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimes(path string, timeval *[2]Timeval) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func futimes(fd int, timeval *[2]Timeval) (err error) {
- _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Madvise(b []byte, behav int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlockall(flags int) (err error) {
- _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mprotect(b []byte, prot int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Msync(b []byte, flags int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlockall() (err error) {
- _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe() (r int, w int, err error) {
- r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
- r = int(r0)
- w = int(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func removexattr(path string, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fremovexattr(fd int, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func listxattr(path string, dest *byte, size int, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) {
- r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntl(fd int, cmd int, arg int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kill(pid int, signum int, posix int) (err error) {
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctl(fd int, req uint, arg uintptr) (err error) {
- _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
- var _p0 unsafe.Pointer
- if len(mib) > 0 {
- _p0 = unsafe.Pointer(&mib[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
- _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Access(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
- _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chflags(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chmod(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chroot(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Close(fd int) (err error) {
- _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup(fd int) (nfd int, err error) {
- r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
- nfd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(from int, to int) (err error) {
- _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exchangedata(path1 string, path2 string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path1)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(path2)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exit(code int) {
- Syscall(SYS_EXIT, uintptr(code), 0, 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchdir(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchflags(fd int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Flock(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fpathconf(fd int, name int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fsync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdtablesize() (size int) {
- r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
- size = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getegid() (egid int) {
- r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
- egid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Geteuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getgid() (gid int) {
- r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
- gid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
- pgid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgrp() (pgrp int) {
- r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
- pgrp = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpid() (pid int) {
- r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
- pid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getppid() (ppid int) {
- r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
- ppid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
- prio = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getsid(pid int) (sid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
- sid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Gettimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Issetugid() (tainted bool) {
- r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0)
- tainted = bool(r0 != 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Kqueue() (fd int, err error) {
- r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lchown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Link(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listen(s int, backlog int) (err error) {
- _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdir(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdirat(dirfd int, path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkfifo(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mknod(path string, mode uint32, dev int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Open(path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pathconf(path string, name int) (val int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func read(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlink(path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rename(from string, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Renameat(fromfd int, from string, tofd int, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Revoke(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rmdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0)
- newoffset = int64(int64(r1)<<32 | int64(r0))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setegid(egid int) (err error) {
- _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seteuid(euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setgid(gid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setlogin(name string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(name)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setprivexec(flag int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setsid() (pid int, err error) {
- r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
- pid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Settimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setuid(uid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlink(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sync() (err error) {
- _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Truncate(path string, length int64) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Umask(newmask int) (oldmask int) {
- r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
- oldmask = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Undelete(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlink(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlinkat(dirfd int, path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unmount(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func write(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0)
- ret = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lstat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Stat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, stat *Statfs_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
deleted file mode 100644
index 10256173..00000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go
+++ /dev/null
@@ -1,1809 +0,0 @@
-// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.1_11.go syscall_darwin_amd64.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build darwin,amd64,!go1.12
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setgroups(ngid int, gid *_Gid_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
- wpid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Shutdown(s int, how int) (err error) {
- _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimes(path string, timeval *[2]Timeval) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func futimes(fd int, timeval *[2]Timeval) (err error) {
- _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Madvise(b []byte, behav int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlockall(flags int) (err error) {
- _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mprotect(b []byte, prot int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Msync(b []byte, flags int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlockall() (err error) {
- _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe() (r int, w int, err error) {
- r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
- r = int(r0)
- w = int(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func removexattr(path string, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fremovexattr(fd int, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func listxattr(path string, dest *byte, size int, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) {
- r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntl(fd int, cmd int, arg int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kill(pid int, signum int, posix int) (err error) {
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctl(fd int, req uint, arg uintptr) (err error) {
- _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
- var _p0 unsafe.Pointer
- if len(mib) > 0 {
- _p0 = unsafe.Pointer(&mib[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
- _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Access(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
- _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chflags(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chmod(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chroot(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Close(fd int) (err error) {
- _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup(fd int) (nfd int, err error) {
- r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
- nfd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(from int, to int) (err error) {
- _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exchangedata(path1 string, path2 string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path1)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(path2)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exit(code int) {
- Syscall(SYS_EXIT, uintptr(code), 0, 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchdir(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchflags(fd int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Flock(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fpathconf(fd int, name int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fsync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdtablesize() (size int) {
- r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
- size = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getegid() (egid int) {
- r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
- egid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Geteuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getgid() (gid int) {
- r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
- gid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
- pgid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgrp() (pgrp int) {
- r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
- pgrp = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpid() (pid int) {
- r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
- pid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getppid() (ppid int) {
- r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
- ppid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
- prio = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getsid(pid int) (sid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
- sid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Gettimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Issetugid() (tainted bool) {
- r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0)
- tainted = bool(r0 != 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Kqueue() (fd int, err error) {
- r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lchown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Link(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listen(s int, backlog int) (err error) {
- _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdir(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdirat(dirfd int, path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkfifo(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mknod(path string, mode uint32, dev int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Open(path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pathconf(path string, name int) (val int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func read(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlink(path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rename(from string, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Renameat(fromfd int, from string, tofd int, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Revoke(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rmdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
- newoffset = int64(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setegid(egid int) (err error) {
- _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seteuid(euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setgid(gid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setlogin(name string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(name)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setprivexec(flag int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setsid() (pid int, err error) {
- r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
- pid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Settimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setuid(uid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlink(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sync() (err error) {
- _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Truncate(path string, length int64) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Umask(newmask int) (oldmask int) {
- r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
- oldmask = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Undelete(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlink(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlinkat(dirfd int, path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unmount(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func write(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
- ret = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lstat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Stat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, stat *Statfs_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
deleted file mode 100644
index d34e6df2..00000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go
+++ /dev/null
@@ -1,1782 +0,0 @@
-// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.1_11.go syscall_darwin_arm.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build darwin,arm,!go1.12
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setgroups(ngid int, gid *_Gid_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
- wpid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Shutdown(s int, how int) (err error) {
- _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func utimes(path string, timeval *[2]Timeval) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func futimes(fd int, timeval *[2]Timeval) (err error) {
- _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Madvise(b []byte, behav int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mlockall(flags int) (err error) {
- _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mprotect(b []byte, prot int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Msync(b []byte, flags int) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlock(b []byte) (err error) {
- var _p0 unsafe.Pointer
- if len(b) > 0 {
- _p0 = unsafe.Pointer(&b[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Munlockall() (err error) {
- _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func pipe() (r int, w int, err error) {
- r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
- r = int(r0)
- w = int(r1)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func removexattr(path string, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fremovexattr(fd int, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func listxattr(path string, dest *byte, size int, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) {
- r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntl(fd int, cmd int, arg int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kill(pid int, signum int, posix int) (err error) {
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctl(fd int, req uint, arg uintptr) (err error) {
- _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
- var _p0 unsafe.Pointer
- if len(mib) > 0 {
- _p0 = unsafe.Pointer(&mib[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
- _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Access(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
- _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chflags(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chmod(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Chroot(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Close(fd int) (err error) {
- _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup(fd int) (nfd int, err error) {
- r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
- nfd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Dup2(from int, to int) (err error) {
- _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exchangedata(path1 string, path2 string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path1)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(path2)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Exit(code int) {
- Syscall(SYS_EXIT, uintptr(code), 0, 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchdir(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchflags(fd int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Flock(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fpathconf(fd int, name int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fsync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getdtablesize() (size int) {
- r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
- size = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getegid() (egid int) {
- r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
- egid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Geteuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getgid() (gid int) {
- r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
- gid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
- pgid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpgrp() (pgrp int) {
- r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
- pgrp = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpid() (pid int) {
- r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
- pid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getppid() (ppid int) {
- r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
- ppid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
- prio = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getsid(pid int) (sid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
- sid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Gettimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Getuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
- uid = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Issetugid() (tainted bool) {
- r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0)
- tainted = bool(r0 != 0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Kqueue() (fd int, err error) {
- r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lchown(path string, uid int, gid int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Link(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Listen(s int, backlog int) (err error) {
- _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdir(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkdirat(dirfd int, path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mkfifo(path string, mode uint32) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Mknod(path string, mode uint32, dev int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Open(path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pathconf(path string, name int) (val int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func read(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlink(path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(buf) > 0 {
- _p1 = unsafe.Pointer(&buf[0])
- } else {
- _p1 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rename(from string, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Renameat(fromfd int, from string, tofd int, to string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(from)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(to)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Revoke(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Rmdir(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0)
- newoffset = int64(int64(r1)<<32 | int64(r0))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setegid(egid int) (err error) {
- _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Seteuid(euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setgid(gid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setlogin(name string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(name)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setprivexec(flag int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setsid() (pid int, err error) {
- r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
- pid = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Settimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setuid(uid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlink(path string, link string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(link)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(oldpath)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(newpath)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Sync() (err error) {
- _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Truncate(path string, length int64) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Umask(newmask int) (oldmask int) {
- r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
- oldmask = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Undelete(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlink(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unlinkat(dirfd int, path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Unmount(path string, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func write(fd int, p []byte) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(p) > 0 {
- _p0 = unsafe.Pointer(&p[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0)
- ret = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(buf), uintptr(size), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lstat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Stat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, stat *Statfs_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
index 92efa1da..d885ef38 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
@@ -13,17 +13,20 @@ import (
//go:cgo_import_dynamic libc_preadv preadv "libc.so"
//go:cgo_import_dynamic libc_writev writev "libc.so"
//go:cgo_import_dynamic libc_pwritev pwritev "libc.so"
+//go:cgo_import_dynamic libc_accept4 accept4 "libc.so"
//go:linkname procreadv libc_readv
//go:linkname procpreadv libc_preadv
//go:linkname procwritev libc_writev
//go:linkname procpwritev libc_pwritev
+//go:linkname procaccept4 libc_accept4
var (
procreadv,
procpreadv,
procwritev,
- procpwritev syscallFunc
+ procpwritev,
+ procaccept4 syscallFunc
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -85,3 +88,14 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index df217825..2fbbbe5a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -83,6 +83,22 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
@@ -1821,6 +1837,21 @@ func faccessat(dirfd int, path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(pathname)
@@ -1847,6 +1878,52 @@ func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ProcessVMReadv(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(localIov) > 0 {
+ _p0 = unsafe.Pointer(&localIov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ var _p1 unsafe.Pointer
+ if len(remoteIov) > 0 {
+ _p1 = unsafe.Pointer(&remoteIov[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PROCESS_VM_READV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ProcessVMWritev(pid int, localIov []Iovec, remoteIov []RemoteIovec, flags uint) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(localIov) > 0 {
+ _p0 = unsafe.Pointer(&localIov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ var _p1 unsafe.Pointer
+ if len(remoteIov) > 0 {
+ _p1 = unsafe.Pointer(&remoteIov[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PROCESS_VM_WRITEV, uintptr(pid), uintptr(_p0), uintptr(len(localIov)), uintptr(_p1), uintptr(len(remoteIov)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
similarity index 86%
rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
rename to vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index 8d39a09f..ec6bd5bb 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -1,7 +1,7 @@
-// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.1_11.go syscall_darwin_arm64.go
+// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
-// +build darwin,arm64,!go1.12
+// +build openbsd,mips64
package unix
@@ -350,8 +350,8 @@ func Munlockall() (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
+func pipe2(p *[2]_C_int, flags int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
@@ -360,10 +360,15 @@ func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func pipe() (r int, w int, err error) {
- r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
- r = int(r0)
- w = int(r1)
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
@@ -372,163 +377,15 @@ func pipe() (r int, w int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
}
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fsetxattr(fd int, attr string, data *byte, size int, position uint32, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func removexattr(path string, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fremovexattr(fd int, attr string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(attr)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(options))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func listxattr(path string, dest *byte, size int, options int) (sz int, err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) {
- r0, _, e1 := Syscall6(SYS_FLISTXATTR, uintptr(fd), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0)
- sz = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) {
- _, _, e1 := Syscall6(SYS_SETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fcntl(fd int, cmd int, arg int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
- val = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func kill(pid int, signum int, posix int) (err error) {
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix))
+ r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
@@ -554,7 +411,7 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if e1 != 0 {
err = errnoErr(e1)
}
@@ -563,8 +420,9 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
- _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
+func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+ n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
@@ -704,18 +562,8 @@ func Dup2(from int, to int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Exchangedata(path1 string, path2 string, options int) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path1)
- if err != nil {
- return
- }
- var _p1 *byte
- _p1, err = BytePtrFromString(path2)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
+func Dup3(from int, to int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
@@ -837,6 +685,41 @@ func Fpathconf(fd int, name int) (val int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, stat *Statfs_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -848,7 +731,7 @@ func Fsync(fd int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length))
if e1 != 0 {
err = errnoErr(e1)
}
@@ -857,14 +740,6 @@ func Ftruncate(fd int, length int64) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getdtablesize() (size int) {
- r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0)
- size = int(r0)
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Getegid() (egid int) {
r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
@@ -945,6 +820,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getrtable() (rtable int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0)
+ rtable = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
@@ -966,8 +852,8 @@ func Getsid(pid int) (sid int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Gettimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
@@ -985,13 +871,23 @@ func Getuid() (uid int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
- r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0)
+ r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Kill(pid int, signum syscall.Signal) (err error) {
+ _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Kqueue() (fd int, err error) {
r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
fd = int(r0)
@@ -1068,6 +964,21 @@ func Listen(s int, backlog int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mkdir(path string, mode uint32) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1113,6 +1024,21 @@ func Mkfifo(path string, mode uint32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Mknod(path string, mode uint32, dev int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1128,6 +1054,31 @@ func Mknod(path string, mode uint32, dev int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1183,7 +1134,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1200,7 +1151,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1342,7 +1293,7 @@ func Rmdir(path string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+ r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0)
newoffset = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1364,7 +1315,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
- _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0)
+ _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
@@ -1428,16 +1379,6 @@ func Setpriority(which int, who int, prio int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setprivexec(flag int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
@@ -1458,6 +1399,26 @@ func Setreuid(ruid int, euid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setrlimit(which int, lim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
@@ -1468,6 +1429,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Setrtable(rtable int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Setsid() (pid int, err error) {
r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
pid = int(r0)
@@ -1499,6 +1470,36 @@ func Setuid(uid int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, stat *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1555,7 +1556,7 @@ func Truncate(path string, length int64) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length))
if e1 != 0 {
err = errnoErr(e1)
}
@@ -1572,21 +1573,6 @@ func Umask(newmask int) (oldmask int) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Undelete(path string) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Unlink(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
@@ -1650,7 +1636,7 @@ func write(fd int, p []byte) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos))
+ r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0)
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1692,89 +1678,13 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(buf), uintptr(size), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Lstat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Stat(path string, stat *Stat_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Statfs(path string, stat *Statfs_t) (err error) {
- var _p0 *byte
- _p0, err = BytePtrFromString(path)
- if err != nil {
- return
- }
- _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
new file mode 100644
index 00000000..aca34b34
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go
@@ -0,0 +1,279 @@
+// go run mksysctl_openbsd.go
+// Code generated by the command above; DO NOT EDIT.
+
+// +build mips64,openbsd
+
+package unix
+
+type mibentry struct {
+ ctlname string
+ ctloid []_C_int
+}
+
+var sysctlMib = []mibentry{
+ {"ddb.console", []_C_int{9, 6}},
+ {"ddb.log", []_C_int{9, 7}},
+ {"ddb.max_line", []_C_int{9, 3}},
+ {"ddb.max_width", []_C_int{9, 2}},
+ {"ddb.panic", []_C_int{9, 5}},
+ {"ddb.profile", []_C_int{9, 9}},
+ {"ddb.radix", []_C_int{9, 1}},
+ {"ddb.tab_stop_width", []_C_int{9, 4}},
+ {"ddb.trigger", []_C_int{9, 8}},
+ {"fs.posix.setuid", []_C_int{3, 1, 1}},
+ {"hw.allowpowerdown", []_C_int{6, 22}},
+ {"hw.byteorder", []_C_int{6, 4}},
+ {"hw.cpuspeed", []_C_int{6, 12}},
+ {"hw.diskcount", []_C_int{6, 10}},
+ {"hw.disknames", []_C_int{6, 8}},
+ {"hw.diskstats", []_C_int{6, 9}},
+ {"hw.machine", []_C_int{6, 1}},
+ {"hw.model", []_C_int{6, 2}},
+ {"hw.ncpu", []_C_int{6, 3}},
+ {"hw.ncpufound", []_C_int{6, 21}},
+ {"hw.ncpuonline", []_C_int{6, 25}},
+ {"hw.pagesize", []_C_int{6, 7}},
+ {"hw.perfpolicy", []_C_int{6, 23}},
+ {"hw.physmem", []_C_int{6, 19}},
+ {"hw.product", []_C_int{6, 15}},
+ {"hw.serialno", []_C_int{6, 17}},
+ {"hw.setperf", []_C_int{6, 13}},
+ {"hw.smt", []_C_int{6, 24}},
+ {"hw.usermem", []_C_int{6, 20}},
+ {"hw.uuid", []_C_int{6, 18}},
+ {"hw.vendor", []_C_int{6, 14}},
+ {"hw.version", []_C_int{6, 16}},
+ {"kern.allowdt", []_C_int{1, 65}},
+ {"kern.allowkmem", []_C_int{1, 52}},
+ {"kern.argmax", []_C_int{1, 8}},
+ {"kern.audio", []_C_int{1, 84}},
+ {"kern.boottime", []_C_int{1, 21}},
+ {"kern.bufcachepercent", []_C_int{1, 72}},
+ {"kern.ccpu", []_C_int{1, 45}},
+ {"kern.clockrate", []_C_int{1, 12}},
+ {"kern.consbuf", []_C_int{1, 83}},
+ {"kern.consbufsize", []_C_int{1, 82}},
+ {"kern.consdev", []_C_int{1, 75}},
+ {"kern.cp_time", []_C_int{1, 40}},
+ {"kern.cp_time2", []_C_int{1, 71}},
+ {"kern.cpustats", []_C_int{1, 85}},
+ {"kern.domainname", []_C_int{1, 22}},
+ {"kern.file", []_C_int{1, 73}},
+ {"kern.forkstat", []_C_int{1, 42}},
+ {"kern.fscale", []_C_int{1, 46}},
+ {"kern.fsync", []_C_int{1, 33}},
+ {"kern.global_ptrace", []_C_int{1, 81}},
+ {"kern.hostid", []_C_int{1, 11}},
+ {"kern.hostname", []_C_int{1, 10}},
+ {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}},
+ {"kern.job_control", []_C_int{1, 19}},
+ {"kern.malloc.buckets", []_C_int{1, 39, 1}},
+ {"kern.malloc.kmemnames", []_C_int{1, 39, 3}},
+ {"kern.maxclusters", []_C_int{1, 67}},
+ {"kern.maxfiles", []_C_int{1, 7}},
+ {"kern.maxlocksperuid", []_C_int{1, 70}},
+ {"kern.maxpartitions", []_C_int{1, 23}},
+ {"kern.maxproc", []_C_int{1, 6}},
+ {"kern.maxthread", []_C_int{1, 25}},
+ {"kern.maxvnodes", []_C_int{1, 5}},
+ {"kern.mbstat", []_C_int{1, 59}},
+ {"kern.msgbuf", []_C_int{1, 48}},
+ {"kern.msgbufsize", []_C_int{1, 38}},
+ {"kern.nchstats", []_C_int{1, 41}},
+ {"kern.netlivelocks", []_C_int{1, 76}},
+ {"kern.nfiles", []_C_int{1, 56}},
+ {"kern.ngroups", []_C_int{1, 18}},
+ {"kern.nosuidcoredump", []_C_int{1, 32}},
+ {"kern.nprocs", []_C_int{1, 47}},
+ {"kern.nselcoll", []_C_int{1, 43}},
+ {"kern.nthreads", []_C_int{1, 26}},
+ {"kern.numvnodes", []_C_int{1, 58}},
+ {"kern.osrelease", []_C_int{1, 2}},
+ {"kern.osrevision", []_C_int{1, 3}},
+ {"kern.ostype", []_C_int{1, 1}},
+ {"kern.osversion", []_C_int{1, 27}},
+ {"kern.pfstatus", []_C_int{1, 86}},
+ {"kern.pool_debug", []_C_int{1, 77}},
+ {"kern.posix1version", []_C_int{1, 17}},
+ {"kern.proc", []_C_int{1, 66}},
+ {"kern.rawpartition", []_C_int{1, 24}},
+ {"kern.saved_ids", []_C_int{1, 20}},
+ {"kern.securelevel", []_C_int{1, 9}},
+ {"kern.seminfo", []_C_int{1, 61}},
+ {"kern.shminfo", []_C_int{1, 62}},
+ {"kern.somaxconn", []_C_int{1, 28}},
+ {"kern.sominconn", []_C_int{1, 29}},
+ {"kern.splassert", []_C_int{1, 54}},
+ {"kern.stackgap_random", []_C_int{1, 50}},
+ {"kern.sysvipc_info", []_C_int{1, 51}},
+ {"kern.sysvmsg", []_C_int{1, 34}},
+ {"kern.sysvsem", []_C_int{1, 35}},
+ {"kern.sysvshm", []_C_int{1, 36}},
+ {"kern.timecounter.choice", []_C_int{1, 69, 4}},
+ {"kern.timecounter.hardware", []_C_int{1, 69, 3}},
+ {"kern.timecounter.tick", []_C_int{1, 69, 1}},
+ {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}},
+ {"kern.timeout_stats", []_C_int{1, 87}},
+ {"kern.tty.tk_cancc", []_C_int{1, 44, 4}},
+ {"kern.tty.tk_nin", []_C_int{1, 44, 1}},
+ {"kern.tty.tk_nout", []_C_int{1, 44, 2}},
+ {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}},
+ {"kern.tty.ttyinfo", []_C_int{1, 44, 5}},
+ {"kern.ttycount", []_C_int{1, 57}},
+ {"kern.utc_offset", []_C_int{1, 88}},
+ {"kern.version", []_C_int{1, 4}},
+ {"kern.watchdog.auto", []_C_int{1, 64, 2}},
+ {"kern.watchdog.period", []_C_int{1, 64, 1}},
+ {"kern.witnesswatch", []_C_int{1, 53}},
+ {"kern.wxabort", []_C_int{1, 74}},
+ {"net.bpf.bufsize", []_C_int{4, 31, 1}},
+ {"net.bpf.maxbufsize", []_C_int{4, 31, 2}},
+ {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}},
+ {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}},
+ {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}},
+ {"net.inet.carp.log", []_C_int{4, 2, 112, 3}},
+ {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}},
+ {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}},
+ {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}},
+ {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}},
+ {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}},
+ {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}},
+ {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}},
+ {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}},
+ {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}},
+ {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}},
+ {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}},
+ {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}},
+ {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}},
+ {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}},
+ {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}},
+ {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}},
+ {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}},
+ {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}},
+ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}},
+ {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}},
+ {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}},
+ {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}},
+ {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}},
+ {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}},
+ {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}},
+ {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}},
+ {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}},
+ {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}},
+ {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}},
+ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}},
+ {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}},
+ {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}},
+ {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}},
+ {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}},
+ {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}},
+ {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}},
+ {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}},
+ {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}},
+ {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}},
+ {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}},
+ {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}},
+ {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}},
+ {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}},
+ {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}},
+ {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}},
+ {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}},
+ {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}},
+ {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}},
+ {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}},
+ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}},
+ {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}},
+ {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}},
+ {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}},
+ {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}},
+ {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}},
+ {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}},
+ {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}},
+ {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}},
+ {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}},
+ {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}},
+ {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}},
+ {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}},
+ {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}},
+ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}},
+ {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}},
+ {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}},
+ {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}},
+ {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}},
+ {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}},
+ {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}},
+ {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}},
+ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}},
+ {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}},
+ {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}},
+ {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}},
+ {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}},
+ {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}},
+ {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}},
+ {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}},
+ {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}},
+ {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}},
+ {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}},
+ {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}},
+ {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}},
+ {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}},
+ {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}},
+ {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}},
+ {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}},
+ {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}},
+ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}},
+ {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}},
+ {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}},
+ {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}},
+ {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}},
+ {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}},
+ {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}},
+ {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}},
+ {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}},
+ {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}},
+ {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}},
+ {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}},
+ {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}},
+ {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}},
+ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}},
+ {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}},
+ {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}},
+ {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}},
+ {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}},
+ {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}},
+ {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}},
+ {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}},
+ {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}},
+ {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}},
+ {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}},
+ {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}},
+ {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}},
+ {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}},
+ {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}},
+ {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}},
+ {"net.key.sadb_dump", []_C_int{4, 30, 1}},
+ {"net.key.spd_dump", []_C_int{4, 30, 2}},
+ {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}},
+ {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}},
+ {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}},
+ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}},
+ {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}},
+ {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}},
+ {"net.mpls.ttl", []_C_int{4, 33, 2}},
+ {"net.pflow.stats", []_C_int{4, 34, 1}},
+ {"net.pipex.enable", []_C_int{4, 35, 1}},
+ {"vm.anonmin", []_C_int{2, 7}},
+ {"vm.loadavg", []_C_int{2, 2}},
+ {"vm.malloc_conf", []_C_int{2, 12}},
+ {"vm.maxslp", []_C_int{2, 10}},
+ {"vm.nkmempages", []_C_int{2, 6}},
+ {"vm.psstrings", []_C_int{2, 3}},
+ {"vm.swapencrypt.enable", []_C_int{2, 5, 0}},
+ {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}},
+ {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}},
+ {"vm.uspace", []_C_int{2, 11}},
+ {"vm.uvmexp", []_C_int{2, 4}},
+ {"vm.vmmeter", []_C_int{2, 1}},
+ {"vm.vnodemin", []_C_int{2, 9}},
+ {"vm.vtextmin", []_C_int{2, 8}},
+}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
deleted file mode 100644
index f3361453..00000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk/usr/include/sys/syscall.h
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build 386,darwin
-
-package unix
-
-const (
- SYS_SYSCALL = 0
- SYS_EXIT = 1
- SYS_FORK = 2
- SYS_READ = 3
- SYS_WRITE = 4
- SYS_OPEN = 5
- SYS_CLOSE = 6
- SYS_WAIT4 = 7
- SYS_LINK = 9
- SYS_UNLINK = 10
- SYS_CHDIR = 12
- SYS_FCHDIR = 13
- SYS_MKNOD = 14
- SYS_CHMOD = 15
- SYS_CHOWN = 16
- SYS_GETFSSTAT = 18
- SYS_GETPID = 20
- SYS_SETUID = 23
- SYS_GETUID = 24
- SYS_GETEUID = 25
- SYS_PTRACE = 26
- SYS_RECVMSG = 27
- SYS_SENDMSG = 28
- SYS_RECVFROM = 29
- SYS_ACCEPT = 30
- SYS_GETPEERNAME = 31
- SYS_GETSOCKNAME = 32
- SYS_ACCESS = 33
- SYS_CHFLAGS = 34
- SYS_FCHFLAGS = 35
- SYS_SYNC = 36
- SYS_KILL = 37
- SYS_GETPPID = 39
- SYS_DUP = 41
- SYS_PIPE = 42
- SYS_GETEGID = 43
- SYS_SIGACTION = 46
- SYS_GETGID = 47
- SYS_SIGPROCMASK = 48
- SYS_GETLOGIN = 49
- SYS_SETLOGIN = 50
- SYS_ACCT = 51
- SYS_SIGPENDING = 52
- SYS_SIGALTSTACK = 53
- SYS_IOCTL = 54
- SYS_REBOOT = 55
- SYS_REVOKE = 56
- SYS_SYMLINK = 57
- SYS_READLINK = 58
- SYS_EXECVE = 59
- SYS_UMASK = 60
- SYS_CHROOT = 61
- SYS_MSYNC = 65
- SYS_VFORK = 66
- SYS_MUNMAP = 73
- SYS_MPROTECT = 74
- SYS_MADVISE = 75
- SYS_MINCORE = 78
- SYS_GETGROUPS = 79
- SYS_SETGROUPS = 80
- SYS_GETPGRP = 81
- SYS_SETPGID = 82
- SYS_SETITIMER = 83
- SYS_SWAPON = 85
- SYS_GETITIMER = 86
- SYS_GETDTABLESIZE = 89
- SYS_DUP2 = 90
- SYS_FCNTL = 92
- SYS_SELECT = 93
- SYS_FSYNC = 95
- SYS_SETPRIORITY = 96
- SYS_SOCKET = 97
- SYS_CONNECT = 98
- SYS_GETPRIORITY = 100
- SYS_BIND = 104
- SYS_SETSOCKOPT = 105
- SYS_LISTEN = 106
- SYS_SIGSUSPEND = 111
- SYS_GETTIMEOFDAY = 116
- SYS_GETRUSAGE = 117
- SYS_GETSOCKOPT = 118
- SYS_READV = 120
- SYS_WRITEV = 121
- SYS_SETTIMEOFDAY = 122
- SYS_FCHOWN = 123
- SYS_FCHMOD = 124
- SYS_SETREUID = 126
- SYS_SETREGID = 127
- SYS_RENAME = 128
- SYS_FLOCK = 131
- SYS_MKFIFO = 132
- SYS_SENDTO = 133
- SYS_SHUTDOWN = 134
- SYS_SOCKETPAIR = 135
- SYS_MKDIR = 136
- SYS_RMDIR = 137
- SYS_UTIMES = 138
- SYS_FUTIMES = 139
- SYS_ADJTIME = 140
- SYS_GETHOSTUUID = 142
- SYS_SETSID = 147
- SYS_GETPGID = 151
- SYS_SETPRIVEXEC = 152
- SYS_PREAD = 153
- SYS_PWRITE = 154
- SYS_NFSSVC = 155
- SYS_STATFS = 157
- SYS_FSTATFS = 158
- SYS_UNMOUNT = 159
- SYS_GETFH = 161
- SYS_QUOTACTL = 165
- SYS_MOUNT = 167
- SYS_CSOPS = 169
- SYS_CSOPS_AUDITTOKEN = 170
- SYS_WAITID = 173
- SYS_KDEBUG_TYPEFILTER = 177
- SYS_KDEBUG_TRACE_STRING = 178
- SYS_KDEBUG_TRACE64 = 179
- SYS_KDEBUG_TRACE = 180
- SYS_SETGID = 181
- SYS_SETEGID = 182
- SYS_SETEUID = 183
- SYS_SIGRETURN = 184
- SYS_THREAD_SELFCOUNTS = 186
- SYS_FDATASYNC = 187
- SYS_STAT = 188
- SYS_FSTAT = 189
- SYS_LSTAT = 190
- SYS_PATHCONF = 191
- SYS_FPATHCONF = 192
- SYS_GETRLIMIT = 194
- SYS_SETRLIMIT = 195
- SYS_GETDIRENTRIES = 196
- SYS_MMAP = 197
- SYS_LSEEK = 199
- SYS_TRUNCATE = 200
- SYS_FTRUNCATE = 201
- SYS_SYSCTL = 202
- SYS_MLOCK = 203
- SYS_MUNLOCK = 204
- SYS_UNDELETE = 205
- SYS_OPEN_DPROTECTED_NP = 216
- SYS_GETATTRLIST = 220
- SYS_SETATTRLIST = 221
- SYS_GETDIRENTRIESATTR = 222
- SYS_EXCHANGEDATA = 223
- SYS_SEARCHFS = 225
- SYS_DELETE = 226
- SYS_COPYFILE = 227
- SYS_FGETATTRLIST = 228
- SYS_FSETATTRLIST = 229
- SYS_POLL = 230
- SYS_WATCHEVENT = 231
- SYS_WAITEVENT = 232
- SYS_MODWATCH = 233
- SYS_GETXATTR = 234
- SYS_FGETXATTR = 235
- SYS_SETXATTR = 236
- SYS_FSETXATTR = 237
- SYS_REMOVEXATTR = 238
- SYS_FREMOVEXATTR = 239
- SYS_LISTXATTR = 240
- SYS_FLISTXATTR = 241
- SYS_FSCTL = 242
- SYS_INITGROUPS = 243
- SYS_POSIX_SPAWN = 244
- SYS_FFSCTL = 245
- SYS_NFSCLNT = 247
- SYS_FHOPEN = 248
- SYS_MINHERIT = 250
- SYS_SEMSYS = 251
- SYS_MSGSYS = 252
- SYS_SHMSYS = 253
- SYS_SEMCTL = 254
- SYS_SEMGET = 255
- SYS_SEMOP = 256
- SYS_MSGCTL = 258
- SYS_MSGGET = 259
- SYS_MSGSND = 260
- SYS_MSGRCV = 261
- SYS_SHMAT = 262
- SYS_SHMCTL = 263
- SYS_SHMDT = 264
- SYS_SHMGET = 265
- SYS_SHM_OPEN = 266
- SYS_SHM_UNLINK = 267
- SYS_SEM_OPEN = 268
- SYS_SEM_CLOSE = 269
- SYS_SEM_UNLINK = 270
- SYS_SEM_WAIT = 271
- SYS_SEM_TRYWAIT = 272
- SYS_SEM_POST = 273
- SYS_SYSCTLBYNAME = 274
- SYS_OPEN_EXTENDED = 277
- SYS_UMASK_EXTENDED = 278
- SYS_STAT_EXTENDED = 279
- SYS_LSTAT_EXTENDED = 280
- SYS_FSTAT_EXTENDED = 281
- SYS_CHMOD_EXTENDED = 282
- SYS_FCHMOD_EXTENDED = 283
- SYS_ACCESS_EXTENDED = 284
- SYS_SETTID = 285
- SYS_GETTID = 286
- SYS_SETSGROUPS = 287
- SYS_GETSGROUPS = 288
- SYS_SETWGROUPS = 289
- SYS_GETWGROUPS = 290
- SYS_MKFIFO_EXTENDED = 291
- SYS_MKDIR_EXTENDED = 292
- SYS_IDENTITYSVC = 293
- SYS_SHARED_REGION_CHECK_NP = 294
- SYS_VM_PRESSURE_MONITOR = 296
- SYS_PSYNCH_RW_LONGRDLOCK = 297
- SYS_PSYNCH_RW_YIELDWRLOCK = 298
- SYS_PSYNCH_RW_DOWNGRADE = 299
- SYS_PSYNCH_RW_UPGRADE = 300
- SYS_PSYNCH_MUTEXWAIT = 301
- SYS_PSYNCH_MUTEXDROP = 302
- SYS_PSYNCH_CVBROAD = 303
- SYS_PSYNCH_CVSIGNAL = 304
- SYS_PSYNCH_CVWAIT = 305
- SYS_PSYNCH_RW_RDLOCK = 306
- SYS_PSYNCH_RW_WRLOCK = 307
- SYS_PSYNCH_RW_UNLOCK = 308
- SYS_PSYNCH_RW_UNLOCK2 = 309
- SYS_GETSID = 310
- SYS_SETTID_WITH_PID = 311
- SYS_PSYNCH_CVCLRPREPOST = 312
- SYS_AIO_FSYNC = 313
- SYS_AIO_RETURN = 314
- SYS_AIO_SUSPEND = 315
- SYS_AIO_CANCEL = 316
- SYS_AIO_ERROR = 317
- SYS_AIO_READ = 318
- SYS_AIO_WRITE = 319
- SYS_LIO_LISTIO = 320
- SYS_IOPOLICYSYS = 322
- SYS_PROCESS_POLICY = 323
- SYS_MLOCKALL = 324
- SYS_MUNLOCKALL = 325
- SYS_ISSETUGID = 327
- SYS___PTHREAD_KILL = 328
- SYS___PTHREAD_SIGMASK = 329
- SYS___SIGWAIT = 330
- SYS___DISABLE_THREADSIGNAL = 331
- SYS___PTHREAD_MARKCANCEL = 332
- SYS___PTHREAD_CANCELED = 333
- SYS___SEMWAIT_SIGNAL = 334
- SYS_PROC_INFO = 336
- SYS_SENDFILE = 337
- SYS_STAT64 = 338
- SYS_FSTAT64 = 339
- SYS_LSTAT64 = 340
- SYS_STAT64_EXTENDED = 341
- SYS_LSTAT64_EXTENDED = 342
- SYS_FSTAT64_EXTENDED = 343
- SYS_GETDIRENTRIES64 = 344
- SYS_STATFS64 = 345
- SYS_FSTATFS64 = 346
- SYS_GETFSSTAT64 = 347
- SYS___PTHREAD_CHDIR = 348
- SYS___PTHREAD_FCHDIR = 349
- SYS_AUDIT = 350
- SYS_AUDITON = 351
- SYS_GETAUID = 353
- SYS_SETAUID = 354
- SYS_GETAUDIT_ADDR = 357
- SYS_SETAUDIT_ADDR = 358
- SYS_AUDITCTL = 359
- SYS_BSDTHREAD_CREATE = 360
- SYS_BSDTHREAD_TERMINATE = 361
- SYS_KQUEUE = 362
- SYS_KEVENT = 363
- SYS_LCHOWN = 364
- SYS_BSDTHREAD_REGISTER = 366
- SYS_WORKQ_OPEN = 367
- SYS_WORKQ_KERNRETURN = 368
- SYS_KEVENT64 = 369
- SYS___OLD_SEMWAIT_SIGNAL = 370
- SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371
- SYS_THREAD_SELFID = 372
- SYS_LEDGER = 373
- SYS_KEVENT_QOS = 374
- SYS_KEVENT_ID = 375
- SYS___MAC_EXECVE = 380
- SYS___MAC_SYSCALL = 381
- SYS___MAC_GET_FILE = 382
- SYS___MAC_SET_FILE = 383
- SYS___MAC_GET_LINK = 384
- SYS___MAC_SET_LINK = 385
- SYS___MAC_GET_PROC = 386
- SYS___MAC_SET_PROC = 387
- SYS___MAC_GET_FD = 388
- SYS___MAC_SET_FD = 389
- SYS___MAC_GET_PID = 390
- SYS_PSELECT = 394
- SYS_PSELECT_NOCANCEL = 395
- SYS_READ_NOCANCEL = 396
- SYS_WRITE_NOCANCEL = 397
- SYS_OPEN_NOCANCEL = 398
- SYS_CLOSE_NOCANCEL = 399
- SYS_WAIT4_NOCANCEL = 400
- SYS_RECVMSG_NOCANCEL = 401
- SYS_SENDMSG_NOCANCEL = 402
- SYS_RECVFROM_NOCANCEL = 403
- SYS_ACCEPT_NOCANCEL = 404
- SYS_MSYNC_NOCANCEL = 405
- SYS_FCNTL_NOCANCEL = 406
- SYS_SELECT_NOCANCEL = 407
- SYS_FSYNC_NOCANCEL = 408
- SYS_CONNECT_NOCANCEL = 409
- SYS_SIGSUSPEND_NOCANCEL = 410
- SYS_READV_NOCANCEL = 411
- SYS_WRITEV_NOCANCEL = 412
- SYS_SENDTO_NOCANCEL = 413
- SYS_PREAD_NOCANCEL = 414
- SYS_PWRITE_NOCANCEL = 415
- SYS_WAITID_NOCANCEL = 416
- SYS_POLL_NOCANCEL = 417
- SYS_MSGSND_NOCANCEL = 418
- SYS_MSGRCV_NOCANCEL = 419
- SYS_SEM_WAIT_NOCANCEL = 420
- SYS_AIO_SUSPEND_NOCANCEL = 421
- SYS___SIGWAIT_NOCANCEL = 422
- SYS___SEMWAIT_SIGNAL_NOCANCEL = 423
- SYS___MAC_MOUNT = 424
- SYS___MAC_GET_MOUNT = 425
- SYS___MAC_GETFSSTAT = 426
- SYS_FSGETPATH = 427
- SYS_AUDIT_SESSION_SELF = 428
- SYS_AUDIT_SESSION_JOIN = 429
- SYS_FILEPORT_MAKEPORT = 430
- SYS_FILEPORT_MAKEFD = 431
- SYS_AUDIT_SESSION_PORT = 432
- SYS_PID_SUSPEND = 433
- SYS_PID_RESUME = 434
- SYS_PID_HIBERNATE = 435
- SYS_PID_SHUTDOWN_SOCKETS = 436
- SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
- SYS_KAS_INFO = 439
- SYS_MEMORYSTATUS_CONTROL = 440
- SYS_GUARDED_OPEN_NP = 441
- SYS_GUARDED_CLOSE_NP = 442
- SYS_GUARDED_KQUEUE_NP = 443
- SYS_CHANGE_FDGUARD_NP = 444
- SYS_USRCTL = 445
- SYS_PROC_RLIMIT_CONTROL = 446
- SYS_CONNECTX = 447
- SYS_DISCONNECTX = 448
- SYS_PEELOFF = 449
- SYS_SOCKET_DELEGATE = 450
- SYS_TELEMETRY = 451
- SYS_PROC_UUID_POLICY = 452
- SYS_MEMORYSTATUS_GET_LEVEL = 453
- SYS_SYSTEM_OVERRIDE = 454
- SYS_VFS_PURGE = 455
- SYS_SFI_CTL = 456
- SYS_SFI_PIDCTL = 457
- SYS_COALITION = 458
- SYS_COALITION_INFO = 459
- SYS_NECP_MATCH_POLICY = 460
- SYS_GETATTRLISTBULK = 461
- SYS_CLONEFILEAT = 462
- SYS_OPENAT = 463
- SYS_OPENAT_NOCANCEL = 464
- SYS_RENAMEAT = 465
- SYS_FACCESSAT = 466
- SYS_FCHMODAT = 467
- SYS_FCHOWNAT = 468
- SYS_FSTATAT = 469
- SYS_FSTATAT64 = 470
- SYS_LINKAT = 471
- SYS_UNLINKAT = 472
- SYS_READLINKAT = 473
- SYS_SYMLINKAT = 474
- SYS_MKDIRAT = 475
- SYS_GETATTRLISTAT = 476
- SYS_PROC_TRACE_LOG = 477
- SYS_BSDTHREAD_CTL = 478
- SYS_OPENBYID_NP = 479
- SYS_RECVMSG_X = 480
- SYS_SENDMSG_X = 481
- SYS_THREAD_SELFUSAGE = 482
- SYS_CSRCTL = 483
- SYS_GUARDED_OPEN_DPROTECTED_NP = 484
- SYS_GUARDED_WRITE_NP = 485
- SYS_GUARDED_PWRITE_NP = 486
- SYS_GUARDED_WRITEV_NP = 487
- SYS_RENAMEATX_NP = 488
- SYS_MREMAP_ENCRYPTED = 489
- SYS_NETAGENT_TRIGGER = 490
- SYS_STACK_SNAPSHOT_WITH_CONFIG = 491
- SYS_MICROSTACKSHOT = 492
- SYS_GRAB_PGO_DATA = 493
- SYS_PERSONA = 494
- SYS_WORK_INTERVAL_CTL = 499
- SYS_GETENTROPY = 500
- SYS_NECP_OPEN = 501
- SYS_NECP_CLIENT_ACTION = 502
- SYS___NEXUS_OPEN = 503
- SYS___NEXUS_REGISTER = 504
- SYS___NEXUS_DEREGISTER = 505
- SYS___NEXUS_CREATE = 506
- SYS___NEXUS_DESTROY = 507
- SYS___NEXUS_GET_OPT = 508
- SYS___NEXUS_SET_OPT = 509
- SYS___CHANNEL_OPEN = 510
- SYS___CHANNEL_GET_INFO = 511
- SYS___CHANNEL_SYNC = 512
- SYS___CHANNEL_GET_OPT = 513
- SYS___CHANNEL_SET_OPT = 514
- SYS_ULOCK_WAIT = 515
- SYS_ULOCK_WAKE = 516
- SYS_FCLONEFILEAT = 517
- SYS_FS_SNAPSHOT = 518
- SYS_TERMINATE_WITH_PAYLOAD = 520
- SYS_ABORT_WITH_PAYLOAD = 521
- SYS_NECP_SESSION_OPEN = 522
- SYS_NECP_SESSION_ACTION = 523
- SYS_SETATTRLISTAT = 524
- SYS_NET_QOS_GUIDELINE = 525
- SYS_FMOUNT = 526
- SYS_NTP_ADJTIME = 527
- SYS_NTP_GETTIME = 528
- SYS_OS_FAULT_WITH_PAYLOAD = 529
- SYS_MAXSYSCALL = 530
- SYS_INVALID = 63
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
deleted file mode 100644
index 654dd3da..00000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
+++ /dev/null
@@ -1,438 +0,0 @@
-// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.14.sdk/usr/include/sys/syscall.h
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build amd64,darwin
-
-package unix
-
-const (
- SYS_SYSCALL = 0
- SYS_EXIT = 1
- SYS_FORK = 2
- SYS_READ = 3
- SYS_WRITE = 4
- SYS_OPEN = 5
- SYS_CLOSE = 6
- SYS_WAIT4 = 7
- SYS_LINK = 9
- SYS_UNLINK = 10
- SYS_CHDIR = 12
- SYS_FCHDIR = 13
- SYS_MKNOD = 14
- SYS_CHMOD = 15
- SYS_CHOWN = 16
- SYS_GETFSSTAT = 18
- SYS_GETPID = 20
- SYS_SETUID = 23
- SYS_GETUID = 24
- SYS_GETEUID = 25
- SYS_PTRACE = 26
- SYS_RECVMSG = 27
- SYS_SENDMSG = 28
- SYS_RECVFROM = 29
- SYS_ACCEPT = 30
- SYS_GETPEERNAME = 31
- SYS_GETSOCKNAME = 32
- SYS_ACCESS = 33
- SYS_CHFLAGS = 34
- SYS_FCHFLAGS = 35
- SYS_SYNC = 36
- SYS_KILL = 37
- SYS_GETPPID = 39
- SYS_DUP = 41
- SYS_PIPE = 42
- SYS_GETEGID = 43
- SYS_SIGACTION = 46
- SYS_GETGID = 47
- SYS_SIGPROCMASK = 48
- SYS_GETLOGIN = 49
- SYS_SETLOGIN = 50
- SYS_ACCT = 51
- SYS_SIGPENDING = 52
- SYS_SIGALTSTACK = 53
- SYS_IOCTL = 54
- SYS_REBOOT = 55
- SYS_REVOKE = 56
- SYS_SYMLINK = 57
- SYS_READLINK = 58
- SYS_EXECVE = 59
- SYS_UMASK = 60
- SYS_CHROOT = 61
- SYS_MSYNC = 65
- SYS_VFORK = 66
- SYS_MUNMAP = 73
- SYS_MPROTECT = 74
- SYS_MADVISE = 75
- SYS_MINCORE = 78
- SYS_GETGROUPS = 79
- SYS_SETGROUPS = 80
- SYS_GETPGRP = 81
- SYS_SETPGID = 82
- SYS_SETITIMER = 83
- SYS_SWAPON = 85
- SYS_GETITIMER = 86
- SYS_GETDTABLESIZE = 89
- SYS_DUP2 = 90
- SYS_FCNTL = 92
- SYS_SELECT = 93
- SYS_FSYNC = 95
- SYS_SETPRIORITY = 96
- SYS_SOCKET = 97
- SYS_CONNECT = 98
- SYS_GETPRIORITY = 100
- SYS_BIND = 104
- SYS_SETSOCKOPT = 105
- SYS_LISTEN = 106
- SYS_SIGSUSPEND = 111
- SYS_GETTIMEOFDAY = 116
- SYS_GETRUSAGE = 117
- SYS_GETSOCKOPT = 118
- SYS_READV = 120
- SYS_WRITEV = 121
- SYS_SETTIMEOFDAY = 122
- SYS_FCHOWN = 123
- SYS_FCHMOD = 124
- SYS_SETREUID = 126
- SYS_SETREGID = 127
- SYS_RENAME = 128
- SYS_FLOCK = 131
- SYS_MKFIFO = 132
- SYS_SENDTO = 133
- SYS_SHUTDOWN = 134
- SYS_SOCKETPAIR = 135
- SYS_MKDIR = 136
- SYS_RMDIR = 137
- SYS_UTIMES = 138
- SYS_FUTIMES = 139
- SYS_ADJTIME = 140
- SYS_GETHOSTUUID = 142
- SYS_SETSID = 147
- SYS_GETPGID = 151
- SYS_SETPRIVEXEC = 152
- SYS_PREAD = 153
- SYS_PWRITE = 154
- SYS_NFSSVC = 155
- SYS_STATFS = 157
- SYS_FSTATFS = 158
- SYS_UNMOUNT = 159
- SYS_GETFH = 161
- SYS_QUOTACTL = 165
- SYS_MOUNT = 167
- SYS_CSOPS = 169
- SYS_CSOPS_AUDITTOKEN = 170
- SYS_WAITID = 173
- SYS_KDEBUG_TYPEFILTER = 177
- SYS_KDEBUG_TRACE_STRING = 178
- SYS_KDEBUG_TRACE64 = 179
- SYS_KDEBUG_TRACE = 180
- SYS_SETGID = 181
- SYS_SETEGID = 182
- SYS_SETEUID = 183
- SYS_SIGRETURN = 184
- SYS_THREAD_SELFCOUNTS = 186
- SYS_FDATASYNC = 187
- SYS_STAT = 188
- SYS_FSTAT = 189
- SYS_LSTAT = 190
- SYS_PATHCONF = 191
- SYS_FPATHCONF = 192
- SYS_GETRLIMIT = 194
- SYS_SETRLIMIT = 195
- SYS_GETDIRENTRIES = 196
- SYS_MMAP = 197
- SYS_LSEEK = 199
- SYS_TRUNCATE = 200
- SYS_FTRUNCATE = 201
- SYS_SYSCTL = 202
- SYS_MLOCK = 203
- SYS_MUNLOCK = 204
- SYS_UNDELETE = 205
- SYS_OPEN_DPROTECTED_NP = 216
- SYS_GETATTRLIST = 220
- SYS_SETATTRLIST = 221
- SYS_GETDIRENTRIESATTR = 222
- SYS_EXCHANGEDATA = 223
- SYS_SEARCHFS = 225
- SYS_DELETE = 226
- SYS_COPYFILE = 227
- SYS_FGETATTRLIST = 228
- SYS_FSETATTRLIST = 229
- SYS_POLL = 230
- SYS_WATCHEVENT = 231
- SYS_WAITEVENT = 232
- SYS_MODWATCH = 233
- SYS_GETXATTR = 234
- SYS_FGETXATTR = 235
- SYS_SETXATTR = 236
- SYS_FSETXATTR = 237
- SYS_REMOVEXATTR = 238
- SYS_FREMOVEXATTR = 239
- SYS_LISTXATTR = 240
- SYS_FLISTXATTR = 241
- SYS_FSCTL = 242
- SYS_INITGROUPS = 243
- SYS_POSIX_SPAWN = 244
- SYS_FFSCTL = 245
- SYS_NFSCLNT = 247
- SYS_FHOPEN = 248
- SYS_MINHERIT = 250
- SYS_SEMSYS = 251
- SYS_MSGSYS = 252
- SYS_SHMSYS = 253
- SYS_SEMCTL = 254
- SYS_SEMGET = 255
- SYS_SEMOP = 256
- SYS_MSGCTL = 258
- SYS_MSGGET = 259
- SYS_MSGSND = 260
- SYS_MSGRCV = 261
- SYS_SHMAT = 262
- SYS_SHMCTL = 263
- SYS_SHMDT = 264
- SYS_SHMGET = 265
- SYS_SHM_OPEN = 266
- SYS_SHM_UNLINK = 267
- SYS_SEM_OPEN = 268
- SYS_SEM_CLOSE = 269
- SYS_SEM_UNLINK = 270
- SYS_SEM_WAIT = 271
- SYS_SEM_TRYWAIT = 272
- SYS_SEM_POST = 273
- SYS_SYSCTLBYNAME = 274
- SYS_OPEN_EXTENDED = 277
- SYS_UMASK_EXTENDED = 278
- SYS_STAT_EXTENDED = 279
- SYS_LSTAT_EXTENDED = 280
- SYS_FSTAT_EXTENDED = 281
- SYS_CHMOD_EXTENDED = 282
- SYS_FCHMOD_EXTENDED = 283
- SYS_ACCESS_EXTENDED = 284
- SYS_SETTID = 285
- SYS_GETTID = 286
- SYS_SETSGROUPS = 287
- SYS_GETSGROUPS = 288
- SYS_SETWGROUPS = 289
- SYS_GETWGROUPS = 290
- SYS_MKFIFO_EXTENDED = 291
- SYS_MKDIR_EXTENDED = 292
- SYS_IDENTITYSVC = 293
- SYS_SHARED_REGION_CHECK_NP = 294
- SYS_VM_PRESSURE_MONITOR = 296
- SYS_PSYNCH_RW_LONGRDLOCK = 297
- SYS_PSYNCH_RW_YIELDWRLOCK = 298
- SYS_PSYNCH_RW_DOWNGRADE = 299
- SYS_PSYNCH_RW_UPGRADE = 300
- SYS_PSYNCH_MUTEXWAIT = 301
- SYS_PSYNCH_MUTEXDROP = 302
- SYS_PSYNCH_CVBROAD = 303
- SYS_PSYNCH_CVSIGNAL = 304
- SYS_PSYNCH_CVWAIT = 305
- SYS_PSYNCH_RW_RDLOCK = 306
- SYS_PSYNCH_RW_WRLOCK = 307
- SYS_PSYNCH_RW_UNLOCK = 308
- SYS_PSYNCH_RW_UNLOCK2 = 309
- SYS_GETSID = 310
- SYS_SETTID_WITH_PID = 311
- SYS_PSYNCH_CVCLRPREPOST = 312
- SYS_AIO_FSYNC = 313
- SYS_AIO_RETURN = 314
- SYS_AIO_SUSPEND = 315
- SYS_AIO_CANCEL = 316
- SYS_AIO_ERROR = 317
- SYS_AIO_READ = 318
- SYS_AIO_WRITE = 319
- SYS_LIO_LISTIO = 320
- SYS_IOPOLICYSYS = 322
- SYS_PROCESS_POLICY = 323
- SYS_MLOCKALL = 324
- SYS_MUNLOCKALL = 325
- SYS_ISSETUGID = 327
- SYS___PTHREAD_KILL = 328
- SYS___PTHREAD_SIGMASK = 329
- SYS___SIGWAIT = 330
- SYS___DISABLE_THREADSIGNAL = 331
- SYS___PTHREAD_MARKCANCEL = 332
- SYS___PTHREAD_CANCELED = 333
- SYS___SEMWAIT_SIGNAL = 334
- SYS_PROC_INFO = 336
- SYS_SENDFILE = 337
- SYS_STAT64 = 338
- SYS_FSTAT64 = 339
- SYS_LSTAT64 = 340
- SYS_STAT64_EXTENDED = 341
- SYS_LSTAT64_EXTENDED = 342
- SYS_FSTAT64_EXTENDED = 343
- SYS_GETDIRENTRIES64 = 344
- SYS_STATFS64 = 345
- SYS_FSTATFS64 = 346
- SYS_GETFSSTAT64 = 347
- SYS___PTHREAD_CHDIR = 348
- SYS___PTHREAD_FCHDIR = 349
- SYS_AUDIT = 350
- SYS_AUDITON = 351
- SYS_GETAUID = 353
- SYS_SETAUID = 354
- SYS_GETAUDIT_ADDR = 357
- SYS_SETAUDIT_ADDR = 358
- SYS_AUDITCTL = 359
- SYS_BSDTHREAD_CREATE = 360
- SYS_BSDTHREAD_TERMINATE = 361
- SYS_KQUEUE = 362
- SYS_KEVENT = 363
- SYS_LCHOWN = 364
- SYS_BSDTHREAD_REGISTER = 366
- SYS_WORKQ_OPEN = 367
- SYS_WORKQ_KERNRETURN = 368
- SYS_KEVENT64 = 369
- SYS___OLD_SEMWAIT_SIGNAL = 370
- SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371
- SYS_THREAD_SELFID = 372
- SYS_LEDGER = 373
- SYS_KEVENT_QOS = 374
- SYS_KEVENT_ID = 375
- SYS___MAC_EXECVE = 380
- SYS___MAC_SYSCALL = 381
- SYS___MAC_GET_FILE = 382
- SYS___MAC_SET_FILE = 383
- SYS___MAC_GET_LINK = 384
- SYS___MAC_SET_LINK = 385
- SYS___MAC_GET_PROC = 386
- SYS___MAC_SET_PROC = 387
- SYS___MAC_GET_FD = 388
- SYS___MAC_SET_FD = 389
- SYS___MAC_GET_PID = 390
- SYS_PSELECT = 394
- SYS_PSELECT_NOCANCEL = 395
- SYS_READ_NOCANCEL = 396
- SYS_WRITE_NOCANCEL = 397
- SYS_OPEN_NOCANCEL = 398
- SYS_CLOSE_NOCANCEL = 399
- SYS_WAIT4_NOCANCEL = 400
- SYS_RECVMSG_NOCANCEL = 401
- SYS_SENDMSG_NOCANCEL = 402
- SYS_RECVFROM_NOCANCEL = 403
- SYS_ACCEPT_NOCANCEL = 404
- SYS_MSYNC_NOCANCEL = 405
- SYS_FCNTL_NOCANCEL = 406
- SYS_SELECT_NOCANCEL = 407
- SYS_FSYNC_NOCANCEL = 408
- SYS_CONNECT_NOCANCEL = 409
- SYS_SIGSUSPEND_NOCANCEL = 410
- SYS_READV_NOCANCEL = 411
- SYS_WRITEV_NOCANCEL = 412
- SYS_SENDTO_NOCANCEL = 413
- SYS_PREAD_NOCANCEL = 414
- SYS_PWRITE_NOCANCEL = 415
- SYS_WAITID_NOCANCEL = 416
- SYS_POLL_NOCANCEL = 417
- SYS_MSGSND_NOCANCEL = 418
- SYS_MSGRCV_NOCANCEL = 419
- SYS_SEM_WAIT_NOCANCEL = 420
- SYS_AIO_SUSPEND_NOCANCEL = 421
- SYS___SIGWAIT_NOCANCEL = 422
- SYS___SEMWAIT_SIGNAL_NOCANCEL = 423
- SYS___MAC_MOUNT = 424
- SYS___MAC_GET_MOUNT = 425
- SYS___MAC_GETFSSTAT = 426
- SYS_FSGETPATH = 427
- SYS_AUDIT_SESSION_SELF = 428
- SYS_AUDIT_SESSION_JOIN = 429
- SYS_FILEPORT_MAKEPORT = 430
- SYS_FILEPORT_MAKEFD = 431
- SYS_AUDIT_SESSION_PORT = 432
- SYS_PID_SUSPEND = 433
- SYS_PID_RESUME = 434
- SYS_PID_HIBERNATE = 435
- SYS_PID_SHUTDOWN_SOCKETS = 436
- SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
- SYS_KAS_INFO = 439
- SYS_MEMORYSTATUS_CONTROL = 440
- SYS_GUARDED_OPEN_NP = 441
- SYS_GUARDED_CLOSE_NP = 442
- SYS_GUARDED_KQUEUE_NP = 443
- SYS_CHANGE_FDGUARD_NP = 444
- SYS_USRCTL = 445
- SYS_PROC_RLIMIT_CONTROL = 446
- SYS_CONNECTX = 447
- SYS_DISCONNECTX = 448
- SYS_PEELOFF = 449
- SYS_SOCKET_DELEGATE = 450
- SYS_TELEMETRY = 451
- SYS_PROC_UUID_POLICY = 452
- SYS_MEMORYSTATUS_GET_LEVEL = 453
- SYS_SYSTEM_OVERRIDE = 454
- SYS_VFS_PURGE = 455
- SYS_SFI_CTL = 456
- SYS_SFI_PIDCTL = 457
- SYS_COALITION = 458
- SYS_COALITION_INFO = 459
- SYS_NECP_MATCH_POLICY = 460
- SYS_GETATTRLISTBULK = 461
- SYS_CLONEFILEAT = 462
- SYS_OPENAT = 463
- SYS_OPENAT_NOCANCEL = 464
- SYS_RENAMEAT = 465
- SYS_FACCESSAT = 466
- SYS_FCHMODAT = 467
- SYS_FCHOWNAT = 468
- SYS_FSTATAT = 469
- SYS_FSTATAT64 = 470
- SYS_LINKAT = 471
- SYS_UNLINKAT = 472
- SYS_READLINKAT = 473
- SYS_SYMLINKAT = 474
- SYS_MKDIRAT = 475
- SYS_GETATTRLISTAT = 476
- SYS_PROC_TRACE_LOG = 477
- SYS_BSDTHREAD_CTL = 478
- SYS_OPENBYID_NP = 479
- SYS_RECVMSG_X = 480
- SYS_SENDMSG_X = 481
- SYS_THREAD_SELFUSAGE = 482
- SYS_CSRCTL = 483
- SYS_GUARDED_OPEN_DPROTECTED_NP = 484
- SYS_GUARDED_WRITE_NP = 485
- SYS_GUARDED_PWRITE_NP = 486
- SYS_GUARDED_WRITEV_NP = 487
- SYS_RENAMEATX_NP = 488
- SYS_MREMAP_ENCRYPTED = 489
- SYS_NETAGENT_TRIGGER = 490
- SYS_STACK_SNAPSHOT_WITH_CONFIG = 491
- SYS_MICROSTACKSHOT = 492
- SYS_GRAB_PGO_DATA = 493
- SYS_PERSONA = 494
- SYS_WORK_INTERVAL_CTL = 499
- SYS_GETENTROPY = 500
- SYS_NECP_OPEN = 501
- SYS_NECP_CLIENT_ACTION = 502
- SYS___NEXUS_OPEN = 503
- SYS___NEXUS_REGISTER = 504
- SYS___NEXUS_DEREGISTER = 505
- SYS___NEXUS_CREATE = 506
- SYS___NEXUS_DESTROY = 507
- SYS___NEXUS_GET_OPT = 508
- SYS___NEXUS_SET_OPT = 509
- SYS___CHANNEL_OPEN = 510
- SYS___CHANNEL_GET_INFO = 511
- SYS___CHANNEL_SYNC = 512
- SYS___CHANNEL_GET_OPT = 513
- SYS___CHANNEL_SET_OPT = 514
- SYS_ULOCK_WAIT = 515
- SYS_ULOCK_WAKE = 516
- SYS_FCLONEFILEAT = 517
- SYS_FS_SNAPSHOT = 518
- SYS_TERMINATE_WITH_PAYLOAD = 520
- SYS_ABORT_WITH_PAYLOAD = 521
- SYS_NECP_SESSION_OPEN = 522
- SYS_NECP_SESSION_ACTION = 523
- SYS_SETATTRLISTAT = 524
- SYS_NET_QOS_GUIDELINE = 525
- SYS_FMOUNT = 526
- SYS_NTP_ADJTIME = 527
- SYS_NTP_GETTIME = 528
- SYS_OS_FAULT_WITH_PAYLOAD = 529
- SYS_KQUEUE_WORKLOOP_CTL = 530
- SYS___MACH_BRIDGE_REMOTE_TIME = 531
- SYS_MAXSYSCALL = 532
- SYS_INVALID = 63
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
deleted file mode 100644
index 103a72ed..00000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build arm,darwin
-
-package unix
-
-const (
- SYS_SYSCALL = 0
- SYS_EXIT = 1
- SYS_FORK = 2
- SYS_READ = 3
- SYS_WRITE = 4
- SYS_OPEN = 5
- SYS_CLOSE = 6
- SYS_WAIT4 = 7
- SYS_LINK = 9
- SYS_UNLINK = 10
- SYS_CHDIR = 12
- SYS_FCHDIR = 13
- SYS_MKNOD = 14
- SYS_CHMOD = 15
- SYS_CHOWN = 16
- SYS_GETFSSTAT = 18
- SYS_GETPID = 20
- SYS_SETUID = 23
- SYS_GETUID = 24
- SYS_GETEUID = 25
- SYS_PTRACE = 26
- SYS_RECVMSG = 27
- SYS_SENDMSG = 28
- SYS_RECVFROM = 29
- SYS_ACCEPT = 30
- SYS_GETPEERNAME = 31
- SYS_GETSOCKNAME = 32
- SYS_ACCESS = 33
- SYS_CHFLAGS = 34
- SYS_FCHFLAGS = 35
- SYS_SYNC = 36
- SYS_KILL = 37
- SYS_GETPPID = 39
- SYS_DUP = 41
- SYS_PIPE = 42
- SYS_GETEGID = 43
- SYS_SIGACTION = 46
- SYS_GETGID = 47
- SYS_SIGPROCMASK = 48
- SYS_GETLOGIN = 49
- SYS_SETLOGIN = 50
- SYS_ACCT = 51
- SYS_SIGPENDING = 52
- SYS_SIGALTSTACK = 53
- SYS_IOCTL = 54
- SYS_REBOOT = 55
- SYS_REVOKE = 56
- SYS_SYMLINK = 57
- SYS_READLINK = 58
- SYS_EXECVE = 59
- SYS_UMASK = 60
- SYS_CHROOT = 61
- SYS_MSYNC = 65
- SYS_VFORK = 66
- SYS_MUNMAP = 73
- SYS_MPROTECT = 74
- SYS_MADVISE = 75
- SYS_MINCORE = 78
- SYS_GETGROUPS = 79
- SYS_SETGROUPS = 80
- SYS_GETPGRP = 81
- SYS_SETPGID = 82
- SYS_SETITIMER = 83
- SYS_SWAPON = 85
- SYS_GETITIMER = 86
- SYS_GETDTABLESIZE = 89
- SYS_DUP2 = 90
- SYS_FCNTL = 92
- SYS_SELECT = 93
- SYS_FSYNC = 95
- SYS_SETPRIORITY = 96
- SYS_SOCKET = 97
- SYS_CONNECT = 98
- SYS_GETPRIORITY = 100
- SYS_BIND = 104
- SYS_SETSOCKOPT = 105
- SYS_LISTEN = 106
- SYS_SIGSUSPEND = 111
- SYS_GETTIMEOFDAY = 116
- SYS_GETRUSAGE = 117
- SYS_GETSOCKOPT = 118
- SYS_READV = 120
- SYS_WRITEV = 121
- SYS_SETTIMEOFDAY = 122
- SYS_FCHOWN = 123
- SYS_FCHMOD = 124
- SYS_SETREUID = 126
- SYS_SETREGID = 127
- SYS_RENAME = 128
- SYS_FLOCK = 131
- SYS_MKFIFO = 132
- SYS_SENDTO = 133
- SYS_SHUTDOWN = 134
- SYS_SOCKETPAIR = 135
- SYS_MKDIR = 136
- SYS_RMDIR = 137
- SYS_UTIMES = 138
- SYS_FUTIMES = 139
- SYS_ADJTIME = 140
- SYS_GETHOSTUUID = 142
- SYS_SETSID = 147
- SYS_GETPGID = 151
- SYS_SETPRIVEXEC = 152
- SYS_PREAD = 153
- SYS_PWRITE = 154
- SYS_NFSSVC = 155
- SYS_STATFS = 157
- SYS_FSTATFS = 158
- SYS_UNMOUNT = 159
- SYS_GETFH = 161
- SYS_QUOTACTL = 165
- SYS_MOUNT = 167
- SYS_CSOPS = 169
- SYS_CSOPS_AUDITTOKEN = 170
- SYS_WAITID = 173
- SYS_KDEBUG_TYPEFILTER = 177
- SYS_KDEBUG_TRACE_STRING = 178
- SYS_KDEBUG_TRACE64 = 179
- SYS_KDEBUG_TRACE = 180
- SYS_SETGID = 181
- SYS_SETEGID = 182
- SYS_SETEUID = 183
- SYS_SIGRETURN = 184
- SYS_THREAD_SELFCOUNTS = 186
- SYS_FDATASYNC = 187
- SYS_STAT = 188
- SYS_FSTAT = 189
- SYS_LSTAT = 190
- SYS_PATHCONF = 191
- SYS_FPATHCONF = 192
- SYS_GETRLIMIT = 194
- SYS_SETRLIMIT = 195
- SYS_GETDIRENTRIES = 196
- SYS_MMAP = 197
- SYS_LSEEK = 199
- SYS_TRUNCATE = 200
- SYS_FTRUNCATE = 201
- SYS_SYSCTL = 202
- SYS_MLOCK = 203
- SYS_MUNLOCK = 204
- SYS_UNDELETE = 205
- SYS_OPEN_DPROTECTED_NP = 216
- SYS_GETATTRLIST = 220
- SYS_SETATTRLIST = 221
- SYS_GETDIRENTRIESATTR = 222
- SYS_EXCHANGEDATA = 223
- SYS_SEARCHFS = 225
- SYS_DELETE = 226
- SYS_COPYFILE = 227
- SYS_FGETATTRLIST = 228
- SYS_FSETATTRLIST = 229
- SYS_POLL = 230
- SYS_WATCHEVENT = 231
- SYS_WAITEVENT = 232
- SYS_MODWATCH = 233
- SYS_GETXATTR = 234
- SYS_FGETXATTR = 235
- SYS_SETXATTR = 236
- SYS_FSETXATTR = 237
- SYS_REMOVEXATTR = 238
- SYS_FREMOVEXATTR = 239
- SYS_LISTXATTR = 240
- SYS_FLISTXATTR = 241
- SYS_FSCTL = 242
- SYS_INITGROUPS = 243
- SYS_POSIX_SPAWN = 244
- SYS_FFSCTL = 245
- SYS_NFSCLNT = 247
- SYS_FHOPEN = 248
- SYS_MINHERIT = 250
- SYS_SEMSYS = 251
- SYS_MSGSYS = 252
- SYS_SHMSYS = 253
- SYS_SEMCTL = 254
- SYS_SEMGET = 255
- SYS_SEMOP = 256
- SYS_MSGCTL = 258
- SYS_MSGGET = 259
- SYS_MSGSND = 260
- SYS_MSGRCV = 261
- SYS_SHMAT = 262
- SYS_SHMCTL = 263
- SYS_SHMDT = 264
- SYS_SHMGET = 265
- SYS_SHM_OPEN = 266
- SYS_SHM_UNLINK = 267
- SYS_SEM_OPEN = 268
- SYS_SEM_CLOSE = 269
- SYS_SEM_UNLINK = 270
- SYS_SEM_WAIT = 271
- SYS_SEM_TRYWAIT = 272
- SYS_SEM_POST = 273
- SYS_SYSCTLBYNAME = 274
- SYS_OPEN_EXTENDED = 277
- SYS_UMASK_EXTENDED = 278
- SYS_STAT_EXTENDED = 279
- SYS_LSTAT_EXTENDED = 280
- SYS_FSTAT_EXTENDED = 281
- SYS_CHMOD_EXTENDED = 282
- SYS_FCHMOD_EXTENDED = 283
- SYS_ACCESS_EXTENDED = 284
- SYS_SETTID = 285
- SYS_GETTID = 286
- SYS_SETSGROUPS = 287
- SYS_GETSGROUPS = 288
- SYS_SETWGROUPS = 289
- SYS_GETWGROUPS = 290
- SYS_MKFIFO_EXTENDED = 291
- SYS_MKDIR_EXTENDED = 292
- SYS_IDENTITYSVC = 293
- SYS_SHARED_REGION_CHECK_NP = 294
- SYS_VM_PRESSURE_MONITOR = 296
- SYS_PSYNCH_RW_LONGRDLOCK = 297
- SYS_PSYNCH_RW_YIELDWRLOCK = 298
- SYS_PSYNCH_RW_DOWNGRADE = 299
- SYS_PSYNCH_RW_UPGRADE = 300
- SYS_PSYNCH_MUTEXWAIT = 301
- SYS_PSYNCH_MUTEXDROP = 302
- SYS_PSYNCH_CVBROAD = 303
- SYS_PSYNCH_CVSIGNAL = 304
- SYS_PSYNCH_CVWAIT = 305
- SYS_PSYNCH_RW_RDLOCK = 306
- SYS_PSYNCH_RW_WRLOCK = 307
- SYS_PSYNCH_RW_UNLOCK = 308
- SYS_PSYNCH_RW_UNLOCK2 = 309
- SYS_GETSID = 310
- SYS_SETTID_WITH_PID = 311
- SYS_PSYNCH_CVCLRPREPOST = 312
- SYS_AIO_FSYNC = 313
- SYS_AIO_RETURN = 314
- SYS_AIO_SUSPEND = 315
- SYS_AIO_CANCEL = 316
- SYS_AIO_ERROR = 317
- SYS_AIO_READ = 318
- SYS_AIO_WRITE = 319
- SYS_LIO_LISTIO = 320
- SYS_IOPOLICYSYS = 322
- SYS_PROCESS_POLICY = 323
- SYS_MLOCKALL = 324
- SYS_MUNLOCKALL = 325
- SYS_ISSETUGID = 327
- SYS___PTHREAD_KILL = 328
- SYS___PTHREAD_SIGMASK = 329
- SYS___SIGWAIT = 330
- SYS___DISABLE_THREADSIGNAL = 331
- SYS___PTHREAD_MARKCANCEL = 332
- SYS___PTHREAD_CANCELED = 333
- SYS___SEMWAIT_SIGNAL = 334
- SYS_PROC_INFO = 336
- SYS_SENDFILE = 337
- SYS_STAT64 = 338
- SYS_FSTAT64 = 339
- SYS_LSTAT64 = 340
- SYS_STAT64_EXTENDED = 341
- SYS_LSTAT64_EXTENDED = 342
- SYS_FSTAT64_EXTENDED = 343
- SYS_GETDIRENTRIES64 = 344
- SYS_STATFS64 = 345
- SYS_FSTATFS64 = 346
- SYS_GETFSSTAT64 = 347
- SYS___PTHREAD_CHDIR = 348
- SYS___PTHREAD_FCHDIR = 349
- SYS_AUDIT = 350
- SYS_AUDITON = 351
- SYS_GETAUID = 353
- SYS_SETAUID = 354
- SYS_GETAUDIT_ADDR = 357
- SYS_SETAUDIT_ADDR = 358
- SYS_AUDITCTL = 359
- SYS_BSDTHREAD_CREATE = 360
- SYS_BSDTHREAD_TERMINATE = 361
- SYS_KQUEUE = 362
- SYS_KEVENT = 363
- SYS_LCHOWN = 364
- SYS_BSDTHREAD_REGISTER = 366
- SYS_WORKQ_OPEN = 367
- SYS_WORKQ_KERNRETURN = 368
- SYS_KEVENT64 = 369
- SYS___OLD_SEMWAIT_SIGNAL = 370
- SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371
- SYS_THREAD_SELFID = 372
- SYS_LEDGER = 373
- SYS_KEVENT_QOS = 374
- SYS_KEVENT_ID = 375
- SYS___MAC_EXECVE = 380
- SYS___MAC_SYSCALL = 381
- SYS___MAC_GET_FILE = 382
- SYS___MAC_SET_FILE = 383
- SYS___MAC_GET_LINK = 384
- SYS___MAC_SET_LINK = 385
- SYS___MAC_GET_PROC = 386
- SYS___MAC_SET_PROC = 387
- SYS___MAC_GET_FD = 388
- SYS___MAC_SET_FD = 389
- SYS___MAC_GET_PID = 390
- SYS_PSELECT = 394
- SYS_PSELECT_NOCANCEL = 395
- SYS_READ_NOCANCEL = 396
- SYS_WRITE_NOCANCEL = 397
- SYS_OPEN_NOCANCEL = 398
- SYS_CLOSE_NOCANCEL = 399
- SYS_WAIT4_NOCANCEL = 400
- SYS_RECVMSG_NOCANCEL = 401
- SYS_SENDMSG_NOCANCEL = 402
- SYS_RECVFROM_NOCANCEL = 403
- SYS_ACCEPT_NOCANCEL = 404
- SYS_MSYNC_NOCANCEL = 405
- SYS_FCNTL_NOCANCEL = 406
- SYS_SELECT_NOCANCEL = 407
- SYS_FSYNC_NOCANCEL = 408
- SYS_CONNECT_NOCANCEL = 409
- SYS_SIGSUSPEND_NOCANCEL = 410
- SYS_READV_NOCANCEL = 411
- SYS_WRITEV_NOCANCEL = 412
- SYS_SENDTO_NOCANCEL = 413
- SYS_PREAD_NOCANCEL = 414
- SYS_PWRITE_NOCANCEL = 415
- SYS_WAITID_NOCANCEL = 416
- SYS_POLL_NOCANCEL = 417
- SYS_MSGSND_NOCANCEL = 418
- SYS_MSGRCV_NOCANCEL = 419
- SYS_SEM_WAIT_NOCANCEL = 420
- SYS_AIO_SUSPEND_NOCANCEL = 421
- SYS___SIGWAIT_NOCANCEL = 422
- SYS___SEMWAIT_SIGNAL_NOCANCEL = 423
- SYS___MAC_MOUNT = 424
- SYS___MAC_GET_MOUNT = 425
- SYS___MAC_GETFSSTAT = 426
- SYS_FSGETPATH = 427
- SYS_AUDIT_SESSION_SELF = 428
- SYS_AUDIT_SESSION_JOIN = 429
- SYS_FILEPORT_MAKEPORT = 430
- SYS_FILEPORT_MAKEFD = 431
- SYS_AUDIT_SESSION_PORT = 432
- SYS_PID_SUSPEND = 433
- SYS_PID_RESUME = 434
- SYS_PID_HIBERNATE = 435
- SYS_PID_SHUTDOWN_SOCKETS = 436
- SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
- SYS_KAS_INFO = 439
- SYS_MEMORYSTATUS_CONTROL = 440
- SYS_GUARDED_OPEN_NP = 441
- SYS_GUARDED_CLOSE_NP = 442
- SYS_GUARDED_KQUEUE_NP = 443
- SYS_CHANGE_FDGUARD_NP = 444
- SYS_USRCTL = 445
- SYS_PROC_RLIMIT_CONTROL = 446
- SYS_CONNECTX = 447
- SYS_DISCONNECTX = 448
- SYS_PEELOFF = 449
- SYS_SOCKET_DELEGATE = 450
- SYS_TELEMETRY = 451
- SYS_PROC_UUID_POLICY = 452
- SYS_MEMORYSTATUS_GET_LEVEL = 453
- SYS_SYSTEM_OVERRIDE = 454
- SYS_VFS_PURGE = 455
- SYS_SFI_CTL = 456
- SYS_SFI_PIDCTL = 457
- SYS_COALITION = 458
- SYS_COALITION_INFO = 459
- SYS_NECP_MATCH_POLICY = 460
- SYS_GETATTRLISTBULK = 461
- SYS_CLONEFILEAT = 462
- SYS_OPENAT = 463
- SYS_OPENAT_NOCANCEL = 464
- SYS_RENAMEAT = 465
- SYS_FACCESSAT = 466
- SYS_FCHMODAT = 467
- SYS_FCHOWNAT = 468
- SYS_FSTATAT = 469
- SYS_FSTATAT64 = 470
- SYS_LINKAT = 471
- SYS_UNLINKAT = 472
- SYS_READLINKAT = 473
- SYS_SYMLINKAT = 474
- SYS_MKDIRAT = 475
- SYS_GETATTRLISTAT = 476
- SYS_PROC_TRACE_LOG = 477
- SYS_BSDTHREAD_CTL = 478
- SYS_OPENBYID_NP = 479
- SYS_RECVMSG_X = 480
- SYS_SENDMSG_X = 481
- SYS_THREAD_SELFUSAGE = 482
- SYS_CSRCTL = 483
- SYS_GUARDED_OPEN_DPROTECTED_NP = 484
- SYS_GUARDED_WRITE_NP = 485
- SYS_GUARDED_PWRITE_NP = 486
- SYS_GUARDED_WRITEV_NP = 487
- SYS_RENAMEATX_NP = 488
- SYS_MREMAP_ENCRYPTED = 489
- SYS_NETAGENT_TRIGGER = 490
- SYS_STACK_SNAPSHOT_WITH_CONFIG = 491
- SYS_MICROSTACKSHOT = 492
- SYS_GRAB_PGO_DATA = 493
- SYS_PERSONA = 494
- SYS_WORK_INTERVAL_CTL = 499
- SYS_GETENTROPY = 500
- SYS_NECP_OPEN = 501
- SYS_NECP_CLIENT_ACTION = 502
- SYS___NEXUS_OPEN = 503
- SYS___NEXUS_REGISTER = 504
- SYS___NEXUS_DEREGISTER = 505
- SYS___NEXUS_CREATE = 506
- SYS___NEXUS_DESTROY = 507
- SYS___NEXUS_GET_OPT = 508
- SYS___NEXUS_SET_OPT = 509
- SYS___CHANNEL_OPEN = 510
- SYS___CHANNEL_GET_INFO = 511
- SYS___CHANNEL_SYNC = 512
- SYS___CHANNEL_GET_OPT = 513
- SYS___CHANNEL_SET_OPT = 514
- SYS_ULOCK_WAIT = 515
- SYS_ULOCK_WAKE = 516
- SYS_FCLONEFILEAT = 517
- SYS_FS_SNAPSHOT = 518
- SYS_TERMINATE_WITH_PAYLOAD = 520
- SYS_ABORT_WITH_PAYLOAD = 521
- SYS_NECP_SESSION_OPEN = 522
- SYS_NECP_SESSION_ACTION = 523
- SYS_SETATTRLISTAT = 524
- SYS_NET_QOS_GUIDELINE = 525
- SYS_FMOUNT = 526
- SYS_NTP_ADJTIME = 527
- SYS_NTP_GETTIME = 528
- SYS_OS_FAULT_WITH_PAYLOAD = 529
- SYS_MAXSYSCALL = 530
- SYS_INVALID = 63
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
deleted file mode 100644
index 7ab2130b..00000000
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// go run mksysnum.go /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS11.1.sdk/usr/include/sys/syscall.h
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-// +build arm64,darwin
-
-package unix
-
-const (
- SYS_SYSCALL = 0
- SYS_EXIT = 1
- SYS_FORK = 2
- SYS_READ = 3
- SYS_WRITE = 4
- SYS_OPEN = 5
- SYS_CLOSE = 6
- SYS_WAIT4 = 7
- SYS_LINK = 9
- SYS_UNLINK = 10
- SYS_CHDIR = 12
- SYS_FCHDIR = 13
- SYS_MKNOD = 14
- SYS_CHMOD = 15
- SYS_CHOWN = 16
- SYS_GETFSSTAT = 18
- SYS_GETPID = 20
- SYS_SETUID = 23
- SYS_GETUID = 24
- SYS_GETEUID = 25
- SYS_PTRACE = 26
- SYS_RECVMSG = 27
- SYS_SENDMSG = 28
- SYS_RECVFROM = 29
- SYS_ACCEPT = 30
- SYS_GETPEERNAME = 31
- SYS_GETSOCKNAME = 32
- SYS_ACCESS = 33
- SYS_CHFLAGS = 34
- SYS_FCHFLAGS = 35
- SYS_SYNC = 36
- SYS_KILL = 37
- SYS_GETPPID = 39
- SYS_DUP = 41
- SYS_PIPE = 42
- SYS_GETEGID = 43
- SYS_SIGACTION = 46
- SYS_GETGID = 47
- SYS_SIGPROCMASK = 48
- SYS_GETLOGIN = 49
- SYS_SETLOGIN = 50
- SYS_ACCT = 51
- SYS_SIGPENDING = 52
- SYS_SIGALTSTACK = 53
- SYS_IOCTL = 54
- SYS_REBOOT = 55
- SYS_REVOKE = 56
- SYS_SYMLINK = 57
- SYS_READLINK = 58
- SYS_EXECVE = 59
- SYS_UMASK = 60
- SYS_CHROOT = 61
- SYS_MSYNC = 65
- SYS_VFORK = 66
- SYS_MUNMAP = 73
- SYS_MPROTECT = 74
- SYS_MADVISE = 75
- SYS_MINCORE = 78
- SYS_GETGROUPS = 79
- SYS_SETGROUPS = 80
- SYS_GETPGRP = 81
- SYS_SETPGID = 82
- SYS_SETITIMER = 83
- SYS_SWAPON = 85
- SYS_GETITIMER = 86
- SYS_GETDTABLESIZE = 89
- SYS_DUP2 = 90
- SYS_FCNTL = 92
- SYS_SELECT = 93
- SYS_FSYNC = 95
- SYS_SETPRIORITY = 96
- SYS_SOCKET = 97
- SYS_CONNECT = 98
- SYS_GETPRIORITY = 100
- SYS_BIND = 104
- SYS_SETSOCKOPT = 105
- SYS_LISTEN = 106
- SYS_SIGSUSPEND = 111
- SYS_GETTIMEOFDAY = 116
- SYS_GETRUSAGE = 117
- SYS_GETSOCKOPT = 118
- SYS_READV = 120
- SYS_WRITEV = 121
- SYS_SETTIMEOFDAY = 122
- SYS_FCHOWN = 123
- SYS_FCHMOD = 124
- SYS_SETREUID = 126
- SYS_SETREGID = 127
- SYS_RENAME = 128
- SYS_FLOCK = 131
- SYS_MKFIFO = 132
- SYS_SENDTO = 133
- SYS_SHUTDOWN = 134
- SYS_SOCKETPAIR = 135
- SYS_MKDIR = 136
- SYS_RMDIR = 137
- SYS_UTIMES = 138
- SYS_FUTIMES = 139
- SYS_ADJTIME = 140
- SYS_GETHOSTUUID = 142
- SYS_SETSID = 147
- SYS_GETPGID = 151
- SYS_SETPRIVEXEC = 152
- SYS_PREAD = 153
- SYS_PWRITE = 154
- SYS_NFSSVC = 155
- SYS_STATFS = 157
- SYS_FSTATFS = 158
- SYS_UNMOUNT = 159
- SYS_GETFH = 161
- SYS_QUOTACTL = 165
- SYS_MOUNT = 167
- SYS_CSOPS = 169
- SYS_CSOPS_AUDITTOKEN = 170
- SYS_WAITID = 173
- SYS_KDEBUG_TYPEFILTER = 177
- SYS_KDEBUG_TRACE_STRING = 178
- SYS_KDEBUG_TRACE64 = 179
- SYS_KDEBUG_TRACE = 180
- SYS_SETGID = 181
- SYS_SETEGID = 182
- SYS_SETEUID = 183
- SYS_SIGRETURN = 184
- SYS_THREAD_SELFCOUNTS = 186
- SYS_FDATASYNC = 187
- SYS_STAT = 188
- SYS_FSTAT = 189
- SYS_LSTAT = 190
- SYS_PATHCONF = 191
- SYS_FPATHCONF = 192
- SYS_GETRLIMIT = 194
- SYS_SETRLIMIT = 195
- SYS_GETDIRENTRIES = 196
- SYS_MMAP = 197
- SYS_LSEEK = 199
- SYS_TRUNCATE = 200
- SYS_FTRUNCATE = 201
- SYS_SYSCTL = 202
- SYS_MLOCK = 203
- SYS_MUNLOCK = 204
- SYS_UNDELETE = 205
- SYS_OPEN_DPROTECTED_NP = 216
- SYS_GETATTRLIST = 220
- SYS_SETATTRLIST = 221
- SYS_GETDIRENTRIESATTR = 222
- SYS_EXCHANGEDATA = 223
- SYS_SEARCHFS = 225
- SYS_DELETE = 226
- SYS_COPYFILE = 227
- SYS_FGETATTRLIST = 228
- SYS_FSETATTRLIST = 229
- SYS_POLL = 230
- SYS_WATCHEVENT = 231
- SYS_WAITEVENT = 232
- SYS_MODWATCH = 233
- SYS_GETXATTR = 234
- SYS_FGETXATTR = 235
- SYS_SETXATTR = 236
- SYS_FSETXATTR = 237
- SYS_REMOVEXATTR = 238
- SYS_FREMOVEXATTR = 239
- SYS_LISTXATTR = 240
- SYS_FLISTXATTR = 241
- SYS_FSCTL = 242
- SYS_INITGROUPS = 243
- SYS_POSIX_SPAWN = 244
- SYS_FFSCTL = 245
- SYS_NFSCLNT = 247
- SYS_FHOPEN = 248
- SYS_MINHERIT = 250
- SYS_SEMSYS = 251
- SYS_MSGSYS = 252
- SYS_SHMSYS = 253
- SYS_SEMCTL = 254
- SYS_SEMGET = 255
- SYS_SEMOP = 256
- SYS_MSGCTL = 258
- SYS_MSGGET = 259
- SYS_MSGSND = 260
- SYS_MSGRCV = 261
- SYS_SHMAT = 262
- SYS_SHMCTL = 263
- SYS_SHMDT = 264
- SYS_SHMGET = 265
- SYS_SHM_OPEN = 266
- SYS_SHM_UNLINK = 267
- SYS_SEM_OPEN = 268
- SYS_SEM_CLOSE = 269
- SYS_SEM_UNLINK = 270
- SYS_SEM_WAIT = 271
- SYS_SEM_TRYWAIT = 272
- SYS_SEM_POST = 273
- SYS_SYSCTLBYNAME = 274
- SYS_OPEN_EXTENDED = 277
- SYS_UMASK_EXTENDED = 278
- SYS_STAT_EXTENDED = 279
- SYS_LSTAT_EXTENDED = 280
- SYS_FSTAT_EXTENDED = 281
- SYS_CHMOD_EXTENDED = 282
- SYS_FCHMOD_EXTENDED = 283
- SYS_ACCESS_EXTENDED = 284
- SYS_SETTID = 285
- SYS_GETTID = 286
- SYS_SETSGROUPS = 287
- SYS_GETSGROUPS = 288
- SYS_SETWGROUPS = 289
- SYS_GETWGROUPS = 290
- SYS_MKFIFO_EXTENDED = 291
- SYS_MKDIR_EXTENDED = 292
- SYS_IDENTITYSVC = 293
- SYS_SHARED_REGION_CHECK_NP = 294
- SYS_VM_PRESSURE_MONITOR = 296
- SYS_PSYNCH_RW_LONGRDLOCK = 297
- SYS_PSYNCH_RW_YIELDWRLOCK = 298
- SYS_PSYNCH_RW_DOWNGRADE = 299
- SYS_PSYNCH_RW_UPGRADE = 300
- SYS_PSYNCH_MUTEXWAIT = 301
- SYS_PSYNCH_MUTEXDROP = 302
- SYS_PSYNCH_CVBROAD = 303
- SYS_PSYNCH_CVSIGNAL = 304
- SYS_PSYNCH_CVWAIT = 305
- SYS_PSYNCH_RW_RDLOCK = 306
- SYS_PSYNCH_RW_WRLOCK = 307
- SYS_PSYNCH_RW_UNLOCK = 308
- SYS_PSYNCH_RW_UNLOCK2 = 309
- SYS_GETSID = 310
- SYS_SETTID_WITH_PID = 311
- SYS_PSYNCH_CVCLRPREPOST = 312
- SYS_AIO_FSYNC = 313
- SYS_AIO_RETURN = 314
- SYS_AIO_SUSPEND = 315
- SYS_AIO_CANCEL = 316
- SYS_AIO_ERROR = 317
- SYS_AIO_READ = 318
- SYS_AIO_WRITE = 319
- SYS_LIO_LISTIO = 320
- SYS_IOPOLICYSYS = 322
- SYS_PROCESS_POLICY = 323
- SYS_MLOCKALL = 324
- SYS_MUNLOCKALL = 325
- SYS_ISSETUGID = 327
- SYS___PTHREAD_KILL = 328
- SYS___PTHREAD_SIGMASK = 329
- SYS___SIGWAIT = 330
- SYS___DISABLE_THREADSIGNAL = 331
- SYS___PTHREAD_MARKCANCEL = 332
- SYS___PTHREAD_CANCELED = 333
- SYS___SEMWAIT_SIGNAL = 334
- SYS_PROC_INFO = 336
- SYS_SENDFILE = 337
- SYS_STAT64 = 338
- SYS_FSTAT64 = 339
- SYS_LSTAT64 = 340
- SYS_STAT64_EXTENDED = 341
- SYS_LSTAT64_EXTENDED = 342
- SYS_FSTAT64_EXTENDED = 343
- SYS_GETDIRENTRIES64 = 344
- SYS_STATFS64 = 345
- SYS_FSTATFS64 = 346
- SYS_GETFSSTAT64 = 347
- SYS___PTHREAD_CHDIR = 348
- SYS___PTHREAD_FCHDIR = 349
- SYS_AUDIT = 350
- SYS_AUDITON = 351
- SYS_GETAUID = 353
- SYS_SETAUID = 354
- SYS_GETAUDIT_ADDR = 357
- SYS_SETAUDIT_ADDR = 358
- SYS_AUDITCTL = 359
- SYS_BSDTHREAD_CREATE = 360
- SYS_BSDTHREAD_TERMINATE = 361
- SYS_KQUEUE = 362
- SYS_KEVENT = 363
- SYS_LCHOWN = 364
- SYS_BSDTHREAD_REGISTER = 366
- SYS_WORKQ_OPEN = 367
- SYS_WORKQ_KERNRETURN = 368
- SYS_KEVENT64 = 369
- SYS___OLD_SEMWAIT_SIGNAL = 370
- SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL = 371
- SYS_THREAD_SELFID = 372
- SYS_LEDGER = 373
- SYS_KEVENT_QOS = 374
- SYS_KEVENT_ID = 375
- SYS___MAC_EXECVE = 380
- SYS___MAC_SYSCALL = 381
- SYS___MAC_GET_FILE = 382
- SYS___MAC_SET_FILE = 383
- SYS___MAC_GET_LINK = 384
- SYS___MAC_SET_LINK = 385
- SYS___MAC_GET_PROC = 386
- SYS___MAC_SET_PROC = 387
- SYS___MAC_GET_FD = 388
- SYS___MAC_SET_FD = 389
- SYS___MAC_GET_PID = 390
- SYS_PSELECT = 394
- SYS_PSELECT_NOCANCEL = 395
- SYS_READ_NOCANCEL = 396
- SYS_WRITE_NOCANCEL = 397
- SYS_OPEN_NOCANCEL = 398
- SYS_CLOSE_NOCANCEL = 399
- SYS_WAIT4_NOCANCEL = 400
- SYS_RECVMSG_NOCANCEL = 401
- SYS_SENDMSG_NOCANCEL = 402
- SYS_RECVFROM_NOCANCEL = 403
- SYS_ACCEPT_NOCANCEL = 404
- SYS_MSYNC_NOCANCEL = 405
- SYS_FCNTL_NOCANCEL = 406
- SYS_SELECT_NOCANCEL = 407
- SYS_FSYNC_NOCANCEL = 408
- SYS_CONNECT_NOCANCEL = 409
- SYS_SIGSUSPEND_NOCANCEL = 410
- SYS_READV_NOCANCEL = 411
- SYS_WRITEV_NOCANCEL = 412
- SYS_SENDTO_NOCANCEL = 413
- SYS_PREAD_NOCANCEL = 414
- SYS_PWRITE_NOCANCEL = 415
- SYS_WAITID_NOCANCEL = 416
- SYS_POLL_NOCANCEL = 417
- SYS_MSGSND_NOCANCEL = 418
- SYS_MSGRCV_NOCANCEL = 419
- SYS_SEM_WAIT_NOCANCEL = 420
- SYS_AIO_SUSPEND_NOCANCEL = 421
- SYS___SIGWAIT_NOCANCEL = 422
- SYS___SEMWAIT_SIGNAL_NOCANCEL = 423
- SYS___MAC_MOUNT = 424
- SYS___MAC_GET_MOUNT = 425
- SYS___MAC_GETFSSTAT = 426
- SYS_FSGETPATH = 427
- SYS_AUDIT_SESSION_SELF = 428
- SYS_AUDIT_SESSION_JOIN = 429
- SYS_FILEPORT_MAKEPORT = 430
- SYS_FILEPORT_MAKEFD = 431
- SYS_AUDIT_SESSION_PORT = 432
- SYS_PID_SUSPEND = 433
- SYS_PID_RESUME = 434
- SYS_PID_HIBERNATE = 435
- SYS_PID_SHUTDOWN_SOCKETS = 436
- SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
- SYS_KAS_INFO = 439
- SYS_MEMORYSTATUS_CONTROL = 440
- SYS_GUARDED_OPEN_NP = 441
- SYS_GUARDED_CLOSE_NP = 442
- SYS_GUARDED_KQUEUE_NP = 443
- SYS_CHANGE_FDGUARD_NP = 444
- SYS_USRCTL = 445
- SYS_PROC_RLIMIT_CONTROL = 446
- SYS_CONNECTX = 447
- SYS_DISCONNECTX = 448
- SYS_PEELOFF = 449
- SYS_SOCKET_DELEGATE = 450
- SYS_TELEMETRY = 451
- SYS_PROC_UUID_POLICY = 452
- SYS_MEMORYSTATUS_GET_LEVEL = 453
- SYS_SYSTEM_OVERRIDE = 454
- SYS_VFS_PURGE = 455
- SYS_SFI_CTL = 456
- SYS_SFI_PIDCTL = 457
- SYS_COALITION = 458
- SYS_COALITION_INFO = 459
- SYS_NECP_MATCH_POLICY = 460
- SYS_GETATTRLISTBULK = 461
- SYS_CLONEFILEAT = 462
- SYS_OPENAT = 463
- SYS_OPENAT_NOCANCEL = 464
- SYS_RENAMEAT = 465
- SYS_FACCESSAT = 466
- SYS_FCHMODAT = 467
- SYS_FCHOWNAT = 468
- SYS_FSTATAT = 469
- SYS_FSTATAT64 = 470
- SYS_LINKAT = 471
- SYS_UNLINKAT = 472
- SYS_READLINKAT = 473
- SYS_SYMLINKAT = 474
- SYS_MKDIRAT = 475
- SYS_GETATTRLISTAT = 476
- SYS_PROC_TRACE_LOG = 477
- SYS_BSDTHREAD_CTL = 478
- SYS_OPENBYID_NP = 479
- SYS_RECVMSG_X = 480
- SYS_SENDMSG_X = 481
- SYS_THREAD_SELFUSAGE = 482
- SYS_CSRCTL = 483
- SYS_GUARDED_OPEN_DPROTECTED_NP = 484
- SYS_GUARDED_WRITE_NP = 485
- SYS_GUARDED_PWRITE_NP = 486
- SYS_GUARDED_WRITEV_NP = 487
- SYS_RENAMEATX_NP = 488
- SYS_MREMAP_ENCRYPTED = 489
- SYS_NETAGENT_TRIGGER = 490
- SYS_STACK_SNAPSHOT_WITH_CONFIG = 491
- SYS_MICROSTACKSHOT = 492
- SYS_GRAB_PGO_DATA = 493
- SYS_PERSONA = 494
- SYS_WORK_INTERVAL_CTL = 499
- SYS_GETENTROPY = 500
- SYS_NECP_OPEN = 501
- SYS_NECP_CLIENT_ACTION = 502
- SYS___NEXUS_OPEN = 503
- SYS___NEXUS_REGISTER = 504
- SYS___NEXUS_DEREGISTER = 505
- SYS___NEXUS_CREATE = 506
- SYS___NEXUS_DESTROY = 507
- SYS___NEXUS_GET_OPT = 508
- SYS___NEXUS_SET_OPT = 509
- SYS___CHANNEL_OPEN = 510
- SYS___CHANNEL_GET_INFO = 511
- SYS___CHANNEL_SYNC = 512
- SYS___CHANNEL_GET_OPT = 513
- SYS___CHANNEL_SET_OPT = 514
- SYS_ULOCK_WAIT = 515
- SYS_ULOCK_WAKE = 516
- SYS_FCLONEFILEAT = 517
- SYS_FS_SNAPSHOT = 518
- SYS_TERMINATE_WITH_PAYLOAD = 520
- SYS_ABORT_WITH_PAYLOAD = 521
- SYS_NECP_SESSION_OPEN = 522
- SYS_NECP_SESSION_ACTION = 523
- SYS_SETATTRLISTAT = 524
- SYS_NET_QOS_GUIDELINE = 525
- SYS_FMOUNT = 526
- SYS_NTP_ADJTIME = 527
- SYS_NTP_GETTIME = 528
- SYS_OS_FAULT_WITH_PAYLOAD = 529
- SYS_MAXSYSCALL = 530
- SYS_INVALID = 63
-)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 54559a89..a597e061 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -433,4 +433,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 054a741b..8c102e55 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -355,4 +355,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 307f2ba1..98f9b68f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -397,4 +397,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index e9404dd5..4dabc33f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -300,4 +300,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 68bb6d29..d5724e59 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -418,4 +418,5 @@ const (
SYS_CLONE3 = 4435
SYS_OPENAT2 = 4437
SYS_PIDFD_GETFD = 4438
+ SYS_FACCESSAT2 = 4439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 4e525118..c1d824a4 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -348,4 +348,5 @@ const (
SYS_CLONE3 = 5435
SYS_OPENAT2 = 5437
SYS_PIDFD_GETFD = 5438
+ SYS_FACCESSAT2 = 5439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 4d9aa300..598dd5d6 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -348,4 +348,5 @@ const (
SYS_CLONE3 = 5435
SYS_OPENAT2 = 5437
SYS_PIDFD_GETFD = 5438
+ SYS_FACCESSAT2 = 5439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 64af0707..c36782d0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -418,4 +418,5 @@ const (
SYS_CLONE3 = 4435
SYS_OPENAT2 = 4437
SYS_PIDFD_GETFD = 4438
+ SYS_FACCESSAT2 = 4439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index cc3c067b..9287538d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -397,4 +397,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 4050ff98..4dafad83 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -397,4 +397,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 529abb6a..6642cfcc 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -299,4 +299,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 27665001..23367b94 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -362,4 +362,5 @@ const (
SYS_CLONE3 = 435
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 4dc82bb2..083aa020 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -376,4 +376,5 @@ const (
SYS_PIDFD_OPEN = 434
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
+ SYS_FACCESSAT2 = 439
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
new file mode 100644
index 00000000..5c08d573
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go
@@ -0,0 +1,220 @@
+// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build mips64,openbsd
+
+package unix
+
+const (
+ SYS_EXIT = 1 // { void sys_exit(int rval); }
+ SYS_FORK = 2 // { int sys_fork(void); }
+ SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); }
+ SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); }
+ SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); }
+ SYS_CLOSE = 6 // { int sys_close(int fd); }
+ SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); }
+ SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); }
+ SYS_LINK = 9 // { int sys_link(const char *path, const char *link); }
+ SYS_UNLINK = 10 // { int sys_unlink(const char *path); }
+ SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); }
+ SYS_CHDIR = 12 // { int sys_chdir(const char *path); }
+ SYS_FCHDIR = 13 // { int sys_fchdir(int fd); }
+ SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); }
+ SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); }
+ SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); }
+ SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break
+ SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); }
+ SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); }
+ SYS_GETPID = 20 // { pid_t sys_getpid(void); }
+ SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); }
+ SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); }
+ SYS_SETUID = 23 // { int sys_setuid(uid_t uid); }
+ SYS_GETUID = 24 // { uid_t sys_getuid(void); }
+ SYS_GETEUID = 25 // { uid_t sys_geteuid(void); }
+ SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); }
+ SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); }
+ SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); }
+ SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); }
+ SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); }
+ SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); }
+ SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); }
+ SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); }
+ SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); }
+ SYS_SYNC = 36 // { void sys_sync(void); }
+ SYS_MSYSCALL = 37 // { int sys_msyscall(void *addr, size_t len); }
+ SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); }
+ SYS_GETPPID = 39 // { pid_t sys_getppid(void); }
+ SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); }
+ SYS_DUP = 41 // { int sys_dup(int fd); }
+ SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); }
+ SYS_GETEGID = 43 // { gid_t sys_getegid(void); }
+ SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); }
+ SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); }
+ SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); }
+ SYS_GETGID = 47 // { gid_t sys_getgid(void); }
+ SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); }
+ SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); }
+ SYS_ACCT = 51 // { int sys_acct(const char *path); }
+ SYS_SIGPENDING = 52 // { int sys_sigpending(void); }
+ SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); }
+ SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); }
+ SYS_REBOOT = 55 // { int sys_reboot(int opt); }
+ SYS_REVOKE = 56 // { int sys_revoke(const char *path); }
+ SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); }
+ SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); }
+ SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); }
+ SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); }
+ SYS_CHROOT = 61 // { int sys_chroot(const char *path); }
+ SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); }
+ SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); }
+ SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); }
+ SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); }
+ SYS_VFORK = 66 // { int sys_vfork(void); }
+ SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); }
+ SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); }
+ SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); }
+ SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); }
+ SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); }
+ SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); }
+ SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); }
+ SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); }
+ SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); }
+ SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); }
+ SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); }
+ SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); }
+ SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); }
+ SYS_GETPGRP = 81 // { int sys_getpgrp(void); }
+ SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); }
+ SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); }
+ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); }
+ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); }
+ SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); }
+ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); }
+ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); }
+ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); }
+ SYS_DUP2 = 90 // { int sys_dup2(int from, int to); }
+ SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); }
+ SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); }
+ SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); }
+ SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); }
+ SYS_FSYNC = 95 // { int sys_fsync(int fd); }
+ SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); }
+ SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); }
+ SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); }
+ SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); }
+ SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); }
+ SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); }
+ SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); }
+ SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); }
+ SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); }
+ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); }
+ SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); }
+ SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); }
+ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); }
+ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); }
+ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); }
+ SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); }
+ SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); }
+ SYS___REALPATH = 115 // { int sys___realpath(const char *pathname, char *resolved); }
+ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); }
+ SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); }
+ SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); }
+ SYS_KILL = 122 // { int sys_kill(int pid, int signum); }
+ SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); }
+ SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); }
+ SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); }
+ SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); }
+ SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); }
+ SYS_FLOCK = 131 // { int sys_flock(int fd, int how); }
+ SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); }
+ SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); }
+ SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); }
+ SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); }
+ SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); }
+ SYS_RMDIR = 137 // { int sys_rmdir(const char *path); }
+ SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); }
+ SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); }
+ SYS_SETSID = 147 // { int sys_setsid(void); }
+ SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); }
+ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); }
+ SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); }
+ SYS___TMPFD = 164 // { int sys___tmpfd(int flags); }
+ SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); }
+ SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); }
+ SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); }
+ SYS_SETGID = 181 // { int sys_setgid(gid_t gid); }
+ SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); }
+ SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); }
+ SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); }
+ SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); }
+ SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); }
+ SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); }
+ SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); }
+ SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); }
+ SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); }
+ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); }
+ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); }
+ SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); }
+ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); }
+ SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); }
+ SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); }
+ SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); }
+ SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); }
+ SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); }
+ SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); }
+ SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); }
+ SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); }
+ SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); }
+ SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); }
+ SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); }
+ SYS_ISSETUGID = 253 // { int sys_issetugid(void); }
+ SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); }
+ SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); }
+ SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); }
+ SYS_PIPE = 263 // { int sys_pipe(int *fdp); }
+ SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); }
+ SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); }
+ SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); }
+ SYS_KQUEUE = 269 // { int sys_kqueue(void); }
+ SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); }
+ SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); }
+ SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); }
+ SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); }
+ SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); }
+ SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); }
+ SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); }
+ SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); }
+ SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); }
+ SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); }
+ SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); }
+ SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); }
+ SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); }
+ SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); }
+ SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); }
+ SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); }
+ SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); }
+ SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); }
+ SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); }
+ SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); }
+ SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); }
+ SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); }
+ SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); }
+ SYS_GETRTABLE = 311 // { int sys_getrtable(void); }
+ SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); }
+ SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); }
+ SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); }
+ SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); }
+ SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); }
+ SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); }
+ SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); }
+ SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... mode_t mode); }
+ SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); }
+ SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); }
+ SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); }
+ SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); }
+ SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); }
+ SYS___GET_TCB = 330 // { void *sys___get_tcb(void); }
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 27d67ac8..68e4974a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -67,7 +67,9 @@ type Statx_t struct {
Rdev_minor uint32
Dev_major uint32
Dev_minor uint32
- _ [14]uint64
+ Mnt_id uint64
+ _ uint64
+ _ [12]uint64
}
type Fsid struct {
@@ -138,6 +140,48 @@ type FscryptGetKeyStatusArg struct {
_ [13]uint32
}
+type DmIoctl struct {
+ Version [3]uint32
+ Data_size uint32
+ Data_start uint32
+ Target_count uint32
+ Open_count int32
+ Flags uint32
+ Event_nr uint32
+ _ uint32
+ Dev uint64
+ Name [128]byte
+ Uuid [129]byte
+ Data [7]byte
+}
+
+type DmTargetSpec struct {
+ Sector_start uint64
+ Length uint64
+ Status int32
+ Next uint32
+ Target_type [16]byte
+}
+
+type DmTargetDeps struct {
+ Count uint32
+ _ uint32
+}
+
+type DmTargetVersions struct {
+ Next uint32
+ Version [3]uint32
+}
+
+type DmTargetMsg struct {
+ Sector uint64
+}
+
+const (
+ SizeofDmIoctl = 0x138
+ SizeofDmTargetSpec = 0x28
+)
+
type KeyctlDHParams struct {
Private int32
Prime int32
@@ -266,6 +310,15 @@ type RawSockaddrL2TPIP6 struct {
Conn_id uint32
}
+type RawSockaddrIUCV struct {
+ Family uint16
+ Port uint16
+ Addr uint32
+ Nodeid [8]int8
+ User_id [8]int8
+ Name [8]int8
+}
+
type _Socklen uint32
type Linger struct {
@@ -378,6 +431,7 @@ const (
SizeofSockaddrTIPC = 0x10
SizeofSockaddrL2TPIP = 0x10
SizeofSockaddrL2TPIP6 = 0x20
+ SizeofSockaddrIUCV = 0x20
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
@@ -671,6 +725,8 @@ type InotifyEvent struct {
const SizeofInotifyEvent = 0x10
+const SI_LOAD_SHIFT = 0x10
+
type Utsname struct {
Sysname [65]byte
Nodename [65]byte
@@ -696,6 +752,22 @@ const (
AT_EACCESS = 0x200
)
+type OpenHow struct {
+ Flags uint64
+ Mode uint64
+ Resolve uint64
+}
+
+const SizeofOpenHow = 0x18
+
+const (
+ RESOLVE_BENEATH = 0x8
+ RESOLVE_IN_ROOT = 0x10
+ RESOLVE_NO_MAGICLINKS = 0x2
+ RESOLVE_NO_SYMLINKS = 0x4
+ RESOLVE_NO_XDEV = 0x1
+)
+
type PollFd struct {
Fd int32
Events int16
@@ -960,6 +1032,13 @@ const (
PERF_SAMPLE_STREAM_ID = 0x200
PERF_SAMPLE_RAW = 0x400
PERF_SAMPLE_BRANCH_STACK = 0x800
+ PERF_SAMPLE_REGS_USER = 0x1000
+ PERF_SAMPLE_STACK_USER = 0x2000
+ PERF_SAMPLE_WEIGHT = 0x4000
+ PERF_SAMPLE_DATA_SRC = 0x8000
+ PERF_SAMPLE_IDENTIFIER = 0x10000
+ PERF_SAMPLE_TRANSACTION = 0x20000
+ PERF_SAMPLE_REGS_INTR = 0x40000
PERF_SAMPLE_BRANCH_USER = 0x1
PERF_SAMPLE_BRANCH_KERNEL = 0x2
@@ -1689,6 +1768,21 @@ const (
NFT_NG_RANDOM = 0x1
)
+const (
+ NFTA_TARGET_UNSPEC = 0x0
+ NFTA_TARGET_NAME = 0x1
+ NFTA_TARGET_REV = 0x2
+ NFTA_TARGET_INFO = 0x3
+ NFTA_MATCH_UNSPEC = 0x0
+ NFTA_MATCH_NAME = 0x1
+ NFTA_MATCH_REV = 0x2
+ NFTA_MATCH_INFO = 0x3
+ NFTA_COMPAT_UNSPEC = 0x0
+ NFTA_COMPAT_NAME = 0x1
+ NFTA_COMPAT_REV = 0x2
+ NFTA_COMPAT_TYPE = 0x3
+)
+
type RTCTime struct {
Sec int32
Min int32
@@ -1912,6 +2006,10 @@ const (
BPF_MAP_DELETE_BATCH = 0x1b
BPF_LINK_CREATE = 0x1c
BPF_LINK_UPDATE = 0x1d
+ BPF_LINK_GET_FD_BY_ID = 0x1e
+ BPF_LINK_GET_NEXT_ID = 0x1f
+ BPF_ENABLE_STATS = 0x20
+ BPF_ITER_CREATE = 0x21
BPF_MAP_TYPE_UNSPEC = 0x0
BPF_MAP_TYPE_HASH = 0x1
BPF_MAP_TYPE_ARRAY = 0x2
@@ -1939,6 +2037,7 @@ const (
BPF_MAP_TYPE_SK_STORAGE = 0x18
BPF_MAP_TYPE_DEVMAP_HASH = 0x19
BPF_MAP_TYPE_STRUCT_OPS = 0x1a
+ BPF_MAP_TYPE_RINGBUF = 0x1b
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
@@ -1997,6 +2096,18 @@ const (
BPF_TRACE_FEXIT = 0x19
BPF_MODIFY_RETURN = 0x1a
BPF_LSM_MAC = 0x1b
+ BPF_TRACE_ITER = 0x1c
+ BPF_CGROUP_INET4_GETPEERNAME = 0x1d
+ BPF_CGROUP_INET6_GETPEERNAME = 0x1e
+ BPF_CGROUP_INET4_GETSOCKNAME = 0x1f
+ BPF_CGROUP_INET6_GETSOCKNAME = 0x20
+ BPF_XDP_DEVMAP = 0x21
+ BPF_LINK_TYPE_UNSPEC = 0x0
+ BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1
+ BPF_LINK_TYPE_TRACING = 0x2
+ BPF_LINK_TYPE_CGROUP = 0x3
+ BPF_LINK_TYPE_ITER = 0x4
+ BPF_LINK_TYPE_NETNS = 0x5
BPF_ANY = 0x0
BPF_NOEXIST = 0x1
BPF_EXIST = 0x2
@@ -2012,6 +2123,7 @@ const (
BPF_F_WRONLY_PROG = 0x100
BPF_F_CLONE = 0x200
BPF_F_MMAPABLE = 0x400
+ BPF_STATS_RUN_TIME = 0x0
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
BPF_STACK_BUILD_ID_IP = 0x2
@@ -2035,16 +2147,30 @@ const (
BPF_F_CURRENT_CPU = 0xffffffff
BPF_F_CTXLEN_MASK = 0xfffff00000000
BPF_F_CURRENT_NETNS = -0x1
+ BPF_CSUM_LEVEL_QUERY = 0x0
+ BPF_CSUM_LEVEL_INC = 0x1
+ BPF_CSUM_LEVEL_DEC = 0x2
+ BPF_CSUM_LEVEL_RESET = 0x3
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_F_SYSCTL_BASE_NAME = 0x1
BPF_SK_STORAGE_GET_F_CREATE = 0x1
BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1
+ BPF_RB_NO_WAKEUP = 0x1
+ BPF_RB_FORCE_WAKEUP = 0x2
+ BPF_RB_AVAIL_DATA = 0x0
+ BPF_RB_RING_SIZE = 0x1
+ BPF_RB_CONS_POS = 0x2
+ BPF_RB_PROD_POS = 0x3
+ BPF_RINGBUF_BUSY_BIT = 0x80000000
+ BPF_RINGBUF_DISCARD_BIT = 0x40000000
+ BPF_RINGBUF_HDR_SZ = 0x8
BPF_ADJ_ROOM_NET = 0x0
BPF_ADJ_ROOM_MAC = 0x1
BPF_HDR_START_MAC = 0x0
@@ -2417,3 +2543,12 @@ const (
NHA_GROUPS = 0x9
NHA_MASTER = 0xa
)
+
+const (
+ CAN_RAW_FILTER = 0x1
+ CAN_RAW_ERR_FILTER = 0x2
+ CAN_RAW_LOOPBACK = 0x3
+ CAN_RAW_RECV_OWN_MSGS = 0x4
+ CAN_RAW_FD_FRAMES = 0x5
+ CAN_RAW_JOIN_FILTERS = 0x6
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 761b67c8..73509d89 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -117,6 +117,11 @@ type Flock_t struct {
Pid int32
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 201fb348..45eb8738 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -117,6 +117,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 8051b561..8f6b453a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -121,6 +121,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index a936f216..b1e0c24f 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -118,6 +118,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index aaca03dd..fb802c3e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -120,6 +120,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 2e7f3b8c..30abcf3b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -118,6 +118,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 16add5a2..99761aa9 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -118,6 +118,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 4ed2c8e5..29369034 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -120,6 +120,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 74151909..0ca856e5 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -119,6 +119,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 046c2deb..f50f6482 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -119,6 +119,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 0f2f61a6..4d3ac8d7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -118,6 +118,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cca1b6be..349f483a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -117,6 +117,13 @@ type Flock_t struct {
_ [4]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x6
FADV_NOREUSE = 0x7
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 33a73bf1..80c73bea 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -121,6 +121,13 @@ type Flock_t struct {
_ [2]byte
}
+type DmNameList struct {
+ Dev uint64
+ Next uint32
+ Name [0]byte
+ _ [4]byte
+}
+
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
new file mode 100644
index 00000000..992a1f8c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go
@@ -0,0 +1,565 @@
+// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+// +build mips64,openbsd
+
+package unix
+
+const (
+ SizeofPtr = 0x8
+ SizeofShort = 0x2
+ SizeofInt = 0x4
+ SizeofLong = 0x8
+ SizeofLongLong = 0x8
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Mode uint32
+ Dev int32
+ Ino uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev int32
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Size int64
+ Blocks int64
+ Blksize int32
+ Flags uint32
+ Gen uint32
+ _ Timespec
+}
+
+type Statfs_t struct {
+ F_flags uint32
+ F_bsize uint32
+ F_iosize uint32
+ F_blocks uint64
+ F_bfree uint64
+ F_bavail int64
+ F_files uint64
+ F_ffree uint64
+ F_favail int64
+ F_syncwrites uint64
+ F_syncreads uint64
+ F_asyncwrites uint64
+ F_asyncreads uint64
+ F_fsid Fsid
+ F_namemax uint32
+ F_owner uint32
+ F_ctime uint64
+ F_fstypename [16]int8
+ F_mntonname [90]int8
+ F_mntfromname [90]int8
+ F_mntfromspec [90]int8
+ _ [2]byte
+ Mount_info [160]byte
+}
+
+type Flock_t struct {
+ Start int64
+ Len int64
+ Pid int32
+ Type int16
+ Whence int16
+}
+
+type Dirent struct {
+ Fileno uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Namlen uint8
+ _ [4]uint8
+ Name [256]int8
+}
+
+type Fsid struct {
+ Val [2]int32
+}
+
+const (
+ PathMax = 0x400
+)
+
+type RawSockaddrInet4 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]int8
+}
+
+type RawSockaddrInet6 struct {
+ Len uint8
+ Family uint8
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Len uint8
+ Family uint8
+ Path [104]int8
+}
+
+type RawSockaddrDatalink struct {
+ Len uint8
+ Family uint8
+ Index uint16
+ Type uint8
+ Nlen uint8
+ Alen uint8
+ Slen uint8
+ Data [24]int8
+}
+
+type RawSockaddr struct {
+ Len uint8
+ Family uint8
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [92]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint64
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Iov *Iovec
+ Iovlen uint32
+ Control *byte
+ Controllen uint32
+ Flags int32
+}
+
+type Cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+type Inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+ Addr RawSockaddrInet6
+ Mtu uint32
+}
+
+type ICMPv6Filter struct {
+ Filt [8]uint32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x6c
+ SizeofSockaddrUnix = 0x6a
+ SizeofSockaddrDatalink = 0x20
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x30
+ SizeofCmsghdr = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+)
+
+const (
+ PTRACE_TRACEME = 0x0
+ PTRACE_CONT = 0x7
+ PTRACE_KILL = 0x8
+)
+
+type Kevent_t struct {
+ Ident uint64
+ Filter int16
+ Flags uint16
+ Fflags uint32
+ Data int64
+ Udata *byte
+}
+
+type FdSet struct {
+ Bits [32]uint32
+}
+
+const (
+ SizeofIfMsghdr = 0xa8
+ SizeofIfData = 0x90
+ SizeofIfaMsghdr = 0x18
+ SizeofIfAnnounceMsghdr = 0x1a
+ SizeofRtMsghdr = 0x60
+ SizeofRtMetrics = 0x38
+)
+
+type IfMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ Tableid uint16
+ Pad1 uint8
+ Pad2 uint8
+ Addrs int32
+ Flags int32
+ Xflags int32
+ Data IfData
+}
+
+type IfData struct {
+ Type uint8
+ Addrlen uint8
+ Hdrlen uint8
+ Link_state uint8
+ Mtu uint32
+ Metric uint32
+ Rdomain uint32
+ Baudrate uint64
+ Ipackets uint64
+ Ierrors uint64
+ Opackets uint64
+ Oerrors uint64
+ Collisions uint64
+ Ibytes uint64
+ Obytes uint64
+ Imcasts uint64
+ Omcasts uint64
+ Iqdrops uint64
+ Oqdrops uint64
+ Noproto uint64
+ Capabilities uint32
+ Lastchange Timeval
+}
+
+type IfaMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ Tableid uint16
+ Pad1 uint8
+ Pad2 uint8
+ Addrs int32
+ Flags int32
+ Metric int32
+}
+
+type IfAnnounceMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ What uint16
+ Name [16]int8
+}
+
+type RtMsghdr struct {
+ Msglen uint16
+ Version uint8
+ Type uint8
+ Hdrlen uint16
+ Index uint16
+ Tableid uint16
+ Priority uint8
+ Mpls uint8
+ Addrs int32
+ Flags int32
+ Fmask int32
+ Pid int32
+ Seq int32
+ Errno int32
+ Inits uint32
+ Rmx RtMetrics
+}
+
+type RtMetrics struct {
+ Pksent uint64
+ Expire int64
+ Locks uint32
+ Mtu uint32
+ Refcnt uint32
+ Hopcount uint32
+ Recvpipe uint32
+ Sendpipe uint32
+ Ssthresh uint32
+ Rtt uint32
+ Rttvar uint32
+ Pad uint32
+}
+
+type Mclpool struct{}
+
+const (
+ SizeofBpfVersion = 0x4
+ SizeofBpfStat = 0x8
+ SizeofBpfProgram = 0x10
+ SizeofBpfInsn = 0x8
+ SizeofBpfHdr = 0x14
+)
+
+type BpfVersion struct {
+ Major uint16
+ Minor uint16
+}
+
+type BpfStat struct {
+ Recv uint32
+ Drop uint32
+}
+
+type BpfProgram struct {
+ Len uint32
+ Insns *BpfInsn
+}
+
+type BpfInsn struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+type BpfHdr struct {
+ Tstamp BpfTimeval
+ Caplen uint32
+ Datalen uint32
+ Hdrlen uint16
+ _ [2]byte
+}
+
+type BpfTimeval struct {
+ Sec uint32
+ Usec uint32
+}
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+type Winsize struct {
+ Row uint16
+ Col uint16
+ Xpixel uint16
+ Ypixel uint16
+}
+
+const (
+ AT_FDCWD = -0x64
+ AT_SYMLINK_FOLLOW = 0x4
+ AT_SYMLINK_NOFOLLOW = 0x2
+)
+
+type PollFd struct {
+ Fd int32
+ Events int16
+ Revents int16
+}
+
+const (
+ POLLERR = 0x8
+ POLLHUP = 0x10
+ POLLIN = 0x1
+ POLLNVAL = 0x20
+ POLLOUT = 0x4
+ POLLPRI = 0x2
+ POLLRDBAND = 0x80
+ POLLRDNORM = 0x40
+ POLLWRBAND = 0x100
+ POLLWRNORM = 0x4
+)
+
+type Sigset_t uint32
+
+type Utsname struct {
+ Sysname [256]byte
+ Nodename [256]byte
+ Release [256]byte
+ Version [256]byte
+ Machine [256]byte
+}
+
+const SizeofUvmexp = 0x158
+
+type Uvmexp struct {
+ Pagesize int32
+ Pagemask int32
+ Pageshift int32
+ Npages int32
+ Free int32
+ Active int32
+ Inactive int32
+ Paging int32
+ Wired int32
+ Zeropages int32
+ Reserve_pagedaemon int32
+ Reserve_kernel int32
+ Unused01 int32
+ Vnodepages int32
+ Vtextpages int32
+ Freemin int32
+ Freetarg int32
+ Inactarg int32
+ Wiredmax int32
+ Anonmin int32
+ Vtextmin int32
+ Vnodemin int32
+ Anonminpct int32
+ Vtextminpct int32
+ Vnodeminpct int32
+ Nswapdev int32
+ Swpages int32
+ Swpginuse int32
+ Swpgonly int32
+ Nswget int32
+ Nanon int32
+ Unused05 int32
+ Unused06 int32
+ Faults int32
+ Traps int32
+ Intrs int32
+ Swtch int32
+ Softs int32
+ Syscalls int32
+ Pageins int32
+ Unused07 int32
+ Unused08 int32
+ Pgswapin int32
+ Pgswapout int32
+ Forks int32
+ Forks_ppwait int32
+ Forks_sharevm int32
+ Pga_zerohit int32
+ Pga_zeromiss int32
+ Unused09 int32
+ Fltnoram int32
+ Fltnoanon int32
+ Fltnoamap int32
+ Fltpgwait int32
+ Fltpgrele int32
+ Fltrelck int32
+ Fltrelckok int32
+ Fltanget int32
+ Fltanretry int32
+ Fltamcopy int32
+ Fltnamap int32
+ Fltnomap int32
+ Fltlget int32
+ Fltget int32
+ Flt_anon int32
+ Flt_acow int32
+ Flt_obj int32
+ Flt_prcopy int32
+ Flt_przero int32
+ Pdwoke int32
+ Pdrevs int32
+ Pdswout int32
+ Pdfreed int32
+ Pdscans int32
+ Pdanscan int32
+ Pdobscan int32
+ Pdreact int32
+ Pdbusy int32
+ Pdpageouts int32
+ Pdpending int32
+ Pddeact int32
+ Unused11 int32
+ Unused12 int32
+ Unused13 int32
+ Fpswtch int32
+ Kmapent int32
+}
+
+const SizeofClockinfo = 0x14
+
+type Clockinfo struct {
+ Hz int32
+ Tick int32
+ Tickadj int32
+ Stathz int32
+ Profhz int32
+}
diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go
index 520b9ada..48ec64b4 100644
--- a/vendor/golang.org/x/text/transform/transform.go
+++ b/vendor/golang.org/x/text/transform/transform.go
@@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) {
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
- nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
+ atEOF := pSrc+n == len(s)
+ nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF)
pDst += nDst
pSrc += nSrc
@@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
+ if atEOF {
+ return string(dst[:pDst]), pSrc, err
+ }
if nSrc == 0 {
src = grow(src, 0)
}
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 48d14400..50deb660 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() {
// Rule W1.
// Changes all NSMs.
- preceedingCharacterType := s.sos
+ precedingCharacterType := s.sos
for i, t := range s.types {
if t == NSM {
- s.types[i] = preceedingCharacterType
+ s.types[i] = precedingCharacterType
} else {
if t.in(LRI, RLI, FSI, PDI) {
- preceedingCharacterType = ON
+ precedingCharacterType = ON
}
- preceedingCharacterType = t
+ precedingCharacterType = t
}
}
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
index f177b2d9..27708972 100644
--- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
@@ -21,6 +21,7 @@ import (
"runtime/pprof"
"strings"
+ "golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
)
@@ -42,14 +43,16 @@ var (
TabIndent: true,
Comments: true,
Fragment: true,
- Env: &imports.ProcessEnv{},
+ Env: &imports.ProcessEnv{
+ GocmdRunner: &gocommand.Runner{},
+ },
}
exitCode = 0
)
func init() {
flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
- flag.StringVar(&options.Env.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list")
+ flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list")
flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.")
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go
index e0916037..f0b15051 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go
@@ -116,7 +116,11 @@ func checkCanonicalFieldTag(pass *analysis.Pass, field *types.Var, tag string, s
}
for _, enc := range [...]string{"json", "xml"} {
- if reflect.StructTag(tag).Get(enc) != "" {
+ switch reflect.StructTag(tag).Get(enc) {
+ // Ignore warning if the field is not exported and the tag is marked as
+ // ignored.
+ case "", "-":
+ default:
pass.Reportf(field.Pos(), "struct field %s has %s tag but is not exported", field.Name(), enc)
return
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
index f9cc993c..92b37caf 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
@@ -30,7 +30,7 @@ var Analyzer = &analysis.Analyzer{
func run(pass *analysis.Pass) (interface{}, error) {
switch pass.Pkg.Path() {
- case "encoding/gob", "encoding/json", "encoding/xml":
+ case "encoding/gob", "encoding/json", "encoding/xml", "encoding/asn1":
// These packages know how to use their own APIs.
// Sometimes they are testing what happens to incorrect programs.
return nil, nil
@@ -53,9 +53,10 @@ func run(pass *analysis.Pass) (interface{}, error) {
recv := fn.Type().(*types.Signature).Recv()
if fn.Name() == "Unmarshal" && recv == nil {
// "encoding/json".Unmarshal
- // "encoding/xml".Unmarshal
+ // "encoding/xml".Unmarshal
+ // "encoding/asn1".Unmarshal
switch fn.Pkg().Path() {
- case "encoding/json", "encoding/xml":
+ case "encoding/json", "encoding/xml", "encoding/asn1":
argidx = 1 // func([]byte, interface{})
}
} else if fn.Name() == "Decode" && recv != nil {
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
index 76d4ab23..bececee7 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
@@ -44,7 +44,7 @@ var funcs, stringMethods stringSetFlag
func init() {
// TODO(adonovan): provide a comment syntax to allow users to
// add their functions to this set using facts.
- funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse")
+ funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse,context.WithValue,context.WithCancel,context.WithDeadline,context.WithTimeout")
Analyzer.Flags.Var(&funcs, "funcs",
"comma-separated list of functions whose results must be used")
diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go
index be981434..ad0e7276 100644
--- a/vendor/golang.org/x/tools/go/analysis/validate.go
+++ b/vendor/golang.org/x/tools/go/analysis/validate.go
@@ -3,6 +3,7 @@ package analysis
import (
"fmt"
"reflect"
+ "strings"
"unicode"
)
@@ -58,14 +59,28 @@ func Validate(analyzers []*Analyzer) error {
}
// recursion
- for i, req := range a.Requires {
+ for _, req := range a.Requires {
if err := visit(req); err != nil {
- return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err)
+ return err
}
}
color[a] = black
}
+ if color[a] == grey {
+ stack := []*Analyzer{a}
+ inCycle := map[string]bool{}
+ for len(stack) > 0 {
+ current := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if color[current] == grey && !inCycle[current.Name] {
+ inCycle[current.Name] = true
+ stack = append(stack, current.Requires...)
+ }
+ }
+ return &CycleInRequiresGraphError{AnalyzerNames: inCycle}
+ }
+
return nil
}
for _, a := range analyzers {
@@ -95,3 +110,17 @@ func validIdent(name string) bool {
}
return name != ""
}
+
+type CycleInRequiresGraphError struct {
+ AnalyzerNames map[string]bool
+}
+
+func (e *CycleInRequiresGraphError) Error() string {
+ var b strings.Builder
+ b.WriteString("cycle detected involving the following analyzers:")
+ for n := range e.AnalyzerNames {
+ b.WriteByte(' ')
+ b.WriteString(n)
+ }
+ return b.String()
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 6e91391c..bc04503c 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -89,6 +89,10 @@ type golistState struct {
rootDirsError error
rootDirs map[string]string
+ goVersionOnce sync.Once
+ goVersionError error
+ goVersion string // third field of 'go version'
+
// vendorDirs caches the (non)existence of vendor directories.
vendorDirs map[string]bool
}
@@ -242,6 +246,21 @@ extractQueries:
}
}
}
+ // Add root for any package that matches a pattern. This applies only to
+ // packages that are modified by overlays, since they are not added as
+ // roots automatically.
+ for _, pattern := range restPatterns {
+ match := matchPattern(pattern)
+ for _, pkgID := range modifiedPkgs {
+ pkg, ok := response.seenPackages[pkgID]
+ if !ok {
+ continue
+ }
+ if match(pkg.PkgPath) {
+ response.addRoot(pkg.ID)
+ }
+ }
+ }
sizeswg.Wait()
if sizeserr != nil {
@@ -635,6 +654,39 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
pkg.CompiledGoFiles = pkg.GoFiles
}
+ // Temporary work-around for golang/go#39986. Parse filenames out of
+ // error messages. This happens if there are unrecoverable syntax
+ // errors in the source, so we can't match on a specific error message.
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+ addFilenameFromPos := func(pos string) bool {
+ split := strings.Split(pos, ":")
+ if len(split) < 1 {
+ return false
+ }
+ filename := strings.TrimSpace(split[0])
+ if filename == "" {
+ return false
+ }
+ if !filepath.IsAbs(filename) {
+ filename = filepath.Join(state.cfg.Dir, filename)
+ }
+ info, _ := os.Stat(filename)
+ if info == nil {
+ return false
+ }
+ pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+ pkg.GoFiles = append(pkg.GoFiles, filename)
+ return true
+ }
+ found := addFilenameFromPos(err.Pos)
+ // In some cases, go list only reports the error position in the
+ // error text, not in the Pos field. One such case is when the
+ // file's package name is a keyword (see golang.org/issue/39763).
+ if !found {
+ addFilenameFromPos(err.Err)
+ }
+ }
+
if p.Error != nil {
msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
// Address golang.org/issue/35964 by appending import stack to error message.
@@ -664,7 +716,60 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
return &response, nil
}
-// getPkgPath finds the package path of a directory if it's relative to a root directory.
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+ if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+ return false
+ }
+
+ goV, err := state.getGoVersion()
+ if err != nil {
+ return false
+ }
+
+ // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+ // The import stack behaves differently for these versions than newer Go versions.
+ if strings.HasPrefix(goV, "go1.13") || strings.HasPrefix(goV, "go1.14") {
+ return len(p.Error.ImportStack) == 0
+ }
+
+ // On Go 1.15 and later, only parse filenames out of error if there's no import stack,
+ // or the current package is at the top of the import stack. This is not guaranteed
+ // to work perfectly, but should avoid some cases where files in errors don't belong to this
+ // package.
+ return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+func (state *golistState) getGoVersion() (string, error) {
+ state.goVersionOnce.Do(func() {
+ var b *bytes.Buffer
+ // Invoke go version. Don't use invokeGo because it will supply build flags, and
+ // go version doesn't expect build flags.
+ inv := gocommand.Invocation{
+ Verb: "version",
+ Env: state.cfg.Env,
+ Logf: state.cfg.Logf,
+ }
+ gocmdRunner := state.cfg.gocmdRunner
+ if gocmdRunner == nil {
+ gocmdRunner = &gocommand.Runner{}
+ }
+ b, _, _, state.goVersionError = gocmdRunner.RunRaw(state.cfg.Context, inv)
+ if state.goVersionError != nil {
+ return
+ }
+
+ sp := strings.Split(b.String(), " ")
+ if len(sp) < 3 {
+ state.goVersionError = fmt.Errorf("go version output: expected 'go version <version>', got '%s'", b.String())
+ return
+ }
+ state.goVersion = sp[2]
+ })
+ return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
func (state *golistState) getPkgPath(dir string) (string, bool, error) {
absDir, err := filepath.Abs(dir)
if err != nil {
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
index 338d6f62..874f9013 100644
--- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -8,9 +8,12 @@ import (
"log"
"os"
"path/filepath"
+ "regexp"
"sort"
"strconv"
"strings"
+
+ "golang.org/x/tools/internal/gocommand"
)
// processGolistOverlay provides rudimentary support for adding
@@ -89,9 +92,19 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif
// because the file is generated in another directory.
testVariantOf = p
continue nextPackage
+ } else if !isTestFile && hasTestFiles(p) {
+ // We're examining a test variant, but the overlaid file is
+ // a non-test file. Because the overlay implementation
+ // (currently) only adds a file to one package, skip this
+ // package, so that we can add the file to the production
+ // variant of the package. (https://golang.org/issue/36857
+ // tracks handling overlays on both the production and test
+ // variant of a package).
+ continue nextPackage
}
- // We must have already seen the package of which this is a test variant.
if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
+ // We have already seen the production version of the
+ // package for which p is a test variant.
if hasTestFiles(p) {
testVariantOf = pkg
}
@@ -102,8 +115,11 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif
}
}
}
- // The overlay could have included an entirely new package.
- if pkg == nil {
+ // The overlay could have included an entirely new package or an
+ // ad-hoc package. An ad-hoc package is one that we have manually
+ // constructed from inadequate `go list` results for a file= query.
+ // It will have the ID command-line-arguments.
+ if pkg == nil || pkg.ID == "command-line-arguments" {
// Try to find the module or gopath dir the file is contained in.
// Then for modules, add the module path to the beginning.
pkgPath, ok, err := state.getPkgPath(dir)
@@ -127,35 +143,40 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif
id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
}
}
- // Try to reclaim a package with the same ID, if it exists in the response.
- for _, p := range response.dr.Packages {
- if reclaimPackage(p, id, opath, contents) {
- pkg = p
- break
- }
- }
- // Otherwise, create a new package.
- if pkg == nil {
- pkg = &Package{
- PkgPath: pkgPath,
- ID: id,
- Name: pkgName,
- Imports: make(map[string]*Package),
- }
- response.addPackage(pkg)
- havePkgs[pkg.PkgPath] = id
- // Add the production package's sources for a test variant.
- if isTestFile && !isXTest && testVariantOf != nil {
- pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
- pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
- // Add the package under test and its imports to the test variant.
- pkg.forTest = testVariantOf.PkgPath
- for k, v := range testVariantOf.Imports {
- pkg.Imports[k] = &Package{ID: v.ID}
+ if pkg != nil {
+ // TODO(rstambler): We should change the package's path and ID
+ // here. The only issue is that this messes with the roots.
+ } else {
+ // Try to reclaim a package with the same ID, if it exists in the response.
+ for _, p := range response.dr.Packages {
+ if reclaimPackage(p, id, opath, contents) {
+ pkg = p
+ break
}
}
- if isXTest {
- pkg.forTest = forTest
+ // Otherwise, create a new package.
+ if pkg == nil {
+ pkg = &Package{
+ PkgPath: pkgPath,
+ ID: id,
+ Name: pkgName,
+ Imports: make(map[string]*Package),
+ }
+ response.addPackage(pkg)
+ havePkgs[pkg.PkgPath] = id
+ // Add the production package's sources for a test variant.
+ if isTestFile && !isXTest && testVariantOf != nil {
+ pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
+ pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
+ // Add the package under test and its imports to the test variant.
+ pkg.forTest = testVariantOf.PkgPath
+ for k, v := range testVariantOf.Imports {
+ pkg.Imports[k] = &Package{ID: v.ID}
+ }
+ }
+ if isXTest {
+ pkg.forTest = forTest
+ }
}
}
}
@@ -307,24 +328,25 @@ func (state *golistState) determineRootDirs() (map[string]string, error) {
}
func (state *golistState) determineRootDirsModules() (map[string]string, error) {
- // This will only return the root directory for the main module.
- // For now we only support overlays in main modules.
+ // List all of the modules--the first will be the directory for the main
+ // module. Any replaced modules will also need to be treated as roots.
// Editing files in the module cache isn't a great idea, so we don't
- // plan to ever support that, but editing files in replaced modules
- // is something we may want to support. To do that, we'll want to
- // do a go list -m to determine the replaced module's module path and
- // directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}}
- // from the main module to determine if that module is actually a replacement.
- // See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751
- // for more information.
- out, err := state.invokeGo("list", "-m", "-json")
+ // plan to ever support that.
+ out, err := state.invokeGo("list", "-m", "-json", "all")
if err != nil {
- return nil, err
+ // 'go list all' will fail if we're outside of a module and
+ // GO111MODULE=on. Try falling back without 'all'.
+ var innerErr error
+ out, innerErr = state.invokeGo("list", "-m", "-json")
+ if innerErr != nil {
+ return nil, err
+ }
}
- m := map[string]string{}
- type jsonMod struct{ Path, Dir string }
+ roots := map[string]string{}
+ modules := map[string]string{}
+ var i int
for dec := json.NewDecoder(out); dec.More(); {
- mod := new(jsonMod)
+ mod := new(gocommand.ModuleJSON)
if err := dec.Decode(mod); err != nil {
return nil, err
}
@@ -334,10 +356,15 @@ func (state *golistState) determineRootDirsModules() (map[string]string, error)
if err != nil {
return nil, err
}
- m[absDir] = mod.Path
+ modules[absDir] = mod.Path
+ // The first result is the main module.
+ if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
+ roots[absDir] = mod.Path
+ }
}
+ i++
}
- return m, nil
+ return roots, nil
}
func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
@@ -463,3 +490,79 @@ func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package)
p.Name = newName
}
}
+
+// This function is copy-pasted from
+// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360.
+// It should be deleted when we remove support for overlays from go/packages.
+//
+// NOTE: This does not handle any ./... or ./ style queries, as this function
+// doesn't know the working directory.
+//
+// matchPattern(pattern)(name) reports whether
+// name matches pattern. Pattern is a limited glob
+// pattern in which '...' means 'any string' and there
+// is no other special syntax.
+// Unfortunately, there are two special cases. Quoting "go help packages":
+//
+// First, /... at the end of the pattern can match an empty string,
+// so that net/... matches both net and packages in its subdirectories, like net/http.
+// Second, any slash-separated pattern element containing a wildcard never
+// participates in a match of the "vendor" element in the path of a vendored
+// package, so that ./... does not match packages in subdirectories of
+// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do.
+// Note, however, that a directory named vendor that itself contains code
+// is not a vendored package: cmd/vendor would be a command named vendor,
+// and the pattern cmd/... matches it.
+func matchPattern(pattern string) func(name string) bool {
+ // Convert pattern to regular expression.
+ // The strategy for the trailing /... is to nest it in an explicit ? expression.
+ // The strategy for the vendor exclusion is to change the unmatchable
+ // vendor strings to a disallowed code point (vendorChar) and to use
+ // "(anything but that codepoint)*" as the implementation of the ... wildcard.
+ // This is a bit complicated but the obvious alternative,
+ // namely a hand-written search like in most shell glob matchers,
+ // is too easy to make accidentally exponential.
+ // Using package regexp guarantees linear-time matching.
+
+ const vendorChar = "\x00"
+
+ if strings.Contains(pattern, vendorChar) {
+ return func(name string) bool { return false }
+ }
+
+ re := regexp.QuoteMeta(pattern)
+ re = replaceVendor(re, vendorChar)
+ switch {
+ case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`):
+ re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)`
+ case re == vendorChar+`/\.\.\.`:
+ re = `(/vendor|/` + vendorChar + `/\.\.\.)`
+ case strings.HasSuffix(re, `/\.\.\.`):
+ re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?`
+ }
+ re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`)
+
+ reg := regexp.MustCompile(`^` + re + `$`)
+
+ return func(name string) bool {
+ if strings.Contains(name, vendorChar) {
+ return false
+ }
+ return reg.MatchString(replaceVendor(name, vendorChar))
+ }
+}
+
+// replaceVendor returns the result of replacing
+// non-trailing vendor path elements in x with repl.
+func replaceVendor(x, repl string) string {
+ if !strings.Contains(x, "vendor") {
+ return x
+ }
+ elem := strings.Split(x, "/")
+ for i := 0; i < len(elem)-1; i++ {
+ if elem[i] == "vendor" {
+ elem[i] = repl
+ }
+ }
+ return strings.Join(elem, "/")
+}
diff --git a/vendor/golang.org/x/tools/go/ssa/testmain.go b/vendor/golang.org/x/tools/go/ssa/testmain.go
index 4bf8d98f..c4256d1e 100644
--- a/vendor/golang.org/x/tools/go/ssa/testmain.go
+++ b/vendor/golang.org/x/tools/go/ssa/testmain.go
@@ -222,6 +222,7 @@ type deps struct{}
func (deps) ImportPath() string { return "" }
func (deps) MatchString(pat, str string) (bool, error) { return true, nil }
+func (deps) SetPanicOnExit0(bool) {}
func (deps) StartCPUProfile(io.Writer) error { return nil }
func (deps) StartTestLog(io.Writer) {}
func (deps) StopCPUProfile() {}
diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go
index 83f4e447..a4e40adb 100644
--- a/vendor/golang.org/x/tools/imports/forward.go
+++ b/vendor/golang.org/x/tools/imports/forward.go
@@ -3,8 +3,10 @@
package imports // import "golang.org/x/tools/imports"
import (
+ "io/ioutil"
"log"
+ "golang.org/x/tools/internal/gocommand"
intimp "golang.org/x/tools/internal/imports"
)
@@ -29,25 +31,34 @@ var Debug = false
var LocalPrefix string
// Process formats and adjusts imports for the provided file.
-// If opt is nil the defaults are used.
+// If opt is nil the defaults are used, and if src is nil the source
+// is read from the filesystem.
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
+ var err error
+ if src == nil {
+ src, err = ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ }
if opt == nil {
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
}
intopt := &intimp.Options{
Env: &intimp.ProcessEnv{
- LocalPrefix: LocalPrefix,
+ GocmdRunner: &gocommand.Runner{},
},
- AllErrors: opt.AllErrors,
- Comments: opt.Comments,
- FormatOnly: opt.FormatOnly,
- Fragment: opt.Fragment,
- TabIndent: opt.TabIndent,
- TabWidth: opt.TabWidth,
+ LocalPrefix: LocalPrefix,
+ AllErrors: opt.AllErrors,
+ Comments: opt.Comments,
+ FormatOnly: opt.FormatOnly,
+ Fragment: opt.Fragment,
+ TabIndent: opt.TabIndent,
+ TabWidth: opt.TabWidth,
}
if Debug {
intopt.Env.Logf = log.Printf
diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
index 14b96a79..01f6e829 100644
--- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
+++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
@@ -14,6 +14,12 @@ import (
"strings"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/lsp/fuzzy"
+)
+
+var (
+ GetTypeErrors func(p interface{}) []types.Error
+ SetTypeErrors func(p interface{}, errors []types.Error)
)
func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
@@ -45,7 +51,7 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T
default:
panic("unknown basic type")
}
- case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice:
+ case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array:
return ast.NewIdent("nil")
case *types.Struct:
texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here.
@@ -55,21 +61,23 @@ func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.T
return &ast.CompositeLit{
Type: texpr,
}
- case *types.Array:
- texpr := TypeExpr(fset, f, pkg, u.Elem())
- if texpr == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: &ast.ArrayType{
- Elt: texpr,
- Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())},
- },
- }
}
return nil
}
+// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of
+// analysisinternal.ZeroValue)
+func IsZeroValue(expr ast.Expr) bool {
+ switch e := expr.(type) {
+ case *ast.BasicLit:
+ return e.Value == "0" || e.Value == `""`
+ case *ast.Ident:
+ return e.Name == "nil" || e.Name == "false"
+ default:
+ return false
+ }
+}
+
func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
case *types.Basic:
@@ -79,7 +87,96 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
default:
return ast.NewIdent(t.Name())
}
+ case *types.Pointer:
+ x := TypeExpr(fset, f, pkg, t.Elem())
+ if x == nil {
+ return nil
+ }
+ return &ast.UnaryExpr{
+ Op: token.MUL,
+ X: x,
+ }
+ case *types.Array:
+ elt := TypeExpr(fset, f, pkg, t.Elem())
+ if elt == nil {
+ return nil
+ }
+ return &ast.ArrayType{
+ Len: &ast.BasicLit{
+ Kind: token.INT,
+ Value: fmt.Sprintf("%d", t.Len()),
+ },
+ Elt: elt,
+ }
+ case *types.Slice:
+ elt := TypeExpr(fset, f, pkg, t.Elem())
+ if elt == nil {
+ return nil
+ }
+ return &ast.ArrayType{
+ Elt: elt,
+ }
+ case *types.Map:
+ key := TypeExpr(fset, f, pkg, t.Key())
+ value := TypeExpr(fset, f, pkg, t.Elem())
+ if key == nil || value == nil {
+ return nil
+ }
+ return &ast.MapType{
+ Key: key,
+ Value: value,
+ }
+ case *types.Chan:
+ dir := ast.ChanDir(t.Dir())
+ if t.Dir() == types.SendRecv {
+ dir = ast.SEND | ast.RECV
+ }
+ value := TypeExpr(fset, f, pkg, t.Elem())
+ if value == nil {
+ return nil
+ }
+ return &ast.ChanType{
+ Dir: dir,
+ Value: value,
+ }
+ case *types.Signature:
+ var params []*ast.Field
+ for i := 0; i < t.Params().Len(); i++ {
+ p := TypeExpr(fset, f, pkg, t.Params().At(i).Type())
+ if p == nil {
+ return nil
+ }
+ params = append(params, &ast.Field{
+ Type: p,
+ Names: []*ast.Ident{
+ {
+ Name: t.Params().At(i).Name(),
+ },
+ },
+ })
+ }
+ var returns []*ast.Field
+ for i := 0; i < t.Results().Len(); i++ {
+ r := TypeExpr(fset, f, pkg, t.Results().At(i).Type())
+ if r == nil {
+ return nil
+ }
+ returns = append(returns, &ast.Field{
+ Type: r,
+ })
+ }
+ return &ast.FuncType{
+ Params: &ast.FieldList{
+ List: params,
+ },
+ Results: &ast.FieldList{
+ List: returns,
+ },
+ }
case *types.Named:
+ if t.Obj().Pkg() == nil {
+ return ast.NewIdent(t.Obj().Name())
+ }
if t.Obj().Pkg() == pkg {
return ast.NewIdent(t.Obj().Name())
}
@@ -101,19 +198,15 @@ func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Ty
X: ast.NewIdent(pkgName),
Sel: ast.NewIdent(t.Obj().Name()),
}
- case *types.Pointer:
- return &ast.UnaryExpr{
- Op: token.MUL,
- X: TypeExpr(fset, f, pkg, t.Elem()),
- }
+ case *types.Struct:
+ return ast.NewIdent(t.String())
+ case *types.Interface:
+ return ast.NewIdent(t.String())
default:
- return nil // TODO: anonymous structs, but who does that
+ return nil
}
}
-var GetTypeErrors = func(p interface{}) []types.Error { return nil }
-var SetTypeErrors = func(p interface{}, errors []types.Error) {}
-
type TypeErrorPass string
const (
@@ -121,3 +214,212 @@ const (
NoResultValues TypeErrorPass = "noresultvalues"
UndeclaredName TypeErrorPass = "undeclaredname"
)
+
+// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable.
+// Some examples:
+//
+// Basic Example:
+// z := 1
+// y := z + x
+// If x is undeclared, then this function would return `y := z + x`, so that we
+// can insert `x := ` on the line before `y := z + x`.
+//
+// If stmt example:
+// if z == 1 {
+// } else if z == y {}
+// If y is undeclared, then this function would return `if z == 1 {`, because we cannot
+// insert a statement between an if and an else if statement. As a result, we need to find
+// the top of the if chain to insert `y := ` before.
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt {
+ enclosingIndex := -1
+ for i, p := range path {
+ if _, ok := p.(ast.Stmt); ok {
+ enclosingIndex = i
+ break
+ }
+ }
+ if enclosingIndex == -1 {
+ return nil
+ }
+ enclosingStmt := path[enclosingIndex]
+ switch enclosingStmt.(type) {
+ case *ast.IfStmt:
+ // The enclosingStmt is inside of the if declaration,
+ // We need to check if we are in an else-if stmt and
+ // get the base if statement.
+ return baseIfStmt(path, enclosingIndex)
+ case *ast.CaseClause:
+ // Get the enclosing switch stmt if the enclosingStmt is
+ // inside of the case statement.
+ for i := enclosingIndex + 1; i < len(path); i++ {
+ if node, ok := path[i].(*ast.SwitchStmt); ok {
+ return node
+ } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok {
+ return node
+ }
+ }
+ }
+ if len(path) <= enclosingIndex+1 {
+ return enclosingStmt.(ast.Stmt)
+ }
+ // Check if the enclosing statement is inside another node.
+ switch expr := path[enclosingIndex+1].(type) {
+ case *ast.IfStmt:
+ // Get the base if statement.
+ return baseIfStmt(path, enclosingIndex+1)
+ case *ast.ForStmt:
+ if expr.Init == enclosingStmt || expr.Post == enclosingStmt {
+ return expr
+ }
+ }
+ return enclosingStmt.(ast.Stmt)
+}
+
+// baseIfStmt walks up the if/else-if chain until we get to
+// the top of the current if chain.
+func baseIfStmt(path []ast.Node, index int) ast.Stmt {
+ stmt := path[index]
+ for i := index + 1; i < len(path); i++ {
+ if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt {
+ stmt = node
+ continue
+ }
+ break
+ }
+ return stmt.(ast.Stmt)
+}
+
+// WalkASTWithParent walks the AST rooted at n. The semantics are
+// similar to ast.Inspect except it does not call f(nil).
+func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
+ var ancestors []ast.Node
+ ast.Inspect(n, func(n ast.Node) (recurse bool) {
+ if n == nil {
+ ancestors = ancestors[:len(ancestors)-1]
+ return false
+ }
+
+ var parent ast.Node
+ if len(ancestors) > 0 {
+ parent = ancestors[len(ancestors)-1]
+ }
+ ancestors = append(ancestors, n)
+ return f(n, parent)
+ })
+}
+
+// FindMatchingIdents finds all identifiers in 'node' that match any of the given types.
+// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within
+// the scope of each identifier we select. Otherwise, we will insert a variable at 'pos' that
+// is unrecognized.
+func FindMatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]*ast.Ident {
+ matches := map[types.Type][]*ast.Ident{}
+ // Initialize matches to contain the variable types we are searching for.
+ for _, typ := range typs {
+ if typ == nil {
+ continue
+ }
+ matches[typ] = []*ast.Ident{}
+ }
+ seen := map[types.Object]struct{}{}
+ ast.Inspect(node, func(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ // Prevent circular definitions. If 'pos' is within an assignment statement, do not
+ // allow any identifiers in that assignment statement to be selected. Otherwise,
+ // we could do the following, where 'x' satisfies the type of 'f0':
+ //
+ // x := fakeStruct{f0: x}
+ //
+ assignment, ok := n.(*ast.AssignStmt)
+ if ok && pos > assignment.Pos() && pos <= assignment.End() {
+ return false
+ }
+ if n.End() > pos {
+ return n.Pos() <= pos
+ }
+ ident, ok := n.(*ast.Ident)
+ if !ok || ident.Name == "_" {
+ return true
+ }
+ obj := info.Defs[ident]
+ if obj == nil || obj.Type() == nil {
+ return true
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return true
+ }
+ // Prevent duplicates in matches' values.
+ if _, ok = seen[obj]; ok {
+ return true
+ }
+ seen[obj] = struct{}{}
+ // Find the scope for the given position. Then, check whether the object
+ // exists within the scope.
+ innerScope := pkg.Scope().Innermost(pos)
+ if innerScope == nil {
+ return true
+ }
+ _, foundObj := innerScope.LookupParent(ident.Name, pos)
+ if foundObj != obj {
+ return true
+ }
+ // The object must match one of the types that we are searching for.
+ if idents, ok := matches[obj.Type()]; ok {
+ matches[obj.Type()] = append(idents, ast.NewIdent(ident.Name))
+ }
+ // If the object type does not exactly match any of the target types, greedily
+ // find the first target type that the object type can satisfy.
+ for typ := range matches {
+ if obj.Type() == typ {
+ continue
+ }
+ if equivalentTypes(obj.Type(), typ) {
+ matches[typ] = append(matches[typ], ast.NewIdent(ident.Name))
+ }
+ }
+ return true
+ })
+ return matches
+}
+
+func equivalentTypes(want, got types.Type) bool {
+ if want == got || types.Identical(want, got) {
+ return true
+ }
+ // Code segment to help check for untyped equality from (golang/go#32146).
+ if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 {
+ if lhs, ok := got.Underlying().(*types.Basic); ok {
+ return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType
+ }
+ }
+ return types.AssignableTo(want, got)
+}
+
+// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the
+// given pattern. We return the identifier whose name is most similar to the pattern.
+func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr {
+ fuzz := fuzzy.NewMatcher(pattern)
+ var bestFuzz ast.Expr
+ highScore := float32(0) // minimum score is 0 (no match)
+ for _, ident := range idents {
+ // TODO: Improve scoring algorithm.
+ score := fuzz.Score(ident.Name)
+ if score > highScore {
+ highScore = score
+ bestFuzz = ident
+ } else if score == 0 {
+ // Order matters in the fuzzy matching algorithm. If we find no match
+ // when matching the target to the identifier, try matching the identifier
+ // to the target.
+ revFuzz := fuzzy.NewMatcher(ident.Name)
+ revScore := revFuzz.Score(pattern)
+ if revScore > highScore {
+ highScore = revScore
+ bestFuzz = ident
+ }
+ }
+ }
+ return bestFuzz
+}
diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
index 390cb9db..925ff535 100644
--- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
+++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -10,7 +10,6 @@ import (
"bufio"
"bytes"
"fmt"
- "go/build"
"io/ioutil"
"log"
"os"
@@ -47,16 +46,6 @@ type Root struct {
Type RootType
}
-// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
-func SrcDirsRoots(ctx *build.Context) []Root {
- var roots []Root
- roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT})
- for _, p := range filepath.SplitList(ctx.GOPATH) {
- roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
- }
- return roots
-}
-
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// For each package found, add will be called (concurrently) with the absolute
// paths of the containing source directory and the package directory.
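
Note: with SrcDirsRoots removed, callers of gopathwalk are expected to assemble the roots themselves from the evaluated go env values, as the fix.go hunk later in this diff does. A rough sketch of the replacement pattern, where goroot and gopath stand for the values read from the environment:

    var roots []gopathwalk.Root
    roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "src"), gopathwalk.RootGOROOT})
    for _, p := range filepath.SplitList(gopath) {
        roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH})
    }
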
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 2c703076..613afc4d 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -32,25 +32,25 @@ import (
// importToGroup is a list of functions which map from an import path to
// a group number.
-var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){
- func(env *ProcessEnv, importPath string) (num int, ok bool) {
- if env.LocalPrefix == "" {
+var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){
+ func(localPrefix, importPath string) (num int, ok bool) {
+ if localPrefix == "" {
return
}
- for _, p := range strings.Split(env.LocalPrefix, ",") {
+ for _, p := range strings.Split(localPrefix, ",") {
if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath {
return 3, true
}
}
return
},
- func(_ *ProcessEnv, importPath string) (num int, ok bool) {
+ func(_, importPath string) (num int, ok bool) {
if strings.HasPrefix(importPath, "appengine") {
return 2, true
}
return
},
- func(_ *ProcessEnv, importPath string) (num int, ok bool) {
+ func(_, importPath string) (num int, ok bool) {
firstComponent := strings.Split(importPath, "/")[0]
if strings.Contains(firstComponent, ".") {
return 1, true
@@ -59,9 +59,9 @@ var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool
},
}
-func importGroup(env *ProcessEnv, importPath string) int {
+func importGroup(localPrefix, importPath string) int {
for _, fn := range importToGroup {
- if n, ok := fn(env, importPath); ok {
+ if n, ok := fn(localPrefix, importPath); ok {
return n
}
}
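
Note: the rewritten importToGroup/importGroup now classify an import path into a sort group using only the localPrefix string. As a worked example, assuming localPrefix is "example.com/mymodule" (the default group 0 for the standard library comes from the fall-through return outside this hunk):

    importGroup("example.com/mymodule", "fmt")                       // 0: standard library
    importGroup("example.com/mymodule", "github.com/pkg/errors")     // 1: third-party (first component contains a dot)
    importGroup("example.com/mymodule", "appengine/datastore")       // 2: appengine
    importGroup("example.com/mymodule", "example.com/mymodule/util") // 3: local prefix, checked first
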
@@ -278,7 +278,12 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
unknown = append(unknown, imp.ImportPath)
}
- names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir)
+ resolver, err := p.env.GetResolver()
+ if err != nil {
+ return err
+ }
+
+ names, err := resolver.loadPackageNames(unknown, p.srcDir)
if err != nil {
return err
}
@@ -568,7 +573,9 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return fixes, nil
}
- addStdlibCandidates(p, p.missingRefs)
+ if err := addStdlibCandidates(p, p.missingRefs); err != nil {
+ return nil, err
+ }
p.assumeSiblingImportsValid()
if fixes, done := p.fix(); done {
return fixes, nil
@@ -596,15 +603,19 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
notSelf := func(p *pkg) bool {
return p.packageName != filePkg || p.dir != filepath.Dir(filename)
}
+ goenv, err := env.goEnv()
+ if err != nil {
+ return err
+ }
// Start off with the standard library.
for importPath, exports := range stdlib {
p := &pkg{
- dir: filepath.Join(env.goroot(), "src", importPath),
+ dir: filepath.Join(goenv["GOROOT"], "src", importPath),
importPathShort: importPath,
packageName: path.Base(importPath),
relevance: MaxRelevance,
}
- if notSelf(p) && wrappedCallback.packageNameLoaded(p) {
+ if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) {
wrappedCallback.exportsLoaded(p, exports)
}
}
@@ -640,15 +651,23 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
wrappedCallback.exportsLoaded(pkg, exports)
},
}
- return env.GetResolver().scan(ctx, scanFilter)
+ resolver, err := env.GetResolver()
+ if err != nil {
+ return err
+ }
+ return resolver.scan(ctx, scanFilter)
}
-func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int {
+func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]int, error) {
result := make(map[string]int)
- for _, path := range paths {
- result[path] = env.GetResolver().scoreImportPath(ctx, path)
+ resolver, err := env.GetResolver()
+ if err != nil {
+ return nil, err
}
- return result
+ for _, path := range paths {
+ result[path] = resolver.scoreImportPath(ctx, path)
+ }
+ return result, nil
}
func PrimeCache(ctx context.Context, env *ProcessEnv) error {
@@ -674,8 +693,9 @@ func candidateImportName(pkg *pkg) string {
return ""
}
-// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed.
-func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
+// GetAllCandidates calls wrapped for each package whose name starts with
+// searchPrefix, and can be imported from filename with the package name filePkg.
+func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
@@ -708,13 +728,43 @@ func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}
+// GetImportPaths calls wrapped for each package whose import path starts with
+// searchPrefix, and can be imported from filename with the package name filePkg.
+func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
+ callback := &scanCallback{
+ rootFound: func(gopathwalk.Root) bool {
+ return true
+ },
+ dirFound: func(pkg *pkg) bool {
+ if !canUse(filename, pkg.dir) {
+ return false
+ }
+ return strings.HasPrefix(pkg.importPathShort, searchPrefix)
+ },
+ packageNameLoaded: func(pkg *pkg) bool {
+ wrapped(ImportFix{
+ StmtInfo: ImportInfo{
+ ImportPath: pkg.importPathShort,
+ Name: candidateImportName(pkg),
+ },
+ IdentName: pkg.packageName,
+ FixType: AddImport,
+ Relevance: pkg.relevance,
+ })
+ return false
+ },
+ }
+ return getCandidatePkgs(ctx, callback, filename, filePkg, env)
+}
+
// A PackageExport is a package and its exports.
type PackageExport struct {
Fix *ImportFix
Exports []string
}
-func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error {
+// GetPackageExports returns all known packages with name pkg and their exports.
+func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
@@ -749,8 +799,6 @@ var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD"
// ProcessEnv contains environment variables and settings that affect the use of
// the go command, the go/build package, etc.
type ProcessEnv struct {
- LocalPrefix string
-
GocmdRunner *gocommand.Runner
BuildFlags []string
@@ -766,33 +814,44 @@ type ProcessEnv struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
+ initialized bool
+
resolver Resolver
}
-func (e *ProcessEnv) goroot() string {
- return e.mustGetEnv("GOROOT")
-}
-
-func (e *ProcessEnv) gopath() string {
- return e.mustGetEnv("GOPATH")
-}
-
-func (e *ProcessEnv) mustGetEnv(k string) string {
- v, ok := e.Env[k]
- if !ok {
- panic(fmt.Sprintf("%v not set in evaluated environment", k))
+func (e *ProcessEnv) goEnv() (map[string]string, error) {
+ if err := e.init(); err != nil {
+ return nil, err
}
- return v
+ return e.Env, nil
+}
+
+func (e *ProcessEnv) matchFile(dir, name string) (bool, error) {
+ return build.Default.MatchFile(dir, name)
}
// CopyConfig copies the env's configuration into a new env.
func (e *ProcessEnv) CopyConfig() *ProcessEnv {
- copy := *e
- copy.resolver = nil
- return ©
+ copy := &ProcessEnv{
+ GocmdRunner: e.GocmdRunner,
+ initialized: e.initialized,
+ BuildFlags: e.BuildFlags,
+ Logf: e.Logf,
+ WorkingDir: e.WorkingDir,
+ resolver: nil,
+ Env: map[string]string{},
+ }
+ for k, v := range e.Env {
+ copy.Env[k] = v
+ }
+ return copy
}
func (e *ProcessEnv) init() error {
+ if e.initialized {
+ return nil
+ }
+
foundAllRequired := true
for _, k := range RequiredGoEnvVars {
if _, ok := e.Env[k]; !ok {
@@ -801,6 +860,7 @@ func (e *ProcessEnv) init() error {
}
}
if foundAllRequired {
+ e.initialized = true
return nil
}
@@ -819,6 +879,7 @@ func (e *ProcessEnv) init() error {
for k, v := range goEnv {
e.Env[k] = v
}
+ e.initialized = true
return nil
}
@@ -830,22 +891,29 @@ func (e *ProcessEnv) env() []string {
return env
}
-func (e *ProcessEnv) GetResolver() Resolver {
+func (e *ProcessEnv) GetResolver() (Resolver, error) {
if e.resolver != nil {
- return e.resolver
+ return e.resolver, nil
+ }
+ if err := e.init(); err != nil {
+ return nil, err
}
if len(e.Env["GOMOD"]) == 0 {
e.resolver = newGopathResolver(e)
- return e.resolver
+ return e.resolver, nil
}
e.resolver = newModuleResolver(e)
- return e.resolver
+ return e.resolver, nil
}
-func (e *ProcessEnv) buildContext() *build.Context {
+func (e *ProcessEnv) buildContext() (*build.Context, error) {
ctx := build.Default
- ctx.GOROOT = e.goroot()
- ctx.GOPATH = e.gopath()
+ goenv, err := e.goEnv()
+ if err != nil {
+ return nil, err
+ }
+ ctx.GOROOT = goenv["GOROOT"]
+ ctx.GOPATH = goenv["GOPATH"]
// As of Go 1.14, build.Context has a Dir field
// (see golang.org/issue/34860).
@@ -861,7 +929,7 @@ func (e *ProcessEnv) buildContext() *build.Context {
dir.SetString(e.WorkingDir)
}
- return &ctx
+ return &ctx, nil
}
func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) {
@@ -876,10 +944,14 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
return e.GocmdRunner.Run(ctx, inv)
}
-func addStdlibCandidates(pass *pass, refs references) {
+func addStdlibCandidates(pass *pass, refs references) error {
+ goenv, err := pass.env.goEnv()
+ if err != nil {
+ return err
+ }
add := func(pkg string) {
// Prevent self-imports.
- if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.goroot(), "src", pkg) == pass.srcDir {
+ if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
return
}
exports := copyExports(stdlib[pkg])
@@ -900,6 +972,7 @@ func addStdlibCandidates(pass *pass, refs references) {
}
}
}
+ return nil
}
// A Resolver does the build-system-specific parts of goimports.
@@ -964,10 +1037,13 @@ func addExternalCandidates(pass *pass, refs references, filename string) error {
return false // We'll do our own loading after we sort.
},
}
- err := pass.env.GetResolver().scan(context.Background(), callback)
+ resolver, err := pass.env.GetResolver()
if err != nil {
return err
}
+ if err = resolver.scan(context.Background(), callback); err != nil {
+ return err
+ }
// Search for imports matching potential package references.
type result struct {
@@ -1093,21 +1169,24 @@ func (r *gopathResolver) ClearForNewScan() {
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
names := map[string]string{}
+ bctx, err := r.env.buildContext()
+ if err != nil {
+ return nil, err
+ }
for _, path := range importPaths {
- names[path] = importPathToName(r.env, path, srcDir)
+ names[path] = importPathToName(bctx, path, srcDir)
}
return names, nil
}
// importPathToName finds out the actual package name, as declared in its .go files.
-// If there's a problem, it returns "".
-func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) {
+func importPathToName(bctx *build.Context, importPath, srcDir string) string {
// Fast path for standard library without going to disk.
if _, ok := stdlib[importPath]; ok {
return path.Base(importPath) // stdlib packages always match their paths.
}
- buildPkg, err := env.buildContext().Import(importPath, srcDir, build.FindOnly)
+ buildPkg, err := bctx.Import(importPath, srcDir, build.FindOnly)
if err != nil {
return ""
}
@@ -1268,8 +1347,18 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error
}
stop := r.cache.ScanAndListen(ctx, processDir)
defer stop()
+
+ goenv, err := r.env.goEnv()
+ if err != nil {
+ return err
+ }
+ var roots []gopathwalk.Root
+ roots = append(roots, gopathwalk.Root{filepath.Join(goenv["GOROOT"], "src"), gopathwalk.RootGOROOT})
+ for _, p := range filepath.SplitList(goenv["GOPATH"]) {
+ roots = append(roots, gopathwalk.Root{filepath.Join(p, "src"), gopathwalk.RootGOPATH})
+ }
// The callback is not necessarily safe to use in the goroutine below. Process roots eagerly.
- roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound)
+ roots = filterRoots(roots, callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
// cache. Instead, run them in a separate goroutine and detach.
scanDone := make(chan struct{})
@@ -1329,8 +1418,6 @@ func VendorlessPath(ipath string) string {
}
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) {
- var exports []string
-
// Look for non-test, buildable .go files which could provide exports.
all, err := ioutil.ReadDir(dir)
if err != nil {
@@ -1342,7 +1429,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) {
continue
}
- match, err := env.buildContext().MatchFile(dir, fi.Name())
+ match, err := env.matchFile(dir, fi.Name())
if err != nil || !match {
continue
}
@@ -1354,6 +1441,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
var pkgName string
+ var exports []string
fset := token.NewFileSet()
for _, fi := range files {
select {
@@ -1408,6 +1496,10 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
}
+ resolver, err := pass.env.GetResolver()
+ if err != nil {
+ return nil, err
+ }
// Collect exports for packages with matching names.
rescv := make([]chan *pkg, len(candidates))
@@ -1446,7 +1538,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
}
// If we're an x_test, load the package under test's test variant.
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
- _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest)
+ _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
if err != nil {
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index 04ecdfda..2815edc3 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -11,7 +11,6 @@ package imports
import (
"bufio"
"bytes"
- "context"
"fmt"
"go/ast"
"go/format"
@@ -19,19 +18,22 @@ import (
"go/printer"
"go/token"
"io"
- "io/ioutil"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/internal/gocommand"
)
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
type Options struct {
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
+ // LocalPrefix is a comma-separated string of import path prefixes, which, if
+ // set, instructs Process to sort the import paths with the given prefixes
+ // into another group after 3rd-party packages.
+ LocalPrefix string
+
Fragment bool // Accept fragment of a source file (no package statement)
AllErrors bool // Report all errors (not just the first 10 on different lines)
@@ -42,13 +44,8 @@ type Options struct {
FormatOnly bool // Disable the insertion and deletion of imports
}
-// Process implements golang.org/x/tools/imports.Process with explicit context in env.
+// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
- src, opt, err = initialize(filename, src, opt)
- if err != nil {
- return nil, err
- }
-
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
@@ -64,16 +61,12 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
}
// FixImports returns a list of fixes to the imports that, when applied,
-// will leave the imports in the same state as Process.
+// will leave the imports in the same state as Process. src and opt must
+// be specified.
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
- src, opt, err = initialize(filename, src, opt)
- if err != nil {
- return nil, err
- }
-
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
if err != nil {
@@ -84,13 +77,9 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix,
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
-// is added in when parsing the file.
+// is added in when parsing the file. src and opts must be specified, but no
+// env is needed.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
- src, opt, err = initialize(filename, src, opt)
- if err != nil {
- return nil, err
- }
-
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
@@ -114,60 +103,9 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
return formatFile(fileSet, file, src, nil, opt)
}
-// GetAllCandidates gets all of the packages starting with prefix that can be
-// imported by filename, sorted by import path.
-func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error {
- _, opt, err := initialize(filename, []byte{}, opt)
- if err != nil {
- return err
- }
- return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env)
-}
-
-// GetPackageExports returns all known packages with name pkg and their exports.
-func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error {
- _, opt, err := initialize(filename, []byte{}, opt)
- if err != nil {
- return err
- }
- return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env)
-}
-
-// initialize sets the values for opt and src.
-// If they are provided, they are not changed. Otherwise opt is set to the
-// default values and src is read from the file system.
-func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) {
- // Use defaults if opt is nil.
- if opt == nil {
- opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
- }
-
- // Set the env if the user has not provided it.
- if opt.Env == nil {
- opt.Env = &ProcessEnv{}
- }
- // Set the gocmdRunner if the user has not provided it.
- if opt.Env.GocmdRunner == nil {
- opt.Env.GocmdRunner = &gocommand.Runner{}
- }
- if err := opt.Env.init(); err != nil {
- return nil, nil, err
- }
-
- if src == nil {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, nil, err
- }
- src = b
- }
-
- return src, opt, nil
-}
-
func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
- mergeImports(opt.Env, fileSet, file)
- sortImports(opt.Env, fileSet, file)
+ mergeImports(fileSet, file)
+ sortImports(opt.LocalPrefix, fileSet, file)
imps := astutil.Imports(fileSet, file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range imps {
@@ -178,7 +116,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(
lastGroup := -1
for _, importSpec := range impSection {
importPath, _ := strconv.Unquote(importSpec.Path.Value)
- groupNum := importGroup(opt.Env, importPath)
+ groupNum := importGroup(opt.LocalPrefix, importPath)
if groupNum != lastGroup && lastGroup != -1 {
spacesBefore = append(spacesBefore, importPath)
}
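
Note: because initialize was removed, Process/FixImports/ApplyFixes no longer fill in defaults; callers must pass both src and a fully populated Options, and local-import grouping moves from ProcessEnv.LocalPrefix to Options.LocalPrefix. A minimal sketch of the new calling convention (module path and file name are placeholders):

    opt := &imports.Options{
        Comments:    true,
        TabIndent:   true,
        TabWidth:    8,
        LocalPrefix: "example.com/mymodule",
        Env:         env, // a prepared *imports.ProcessEnv as above
    }
    formatted, err := imports.Process("/path/to/file.go", src, opt)
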
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 664fbbf5..94880d61 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -53,6 +53,10 @@ func (r *ModuleResolver) init() error {
return nil
}
+ goenv, err := r.env.goEnv()
+ if err != nil {
+ return err
+ }
inv := gocommand.Invocation{
BuildFlags: r.env.BuildFlags,
Env: r.env.env(),
@@ -82,7 +86,7 @@ func (r *ModuleResolver) init() error {
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
r.moduleCacheDir = gmc
} else {
- r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.gopath())[0], "/pkg/mod")
+ r.moduleCacheDir = filepath.Join(filepath.SplitList(goenv["GOPATH"])[0], "/pkg/mod")
}
sort.Slice(r.modsByModPath, func(i, j int) bool {
@@ -99,7 +103,7 @@ func (r *ModuleResolver) init() error {
})
r.roots = []gopathwalk.Root{
- {filepath.Join(r.env.goroot(), "/src"), gopathwalk.RootGOROOT},
+ {filepath.Join(goenv["GOROOT"], "/src"), gopathwalk.RootGOROOT},
}
if r.main != nil {
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
@@ -240,7 +244,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON,
// files in that directory. If not, it could be provided by an
// outer module. See #29736.
for _, fi := range pkgFiles {
- if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
+ if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok {
return m, pkgDir
}
}
diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go
index 22627947..be8ffa25 100644
--- a/vendor/golang.org/x/tools/internal/imports/sortimports.go
+++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go
@@ -15,7 +15,7 @@ import (
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
-func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
+func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
@@ -40,11 +40,11 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
for j, s := range d.Specs {
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
// j begins a new run. End this one.
- specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...)
+ specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...)
i = j
}
}
- specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...)
+ specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
@@ -60,7 +60,7 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
// mergeImports merges all the import declarations into the first one.
// Taken from golang.org/x/tools/ast/astutil.
-func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
+func mergeImports(fset *token.FileSet, f *ast.File) {
if len(f.Decls) <= 1 {
return
}
@@ -142,7 +142,7 @@ type posSpan struct {
End token.Pos
}
-func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
+func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
@@ -191,7 +191,7 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp
// Reassign the import paths to have the same position sequence.
// Reassign each comment to abut the end of its spec.
// Sort the comments by new position.
- sort.Sort(byImportSpec{env, specs})
+ sort.Sort(byImportSpec{localPrefix, specs})
// Dedup. Thanks to our sorting, we can just consider
// adjacent pairs of imports.
@@ -245,8 +245,8 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp
}
type byImportSpec struct {
- env *ProcessEnv
- specs []ast.Spec // slice of *ast.ImportSpec
+ localPrefix string
+ specs []ast.Spec // slice of *ast.ImportSpec
}
func (x byImportSpec) Len() int { return len(x.specs) }
@@ -255,8 +255,8 @@ func (x byImportSpec) Less(i, j int) bool {
ipath := importPath(x.specs[i])
jpath := importPath(x.specs[j])
- igroup := importGroup(x.env, ipath)
- jgroup := importGroup(x.env, jpath)
+ igroup := importGroup(x.localPrefix, ipath)
+ jgroup := importGroup(x.localPrefix, jpath)
if igroup != jgroup {
return igroup < jgroup
}
diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go
index 16252111..7b573b98 100644
--- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go
+++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go
@@ -56,6 +56,7 @@ var stdlib = map[string][]string{
},
"bufio": []string{
"ErrAdvanceTooFar",
+ "ErrBadReadCount",
"ErrBufferFull",
"ErrFinalToken",
"ErrInvalidUnreadByte",
@@ -303,7 +304,9 @@ var stdlib = map[string][]string{
"PrivateKey",
"PublicKey",
"Sign",
+ "SignASN1",
"Verify",
+ "VerifyASN1",
},
"crypto/ed25519": []string{
"GenerateKey",
@@ -322,11 +325,13 @@ var stdlib = map[string][]string{
"CurveParams",
"GenerateKey",
"Marshal",
+ "MarshalCompressed",
"P224",
"P256",
"P384",
"P521",
"Unmarshal",
+ "UnmarshalCompressed",
},
"crypto/hmac": []string{
"Equal",
@@ -432,6 +437,7 @@ var stdlib = map[string][]string{
"CurveP521",
"Dial",
"DialWithDialer",
+ "Dialer",
"ECDSAWithP256AndSHA256",
"ECDSAWithP384AndSHA384",
"ECDSAWithP521AndSHA512",
@@ -507,6 +513,7 @@ var stdlib = map[string][]string{
"ConstraintViolationError",
"CreateCertificate",
"CreateCertificateRequest",
+ "CreateRevocationList",
"DSA",
"DSAWithSHA1",
"DSAWithSHA256",
@@ -581,6 +588,7 @@ var stdlib = map[string][]string{
"PublicKeyAlgorithm",
"PureEd25519",
"RSA",
+ "RevocationList",
"SHA1WithRSA",
"SHA256WithRSA",
"SHA256WithRSAPSS",
@@ -694,6 +702,7 @@ var stdlib = map[string][]string{
"String",
"Tx",
"TxOptions",
+ "Validator",
"Value",
"ValueConverter",
"Valuer",
@@ -2349,6 +2358,27 @@ var stdlib = map[string][]string{
"IMAGE_DIRECTORY_ENTRY_RESOURCE",
"IMAGE_DIRECTORY_ENTRY_SECURITY",
"IMAGE_DIRECTORY_ENTRY_TLS",
+ "IMAGE_DLLCHARACTERISTICS_APPCONTAINER",
+ "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE",
+ "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY",
+ "IMAGE_DLLCHARACTERISTICS_GUARD_CF",
+ "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA",
+ "IMAGE_DLLCHARACTERISTICS_NO_BIND",
+ "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION",
+ "IMAGE_DLLCHARACTERISTICS_NO_SEH",
+ "IMAGE_DLLCHARACTERISTICS_NX_COMPAT",
+ "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE",
+ "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER",
+ "IMAGE_FILE_32BIT_MACHINE",
+ "IMAGE_FILE_AGGRESIVE_WS_TRIM",
+ "IMAGE_FILE_BYTES_REVERSED_HI",
+ "IMAGE_FILE_BYTES_REVERSED_LO",
+ "IMAGE_FILE_DEBUG_STRIPPED",
+ "IMAGE_FILE_DLL",
+ "IMAGE_FILE_EXECUTABLE_IMAGE",
+ "IMAGE_FILE_LARGE_ADDRESS_AWARE",
+ "IMAGE_FILE_LINE_NUMS_STRIPPED",
+ "IMAGE_FILE_LOCAL_SYMS_STRIPPED",
"IMAGE_FILE_MACHINE_AM33",
"IMAGE_FILE_MACHINE_AMD64",
"IMAGE_FILE_MACHINE_ARM",
@@ -2371,6 +2401,25 @@ var stdlib = map[string][]string{
"IMAGE_FILE_MACHINE_THUMB",
"IMAGE_FILE_MACHINE_UNKNOWN",
"IMAGE_FILE_MACHINE_WCEMIPSV2",
+ "IMAGE_FILE_NET_RUN_FROM_SWAP",
+ "IMAGE_FILE_RELOCS_STRIPPED",
+ "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP",
+ "IMAGE_FILE_SYSTEM",
+ "IMAGE_FILE_UP_SYSTEM_ONLY",
+ "IMAGE_SUBSYSTEM_EFI_APPLICATION",
+ "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER",
+ "IMAGE_SUBSYSTEM_EFI_ROM",
+ "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER",
+ "IMAGE_SUBSYSTEM_NATIVE",
+ "IMAGE_SUBSYSTEM_NATIVE_WINDOWS",
+ "IMAGE_SUBSYSTEM_OS2_CUI",
+ "IMAGE_SUBSYSTEM_POSIX_CUI",
+ "IMAGE_SUBSYSTEM_UNKNOWN",
+ "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION",
+ "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI",
+ "IMAGE_SUBSYSTEM_WINDOWS_CUI",
+ "IMAGE_SUBSYSTEM_WINDOWS_GUI",
+ "IMAGE_SUBSYSTEM_XBOX",
"ImportDirectory",
"NewFile",
"Open",
@@ -4188,6 +4237,7 @@ var stdlib = map[string][]string{
"DevNull",
"Environ",
"ErrClosed",
+ "ErrDeadlineExceeded",
"ErrExist",
"ErrInvalid",
"ErrNoDeadline",
@@ -4646,6 +4696,7 @@ var stdlib = map[string][]string{
"ErrRange",
"ErrSyntax",
"FormatBool",
+ "FormatComplex",
"FormatFloat",
"FormatInt",
"FormatUint",
@@ -4655,6 +4706,7 @@ var stdlib = map[string][]string{
"Itoa",
"NumError",
"ParseBool",
+ "ParseComplex",
"ParseFloat",
"ParseInt",
"ParseUint",
diff --git a/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go b/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go
new file mode 100644
index 00000000..ac377035
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/lsp/fuzzy/input.go
@@ -0,0 +1,168 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzzy
+
+import (
+ "unicode"
+)
+
+// RuneRole specifies the role of a rune in the context of an input.
+type RuneRole byte
+
+const (
+ // RNone specifies a rune without any role in the input (i.e., whitespace/non-ASCII).
+ RNone RuneRole = iota
+ // RSep specifies a rune with the role of segment separator.
+ RSep
+ // RTail specifies a rune which is a lower-case tail in a word in the input.
+ RTail
+ // RUCTail specifies a rune which is an upper-case tail in a word in the input.
+ RUCTail
+ // RHead specifies a rune which is the first character in a word in the input.
+ RHead
+)
+
+// RuneRoles detects the role of each byte rune in an input string and stores the roles in the
+// output slice. The rune role depends on the input type. It stops once it has parsed all the
+// runes in the string or filled the output. If output is nil, a new slice is created.
+func RuneRoles(str string, reuse []RuneRole) []RuneRole {
+ var output []RuneRole
+ if cap(reuse) < len(str) {
+ output = make([]RuneRole, 0, len(str))
+ } else {
+ output = reuse[:0]
+ }
+
+ prev, prev2 := rtNone, rtNone
+ for i := 0; i < len(str); i++ {
+ r := rune(str[i])
+
+ role := RNone
+
+ curr := rtLower
+ if str[i] <= unicode.MaxASCII {
+ curr = runeType(rt[str[i]] - '0')
+ }
+
+ if curr == rtLower {
+ if prev == rtNone || prev == rtPunct {
+ role = RHead
+ } else {
+ role = RTail
+ }
+ } else if curr == rtUpper {
+ role = RHead
+
+ if prev == rtUpper {
+ // This and previous characters are both upper case.
+
+ if i+1 == len(str) {
+ // This is last character, previous was also uppercase -> this is UCTail
+ // i.e., (current char is C): aBC / BC / ABC
+ role = RUCTail
+ }
+ }
+ } else if curr == rtPunct {
+ switch r {
+ case '.', ':':
+ role = RSep
+ }
+ }
+ if curr != rtLower {
+ if i > 1 && output[i-1] == RHead && prev2 == rtUpper && (output[i-2] == RHead || output[i-2] == RUCTail) {
+ // The previous two characters were uppercase. The current one is not a lower case, so the
+ // previous one can't be a HEAD. Make it a UCTail.
+ // i.e., (last char is current char - B must be a UCTail): ABC / ZABC / AB.
+ output[i-1] = RUCTail
+ }
+ }
+
+ output = append(output, role)
+ prev2 = prev
+ prev = curr
+ }
+ return output
+}
+
+type runeType byte
+
+const (
+ rtNone runeType = iota
+ rtPunct
+ rtLower
+ rtUpper
+)
+
+const rt = "00000000000000000000000000000000000000000000001122222222221000000333333333333333333333333330000002222222222222222222222222200000"
+
+// LastSegment returns the substring representing the last segment from the input, where each
+// byte has an associated RuneRole in the roles slice. This makes sense only for inputs of Symbol
+// or Filename type.
+func LastSegment(input string, roles []RuneRole) string {
+ // Exclude ending separators.
+ end := len(input) - 1
+ for end >= 0 && roles[end] == RSep {
+ end--
+ }
+ if end < 0 {
+ return ""
+ }
+
+ start := end - 1
+ for start >= 0 && roles[start] != RSep {
+ start--
+ }
+
+ return input[start+1 : end+1]
+}
+
+// ToLower transforms the input string to lower case, which is stored in the output byte slice.
+// The lower casing considers only ASCII values - non ASCII values are left unmodified.
+// Stops when parsed all input or when it filled the output slice. If output is nil, then it gets
+// created.
+func ToLower(input string, reuse []byte) []byte {
+ output := reuse
+ if cap(reuse) < len(input) {
+ output = make([]byte, len(input))
+ }
+
+ for i := 0; i < len(input); i++ {
+ r := rune(input[i])
+ if r <= unicode.MaxASCII {
+ if 'A' <= r && r <= 'Z' {
+ r += 'a' - 'A'
+ }
+ }
+ output[i] = byte(r)
+ }
+ return output[:len(input)]
+}
+
+// WordConsumer defines a consumer for a word delimited by the [start,end) byte offsets in an input
+// (start is inclusive, end is exclusive).
+type WordConsumer func(start, end int)
+
+// Words finds word delimiters in an input based on its bytes' mappings to rune roles. The offset
+// delimiters for each word are fed to the provided consumer function.
+func Words(roles []RuneRole, consume WordConsumer) {
+ var wordStart int
+ for i, r := range roles {
+ switch r {
+ case RUCTail, RTail:
+ case RHead, RNone, RSep:
+ if i != wordStart {
+ consume(wordStart, i)
+ }
+ wordStart = i
+ if r != RHead {
+ // Skip this character.
+ wordStart = i + 1
+ }
+ }
+ }
+ if wordStart != len(roles) {
+ consume(wordStart, len(roles))
+ }
+}
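
Note: the input classifier above is what the matcher and its callers use to split identifiers into words. A small sketch of the expected behaviour (the import path is internal to x/tools, so this only builds inside that module; the expected output reflects my reading of the role rules rather than a verified run):

    input := "printHTTPHeader"
    roles := fuzzy.RuneRoles(input, nil)
    fuzzy.Words(roles, func(start, end int) {
        fmt.Println(input[start:end]) // expected: "print", "HTTP", "Header"
    })
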
diff --git a/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go b/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
new file mode 100644
index 00000000..16a64309
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/lsp/fuzzy/matcher.go
@@ -0,0 +1,398 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fuzzy implements a fuzzy matching algorithm.
+package fuzzy
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ // MaxInputSize is the maximum size of the input scored against the fuzzy matcher. Longer inputs
+ // will be truncated to this size.
+ MaxInputSize = 127
+ // MaxPatternSize is the maximum size of the pattern used to construct the fuzzy matcher. Longer
+ // inputs are truncated to this size.
+ MaxPatternSize = 63
+)
+
+type scoreVal int
+
+func (s scoreVal) val() int {
+ return int(s) >> 1
+}
+
+func (s scoreVal) prevK() int {
+ return int(s) & 1
+}
+
+func score(val int, prevK int /*0 or 1*/) scoreVal {
+ return scoreVal(val<<1 + prevK)
+}
+
+// Matcher implements a fuzzy matching algorithm for scoring candidates against a pattern.
+// The matcher does not support parallel usage.
+type Matcher struct {
+ pattern string
+ patternLower []byte // lower-case version of the pattern
+ patternShort []byte // first characters of the pattern
+ caseSensitive bool // set if the pattern is mix-cased
+
+ patternRoles []RuneRole // the role of each character in the pattern
+ roles []RuneRole // the role of each character in the tested string
+
+ scores [MaxInputSize + 1][MaxPatternSize + 1][2]scoreVal
+
+ scoreScale float32
+
+ lastCandidateLen int // in bytes
+ lastCandidateMatched bool
+
+ // Here we save the last candidate in lower-case. This is basically a byte slice we reuse for
+ // performance reasons, so the slice is not reallocated for every candidate.
+ lowerBuf [MaxInputSize]byte
+ rolesBuf [MaxInputSize]RuneRole
+}
+
+func (m *Matcher) bestK(i, j int) int {
+ if m.scores[i][j][0].val() < m.scores[i][j][1].val() {
+ return 1
+ }
+ return 0
+}
+
+// NewMatcher returns a new fuzzy matcher for scoring candidates against the provided pattern.
+func NewMatcher(pattern string) *Matcher {
+ if len(pattern) > MaxPatternSize {
+ pattern = pattern[:MaxPatternSize]
+ }
+
+ m := &Matcher{
+ pattern: pattern,
+ patternLower: ToLower(pattern, nil),
+ }
+
+ for i, c := range m.patternLower {
+ if pattern[i] != c {
+ m.caseSensitive = true
+ break
+ }
+ }
+
+ if len(pattern) > 3 {
+ m.patternShort = m.patternLower[:3]
+ } else {
+ m.patternShort = m.patternLower
+ }
+
+ m.patternRoles = RuneRoles(pattern, nil)
+
+ if len(pattern) > 0 {
+ maxCharScore := 4
+ m.scoreScale = 1 / float32(maxCharScore*len(pattern))
+ }
+
+ return m
+}
+
+// Score returns the score returned by matching the candidate to the pattern.
+// This is not designed for parallel use. Multiple candidates must be scored sequentially.
+// Returns a score between 0 and 1 (0 - no match, 1 - perfect match).
+func (m *Matcher) Score(candidate string) float32 {
+ if len(candidate) > MaxInputSize {
+ candidate = candidate[:MaxInputSize]
+ }
+ lower := ToLower(candidate, m.lowerBuf[:])
+ m.lastCandidateLen = len(candidate)
+
+ if len(m.pattern) == 0 {
+ // Empty patterns perfectly match candidates.
+ return 1
+ }
+
+ if m.match(candidate, lower) {
+ sc := m.computeScore(candidate, lower)
+ if sc > minScore/2 && !m.poorMatch() {
+ m.lastCandidateMatched = true
+ if len(m.pattern) == len(candidate) {
+ // Perfect match.
+ return 1
+ }
+
+ if sc < 0 {
+ sc = 0
+ }
+ normalizedScore := float32(sc) * m.scoreScale
+ if normalizedScore > 1 {
+ normalizedScore = 1
+ }
+
+ return normalizedScore
+ }
+ }
+
+ m.lastCandidateMatched = false
+ return 0
+}
+
+const minScore = -10000
+
+// MatchedRanges returns matches ranges for the last scored string as a flattened array of
+// [begin, end) byte offset pairs.
+func (m *Matcher) MatchedRanges() []int {
+ if len(m.pattern) == 0 || !m.lastCandidateMatched {
+ return nil
+ }
+ i, j := m.lastCandidateLen, len(m.pattern)
+ if m.scores[i][j][0].val() < minScore/2 && m.scores[i][j][1].val() < minScore/2 {
+ return nil
+ }
+
+ var ret []int
+ k := m.bestK(i, j)
+ for i > 0 {
+ take := (k == 1)
+ k = m.scores[i][j][k].prevK()
+ if take {
+ if len(ret) == 0 || ret[len(ret)-1] != i {
+ ret = append(ret, i)
+ ret = append(ret, i-1)
+ } else {
+ ret[len(ret)-1] = i - 1
+ }
+ j--
+ }
+ i--
+ }
+ // Reverse slice.
+ for i := 0; i < len(ret)/2; i++ {
+ ret[i], ret[len(ret)-1-i] = ret[len(ret)-1-i], ret[i]
+ }
+ return ret
+}
+
+func (m *Matcher) match(candidate string, candidateLower []byte) bool {
+ i, j := 0, 0
+ for ; i < len(candidateLower) && j < len(m.patternLower); i++ {
+ if candidateLower[i] == m.patternLower[j] {
+ j++
+ }
+ }
+ if j != len(m.patternLower) {
+ return false
+ }
+
+ // The input passes the simple test against pattern, so it is time to classify its characters.
+ // Character roles are used below to find the last segment.
+ m.roles = RuneRoles(candidate, m.rolesBuf[:])
+
+ return true
+}
+
+func (m *Matcher) computeScore(candidate string, candidateLower []byte) int {
+ pattLen, candLen := len(m.pattern), len(candidate)
+
+ for j := 0; j <= len(m.pattern); j++ {
+ m.scores[0][j][0] = minScore << 1
+ m.scores[0][j][1] = minScore << 1
+ }
+ m.scores[0][0][0] = score(0, 0) // Start with 0.
+
+ segmentsLeft, lastSegStart := 1, 0
+ for i := 0; i < candLen; i++ {
+ if m.roles[i] == RSep {
+ segmentsLeft++
+ lastSegStart = i + 1
+ }
+ }
+
+ // A per-character bonus for a consecutive match.
+ consecutiveBonus := 2
+ wordIdx := 0 // Word count within segment.
+ for i := 1; i <= candLen; i++ {
+
+ role := m.roles[i-1]
+ isHead := role == RHead
+
+ if isHead {
+ wordIdx++
+ } else if role == RSep && segmentsLeft > 1 {
+ wordIdx = 0
+ segmentsLeft--
+ }
+
+ var skipPenalty int
+ if i == 1 || (i-1) == lastSegStart {
+ // Skipping the start of first or last segment.
+ skipPenalty++
+ }
+
+ for j := 0; j <= pattLen; j++ {
+ // By default, we don't have a match. Fill in the skip data.
+ m.scores[i][j][1] = minScore << 1
+
+ // Compute the skip score.
+ k := 0
+ if m.scores[i-1][j][0].val() < m.scores[i-1][j][1].val() {
+ k = 1
+ }
+
+ skipScore := m.scores[i-1][j][k].val()
+ // Do not penalize missing characters after the last matched segment.
+ if j != pattLen {
+ skipScore -= skipPenalty
+ }
+ m.scores[i][j][0] = score(skipScore, k)
+
+ if j == 0 || candidateLower[i-1] != m.patternLower[j-1] {
+ // Not a match.
+ continue
+ }
+ pRole := m.patternRoles[j-1]
+
+ if role == RTail && pRole == RHead {
+ if j > 1 {
+ // Not a match: a head in the pattern matches a tail character in the candidate.
+ continue
+ }
+ // Special treatment for the first character of the pattern. We allow
+ // matches in the middle of a word if they are long enough, at least
+ // min(3, pattern.length) characters.
+ if !bytes.HasPrefix(candidateLower[i-1:], m.patternShort) {
+ continue
+ }
+ }
+
+ // Compute the char score.
+ var charScore int
+ // Bonus 1: the char is in the candidate's last segment.
+ if segmentsLeft <= 1 {
+ charScore++
+ }
+ // Bonus 2: Case match or a Head in the pattern aligns with one in the word.
+ // Single-case patterns lack segmentation signals and we assume any character
+ // can be a head of a segment.
+ if candidate[i-1] == m.pattern[j-1] || role == RHead && (!m.caseSensitive || pRole == RHead) {
+ charScore++
+ }
+
+ // Penalty 1: pattern char is Head, candidate char is Tail.
+ if role == RTail && pRole == RHead {
+ charScore--
+ }
+ // Penalty 2: first pattern character matched in the middle of a word.
+ if j == 1 && role == RTail {
+ charScore -= 4
+ }
+
+ // Third dimension encodes whether there is a gap between the previous match and the current
+ // one.
+ for k := 0; k < 2; k++ {
+ sc := m.scores[i-1][j-1][k].val() + charScore
+
+ isConsecutive := k == 1 || i-1 == 0 || i-1 == lastSegStart
+ if isConsecutive {
+ // Bonus 3: a consecutive match. First character match also gets a bonus to
+ // ensure prefix final match score normalizes to 1.0.
+ // Logically, this is a part of charScore, but we have to compute it here because it
+ // only applies for consecutive matches (k == 1).
+ sc += consecutiveBonus
+ }
+ if k == 0 {
+ // Penalty 3: Matching inside a segment (and previous char wasn't matched). Penalize for the lack
+ // of alignment.
+ if role == RTail || role == RUCTail {
+ sc -= 3
+ }
+ }
+
+ if sc > m.scores[i][j][1].val() {
+ m.scores[i][j][1] = score(sc, k)
+ }
+ }
+ }
+ }
+
+ result := m.scores[len(candidate)][len(m.pattern)][m.bestK(len(candidate), len(m.pattern))].val()
+
+ return result
+}
+
+// ScoreTable returns the score table computed for the provided candidate. Used only for debugging.
+func (m *Matcher) ScoreTable(candidate string) string {
+ var buf bytes.Buffer
+
+ var line1, line2, separator bytes.Buffer
+ line1.WriteString("\t")
+ line2.WriteString("\t")
+ for j := 0; j < len(m.pattern); j++ {
+ line1.WriteString(fmt.Sprintf("%c\t\t", m.pattern[j]))
+ separator.WriteString("----------------")
+ }
+
+ buf.WriteString(line1.String())
+ buf.WriteString("\n")
+ buf.WriteString(separator.String())
+ buf.WriteString("\n")
+
+ for i := 1; i <= len(candidate); i++ {
+ line1.Reset()
+ line2.Reset()
+
+ line1.WriteString(fmt.Sprintf("%c\t", candidate[i-1]))
+ line2.WriteString("\t")
+
+ for j := 1; j <= len(m.pattern); j++ {
+ line1.WriteString(fmt.Sprintf("M%6d(%c)\t", m.scores[i][j][0].val(), dir(m.scores[i][j][0].prevK())))
+ line2.WriteString(fmt.Sprintf("H%6d(%c)\t", m.scores[i][j][1].val(), dir(m.scores[i][j][1].prevK())))
+ }
+ buf.WriteString(line1.String())
+ buf.WriteString("\n")
+ buf.WriteString(line2.String())
+ buf.WriteString("\n")
+ buf.WriteString(separator.String())
+ buf.WriteString("\n")
+ }
+
+ return buf.String()
+}
+
+func dir(prevK int) rune {
+ if prevK == 0 {
+ return 'M'
+ }
+ return 'H'
+}
+
+func (m *Matcher) poorMatch() bool {
+ if len(m.pattern) < 2 {
+ return false
+ }
+
+ i, j := m.lastCandidateLen, len(m.pattern)
+ k := m.bestK(i, j)
+
+ var counter, len int
+ for i > 0 {
+ take := (k == 1)
+ k = m.scores[i][j][k].prevK()
+ if take {
+ len++
+ if k == 0 && len < 3 && m.roles[i-1] == RTail {
+ // Short match in the middle of a word
+ counter++
+ if counter > 1 {
+ return true
+ }
+ }
+ j--
+ } else {
+ len = 0
+ }
+ i--
+ }
+ return false
+}
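
Note: at the API surface, a Matcher is built once per pattern and then scores candidates sequentially; Score returns a value in [0, 1] and MatchedRanges reports which bytes of the last scored candidate matched. A hedged usage sketch (candidate strings are arbitrary; exact scores depend on the heuristics above and are not asserted):

    m := fuzzy.NewMatcher("hdr")
    score := m.Score("Header")  // expected to be > 0, since "hdr" is a subsequence of "header"
    ranges := m.MatchedRanges() // flattened [begin, end) byte offsets for the match above
    _, _ = score, ranges
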
diff --git a/vendor/golang.org/x/tools/internal/span/token.go b/vendor/golang.org/x/tools/internal/span/token.go
index 1710b777..10b429ef 100644
--- a/vendor/golang.org/x/tools/internal/span/token.go
+++ b/vendor/golang.org/x/tools/internal/span/token.go
@@ -114,6 +114,8 @@ func positionFromOffset(f *token.File, offset int) (string, int, int, error) {
}
pos := f.Pos(offset)
p := f.Position(pos)
+ // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if
+ // the file's last character is not a newline.
if offset == f.Size() {
return p.Filename, p.Line + 1, 1, nil
}
diff --git a/vendor/golang.org/x/tools/internal/span/uri.go b/vendor/golang.org/x/tools/internal/span/uri.go
index 78e71fe4..25049213 100644
--- a/vendor/golang.org/x/tools/internal/span/uri.go
+++ b/vendor/golang.org/x/tools/internal/span/uri.go
@@ -160,7 +160,7 @@ func isWindowsDrivePath(path string) bool {
// isWindowsDriveURI returns true if the file URI is of the format used by
// Windows URIs. The url.Parse package does not specially handle Windows paths
-// (see golang/go#6027). We check if the URI path has a drive prefix (e.g. "/C:").
+// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
func isWindowsDriveURIPath(uri string) bool {
if len(uri) < 4 {
return false
diff --git a/vendor/golang.org/x/tools/internal/span/utf16.go b/vendor/golang.org/x/tools/internal/span/utf16.go
index 561b3fa5..f06a2468 100644
--- a/vendor/golang.org/x/tools/internal/span/utf16.go
+++ b/vendor/golang.org/x/tools/internal/span/utf16.go
@@ -15,9 +15,6 @@ import (
// This is used to convert from the native (always in bytes) column
// representation and the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
- if content == nil {
- return -1, fmt.Errorf("ToUTF16Column: missing content")
- }
if !p.HasPosition() {
return -1, fmt.Errorf("ToUTF16Column: point is missing position")
}
diff --git a/vendor/gopkg.in/ini.v1/.travis.yml b/vendor/gopkg.in/ini.v1/.travis.yml
deleted file mode 100644
index 149b7249..00000000
--- a/vendor/gopkg.in/ini.v1/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - 1.13.x
-
-install: skip
-script:
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/smartystreets/goconvey
- - mkdir -p $HOME/gopath/src/gopkg.in
- - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1
- - cd $HOME/gopath/src/gopkg.in/ini.v1
- - go test -v -cover -race
diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile
index af27ff07..f3b0dae2 100644
--- a/vendor/gopkg.in/ini.v1/Makefile
+++ b/vendor/gopkg.in/ini.v1/Makefile
@@ -6,7 +6,7 @@ test:
go test -v -cover -race
bench:
- go test -v -cover -race -test.bench=. -test.benchmem
+ go test -v -cover -test.bench=. -test.benchmem
vet:
go vet
diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md
index 3d6d3cfc..5d65658b 100644
--- a/vendor/gopkg.in/ini.v1/README.md
+++ b/vendor/gopkg.in/ini.v1/README.md
@@ -1,6 +1,9 @@
# INI
-[![Build Status](https://img.shields.io/travis/go-ini/ini/master.svg?style=for-the-badge&logo=travis)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
+[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/go-ini/ini/Go?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
+[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
+[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
@@ -8,7 +11,7 @@ Package ini provides INI file read and write functionality in Go.
## Features
-- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
- Read with recursion values.
- Read with parent-child sections.
- Read with auto-increment key names.
@@ -33,6 +36,7 @@ Please add `-u` flag to update in the future.
- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- Mirror for mainland China: https://ini.unknwon.cn
## License
diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml
new file mode 100644
index 00000000..fc947f23
--- /dev/null
+++ b/vendor/gopkg.in/ini.v1/codecov.yml
@@ -0,0 +1,9 @@
+coverage:
+ range: "60...95"
+ status:
+ project:
+ default:
+ threshold: 1%
+
+comment:
+ layout: 'diff, files'
diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/gopkg.in/ini.v1/data_source.go
index dc0277ec..c3a541f1 100644
--- a/vendor/gopkg.in/ini.v1/data_source.go
+++ b/vendor/gopkg.in/ini.v1/data_source.go
@@ -68,6 +68,8 @@ func parseDataSource(source interface{}) (dataSource, error) {
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
+ case io.Reader:
+ return &sourceReadCloser{ioutil.NopCloser(s)}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
}
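
Note: with the io.Reader case added, a plain reader can now be passed straight to the loader, which wraps it with ioutil.NopCloser internally. A small sketch (section and key names are arbitrary):

    cfg, err := ini.Load(strings.NewReader("[server]\nport = 8080\n"))
    if err == nil {
        fmt.Println(cfg.Section("server").Key("port").String()) // "8080"
    }
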
diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go
index 017b77c8..549c00a8 100644
--- a/vendor/gopkg.in/ini.v1/file.go
+++ b/vendor/gopkg.in/ini.v1/file.go
@@ -25,7 +25,7 @@ import (
"sync"
)
-// File represents a combination of a or more INI file(s) in memory.
+// File represents a combination of one or more INI files in memory.
type File struct {
options LoadOptions
dataSources []dataSource
@@ -36,8 +36,12 @@ type File struct {
// To keep data in order.
sectionList []string
+ // To keep track of the index of a section with the same name.
+ // This meta list is only used when non-unique section names are allowed.
+ sectionIndexes []int
+
// Actual data is stored here.
- sections map[string]*Section
+ sections map[string][]*Section
NameMapper
ValueMapper
@@ -48,27 +52,40 @@ func newFile(dataSources []dataSource, opts LoadOptions) *File {
if len(opts.KeyValueDelimiters) == 0 {
opts.KeyValueDelimiters = "=:"
}
+ if len(opts.KeyValueDelimiterOnWrite) == 0 {
+ opts.KeyValueDelimiterOnWrite = "="
+ }
+ if len(opts.ChildSectionDelimiter) == 0 {
+ opts.ChildSectionDelimiter = "."
+ }
+
return &File{
BlockMode: true,
dataSources: dataSources,
- sections: make(map[string]*Section),
- sectionList: make([]string, 0, 10),
+ sections: make(map[string][]*Section),
options: opts,
}
}
// Empty returns an empty file object.
-func Empty() *File {
- // Ignore error here, we sure our data is good.
- f, _ := Load([]byte(""))
+func Empty(opts ...LoadOptions) *File {
+ var opt LoadOptions
+ if len(opts) > 0 {
+ opt = opts[0]
+ }
+
+ // Ignore error here, we are sure our data is good.
+ f, _ := LoadSources(opt, []byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
- return nil, errors.New("error creating new section: empty section name")
- } else if f.options.Insensitive && name != DefaultSection {
+ return nil, errors.New("empty section name")
+ }
+
+ if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
name = strings.ToLower(name)
}
@@ -77,13 +94,20 @@ func (f *File) NewSection(name string) (*Section, error) {
defer f.lock.Unlock()
}
- if inSlice(name, f.sectionList) {
- return f.sections[name], nil
+ if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
+ return f.sections[name][0], nil
}
f.sectionList = append(f.sectionList, name)
- f.sections[name] = newSection(f, name)
- return f.sections[name], nil
+
+ // NOTE: Append to indexes must happen before appending to sections,
+ // otherwise index will have off-by-one problem.
+ f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
+
+ sec := newSection(f, name)
+ f.sections[name] = append(f.sections[name], sec)
+
+ return sec, nil
}
// NewRawSection creates a new section with an unparseable body.
@@ -110,10 +134,20 @@ func (f *File) NewSections(names ...string) (err error) {
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return secs[0], err
+}
+
+// SectionsByName returns all sections with given name.
+func (f *File) SectionsByName(name string) ([]*Section, error) {
if len(name) == 0 {
name = DefaultSection
}
- if f.options.Insensitive {
+ if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(name)
}
@@ -122,11 +156,12 @@ func (f *File) GetSection(name string) (*Section, error) {
defer f.lock.RUnlock()
}
- sec := f.sections[name]
- if sec == nil {
- return nil, fmt.Errorf("section '%s' does not exist", name)
+ secs := f.sections[name]
+ if len(secs) == 0 {
+ return nil, fmt.Errorf("section %q does not exist", name)
}
- return sec, nil
+
+ return secs, nil
}
// Section assumes named section exists and returns a zero-value when not.
@@ -141,6 +176,19 @@ func (f *File) Section(name string) *Section {
return sec
}
+// SectionWithIndex assumes named section exists and returns a new section when not.
+func (f *File) SectionWithIndex(name string, index int) *Section {
+ secs, err := f.SectionsByName(name)
+ if err != nil || len(secs) <= index {
+ // NOTE: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ newSec, _ := f.NewSection(name)
+ return newSec
+ }
+
+ return secs[index]
+}
+
// Sections returns a list of Section stored in the current instance.
func (f *File) Sections() []*Section {
if f.BlockMode {
@@ -150,7 +198,7 @@ func (f *File) Sections() []*Section {
sections := make([]*Section, len(f.sectionList))
for i, name := range f.sectionList {
- sections[i] = f.sections[name]
+ sections[i] = f.sections[name][f.sectionIndexes[i]]
}
return sections
}
@@ -167,24 +215,70 @@ func (f *File) SectionStrings() []string {
return list
}
-// DeleteSection deletes a section.
+// DeleteSection deletes a section or all sections with given name.
func (f *File) DeleteSection(name string) {
- if f.BlockMode {
- f.lock.Lock()
- defer f.lock.Unlock()
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(secs); i++ {
+ // For non-unique sections, always remove the first occurrence so that in
+ // the next iteration the remaining sections with this name keep index 0.
+ // Ignoring the error as index 0 never returns an error.
+ _ = f.DeleteSectionWithIndex(name, 0)
+ }
+}
+
+// DeleteSectionWithIndex deletes a section with given name and index.
+func (f *File) DeleteSectionWithIndex(name string, index int) error {
+ if !f.options.AllowNonUniqueSections && index != 0 {
+ return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
}
if len(name) == 0 {
name = DefaultSection
}
-
- for i, s := range f.sectionList {
- if s == name {
- f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
- delete(f.sections, name)
- return
- }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
}
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ // Count occurrences of the sections
+ occurrences := 0
+
+ sectionListCopy := make([]string, len(f.sectionList))
+ copy(sectionListCopy, f.sectionList)
+
+ for i, s := range sectionListCopy {
+ if s != name {
+ continue
+ }
+
+ if occurrences == index {
+ if len(f.sections[name]) <= 1 {
+ delete(f.sections, name) // The last one in the map
+ } else {
+ f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
+ }
+
+ // Fix section lists
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
+
+ } else if occurrences > index {
+ // Fix the indices of all following sections with this name.
+ f.sectionIndexes[i-1]--
+ }
+
+ occurrences++
+ }
+
+ return nil
}
func (f *File) reload(s dataSource) error {
@@ -203,11 +297,14 @@ func (f *File) Reload() (err error) {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
- f.parse(bytes.NewBuffer(nil))
+ _ = f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
+ if f.options.ShortCircuit {
+ return nil
+ }
}
return nil
}
@@ -230,16 +327,16 @@ func (f *File) Append(source interface{}, others ...interface{}) error {
}
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
- equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
+ equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
if PrettyFormat || PrettyEqual {
- equalSign = " = "
+ equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
}
// Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
- sec := f.Section(sname)
+ sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
if len(sec.Comment) > 0 {
// Support multiline comments
lines := strings.Split(sec.Comment, LineBreak)
@@ -256,7 +353,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
}
}
- if i > 0 || DefaultHeader {
+ if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return nil, err
}
@@ -282,7 +379,7 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
}
// Count and generate alignment length and buffer spaces using the
- // longest key. Keys may be modifed if they contain certain characters so
+ // longest key. Keys may be modified if they contain certain characters so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
@@ -360,6 +457,8 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
val = `"""` + val + `"""`
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
+ } else if len(strings.TrimSpace(val)) != len(val) {
+ val = `"` + val + `"`
}
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
return nil, err
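
Taken together, the file.go changes above introduce a small API for non-unique sections. The following is a minimal sketch of how those helpers (SectionsByName, SectionWithIndex, DeleteSectionWithIndex) might be exercised, assuming the vendored gopkg.in/ini.v1 API as shown in this diff; the [peer] sections and addresses are illustrative only.

```Go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Two sections share the name "peer"; this only parses because
	// AllowNonUniqueSections is enabled.
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowNonUniqueSections: true}, []byte(`
[peer]
addr = 10.0.0.1

[peer]
addr = 10.0.0.2
`))
	if err != nil {
		panic(err)
	}

	// SectionsByName returns every occurrence; GetSection returns only the first.
	secs, _ := cfg.SectionsByName("peer")
	fmt.Println(len(secs)) // 2

	// SectionWithIndex picks a specific occurrence.
	fmt.Println(cfg.SectionWithIndex("peer", 1).Key("addr").String()) // 10.0.0.2

	// DeleteSectionWithIndex removes one occurrence; the next one shifts to
	// index 0, which is why DeleteSection above always deletes index 0 in a loop.
	_ = cfg.DeleteSectionWithIndex("peer", 0)
	secs, _ = cfg.SectionsByName("peer")
	fmt.Println(secs[0].Key("addr").String()) // 10.0.0.2
}
```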
diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go
index 945fc00c..96322b30 100644
--- a/vendor/gopkg.in/ini.v1/ini.go
+++ b/vendor/gopkg.in/ini.v1/ini.go
@@ -18,8 +18,10 @@
package ini
import (
+ "os"
"regexp"
"runtime"
+ "strings"
)
const (
@@ -29,14 +31,8 @@ const (
// Maximum allowed depth when recursively substituting variable names.
depthValues = 99
- version = "1.51.0"
)
-// Version returns current package version literal.
-func Version() string {
- return version
-}
-
var (
// LineBreak is the delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows at package init time.
@@ -61,8 +57,10 @@ var (
DefaultFormatRight = ""
)
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
func init() {
- if runtime.GOOS == "windows" {
+ if runtime.GOOS == "windows" && !inTest {
LineBreak = "\r\n"
}
}
@@ -73,12 +71,18 @@ type LoadOptions struct {
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
+ // InsensitiveSections indicates whether the parser forces all section names to lowercase.
+ InsensitiveSections bool
+ // InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+ InsensitiveKeys bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
IgnoreInlineComment bool
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
SkipUnrecognizableLines bool
+ // ShortCircuit indicates whether to ignore other configuration sources once the first available configuration source has been loaded.
+ ShortCircuit bool
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
// This type of keys are mostly used in my.cnf.
AllowBooleanKeys bool
@@ -109,12 +113,18 @@ type LoadOptions struct {
UnparseableSections []string
// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
KeyValueDelimiters string
+ // KeyValueDelimiterOnWrite is the delimiter used to separate key and value in the output. By default, it is "=".
+ KeyValueDelimiterOnWrite string
+ // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+ ChildSectionDelimiter string
// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
PreserveSurroundedQuote bool
// DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
DebugFunc DebugFunc
// ReaderBufferSize is the buffer size of the reader in bytes.
ReaderBufferSize int
+ // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+ AllowNonUniqueSections bool
}
// DebugFunc is the type of function called to log parse events.
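
The LoadOptions fields added above are all opt-in. The snippet below is a hedged sketch of how they might be combined when loading configuration; the file names and chosen delimiter values are placeholders, not defaults of the library.

```Go
package main

import "gopkg.in/ini.v1"

// loadConfig is a sketch only; "primary.ini" and "fallback.ini" are placeholders.
func loadConfig() (*ini.File, error) {
	return ini.LoadSources(ini.LoadOptions{
		InsensitiveSections:      true, // lowercase section names, keep key names as-is
		ShortCircuit:             true, // stop after the first source that loads successfully
		KeyValueDelimiterOnWrite: ":",  // write "key: value" instead of "key = value"
		ChildSectionDelimiter:    "/",  // "[parent/child]" is a child of "[parent]"
		AllowNonUniqueSections:   true, // allow repeated [section] headers with the same name
	}, "primary.ini", "fallback.ini")
}
```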
diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go
index 3c197410..8baafd9e 100644
--- a/vendor/gopkg.in/ini.v1/key.go
+++ b/vendor/gopkg.in/ini.v1/key.go
@@ -686,99 +686,127 @@ func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
// parseBools transforms strings to bools.
func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
vals := make([]bool, 0, len(strs))
- for _, str := range strs {
+ parser := func(str string) (interface{}, error) {
val, err := parseBool(str)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(bool))
}
}
- return vals, nil
+ return vals, err
}
// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
- for _, str := range strs {
+ parser := func(str string) (interface{}, error) {
val, err := strconv.ParseFloat(str, 64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(float64))
}
}
- return vals, nil
+ return vals, err
}
// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
- for _, str := range strs {
- valInt64, err := strconv.ParseInt(str, 0, 64)
- val := int(valInt64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, int(val.(int64)))
}
}
- return vals, nil
+ return vals, err
}
// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
- for _, str := range strs {
+ parser := func(str string) (interface{}, error) {
val, err := strconv.ParseInt(str, 0, 64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(int64))
}
}
- return vals, nil
+ return vals, err
}
// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
- for _, str := range strs {
- val, err := strconv.ParseUint(str, 0, 0)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, uint(val))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, uint(val.(uint64)))
}
}
- return vals, nil
+ return vals, err
}
// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
- for _, str := range strs {
+ parser := func(str string) (interface{}, error) {
val, err := strconv.ParseUint(str, 0, 64)
- if err != nil && returnOnInvalid {
- return nil, err
- }
- if err == nil || addInvalid {
- vals = append(vals, val)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(uint64))
}
}
- return vals, nil
+ return vals, err
}
+
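+// Parser converts a single raw string value into a typed value.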
+type Parser func(str string) (interface{}, error)
+
// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
- for _, str := range strs {
+ parser := func(str string) (interface{}, error) {
val, err := time.Parse(format, str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(time.Time))
+ }
+ }
+ return vals, err
+}
+
+// doParse transforms strings into typed values using the given parser.
+func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
+ vals := make([]interface{}, 0, len(strs))
+ for _, str := range strs {
+ val, err := parser(str)
if err != nil && returnOnInvalid {
return nil, err
}
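
The key.go refactor above folds the per-type loops into a single doParse driver plus a Parser closure per type. Below is a standalone sketch of that shape; parseAll and parseHex are hypothetical names used only for illustration and are not part of ini.v1.

```Go
package main

import (
	"fmt"
	"strconv"
)

// parser mirrors the Parser type added above: it converts one string.
type parser func(str string) (interface{}, error)

// parseAll mirrors doParse: apply the parser to every element, either failing
// fast (returnOnInvalid) or appending/skipping invalid values (addInvalid).
func parseAll(strs []string, addInvalid, returnOnInvalid bool, parse parser) ([]interface{}, error) {
	vals := make([]interface{}, 0, len(strs))
	for _, str := range strs {
		val, err := parse(str)
		if err != nil && returnOnInvalid {
			return nil, err
		}
		if err == nil || addInvalid {
			vals = append(vals, val)
		}
	}
	return vals, nil
}

func main() {
	// A hypothetical hex parser; each typed helper above supplies an
	// equivalent closure and converts the []interface{} back to its own type.
	parseHex := func(str string) (interface{}, error) {
		return strconv.ParseInt(str, 16, 64)
	}
	vals, err := parseAll([]string{"ff", "10", "zz"}, false, false, parseHex)
	fmt.Println(vals, err) // [255 16] <nil>
}
```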
diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go
index 53ab45c4..65147166 100644
--- a/vendor/gopkg.in/ini.v1/parser.go
+++ b/vendor/gopkg.in/ini.v1/parser.go
@@ -84,7 +84,10 @@ func (p *parser) BOM() error {
case mask[0] == 254 && mask[1] == 255:
fallthrough
case mask[0] == 255 && mask[1] == 254:
- p.buf.Read(mask)
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
case mask[0] == 239 && mask[1] == 187:
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
@@ -93,7 +96,10 @@ func (p *parser) BOM() error {
return nil
}
if mask[2] == 191 {
- p.buf.Read(mask)
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
}
}
return nil
@@ -135,7 +141,7 @@ func readKeyName(delimiters string, in []byte) (string, int, error) {
}
// Get out key name
- endIdx := -1
+ var endIdx int
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
@@ -181,7 +187,7 @@ func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
}
val += next
if p.isEOF {
- return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
}
}
return val, nil
@@ -371,7 +377,7 @@ func (f *File) parse(reader io.Reader) (err error) {
// Ignore error because default section name is never empty string.
name := DefaultSection
- if f.options.Insensitive {
+ if f.options.Insensitive || f.options.InsensitiveSections {
name = strings.ToLower(DefaultSection)
}
section, _ := f.NewSection(name)
@@ -413,7 +419,10 @@ func (f *File) parse(reader io.Reader) (err error) {
if f.options.AllowNestedValues &&
isLastValueEmpty && len(line) > 0 {
if line[0] == ' ' || line[0] == '\t' {
- lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ if err != nil {
+ return err
+ }
continue
}
}
@@ -453,14 +462,14 @@ func (f *File) parse(reader io.Reader) (err error) {
section.Comment = strings.TrimSpace(p.comment.String())
- // Reset aotu-counter and comments
+ // Reset auto-counter and comments
p.comment.Reset()
p.count = 1
inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
- (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+ ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
inUnparseableSection = true
continue
}
diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go
index 0bd3e130..afaa97c9 100644
--- a/vendor/gopkg.in/ini.v1/section.go
+++ b/vendor/gopkg.in/ini.v1/section.go
@@ -66,7 +66,7 @@ func (s *Section) SetBody(body string) {
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
- } else if s.f.options.Insensitive {
+ } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
name = strings.ToLower(name)
}
@@ -109,7 +109,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
if s.f.BlockMode {
s.f.lock.RLock()
}
- if s.f.options.Insensitive {
+ if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
name = strings.ToLower(name)
}
key := s.keys[name]
@@ -121,7 +121,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
// Check if it is a child-section.
sname := s.name
for {
- if i := strings.LastIndex(sname, "."); i > -1 {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
@@ -131,7 +131,7 @@ func (s *Section) GetKey(name string) (*Key, error) {
}
break
}
- return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
+ return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
}
return key, nil
}
@@ -188,7 +188,7 @@ func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
- if i := strings.LastIndex(sname, "."); i > -1 {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
@@ -245,11 +245,11 @@ func (s *Section) DeleteKey(name string) {
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
- prefix := s.name + "."
+ prefix := s.name + s.f.options.ChildSectionDelimiter
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
- children = append(children, s.f.sections[name])
+ children = append(children, s.f.sections[name]...)
}
}
return children
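
The section.go changes above route parent/child lookups through the configurable ChildSectionDelimiter instead of a hard-coded ".". A hedged sketch follows; the "::" delimiter, section names, and values are illustrative only.

```Go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.LoadSources(ini.LoadOptions{ChildSectionDelimiter: "::"}, []byte(`
[server]
timeout = 30

[server::api]
port = 8080
`))
	if err != nil {
		panic(err)
	}

	child := cfg.Section("server::api")
	fmt.Println(child.Key("port").String()) // 8080

	// Keys missing from the child fall back to the parent section, which is
	// found by splitting the section name on the configured delimiter.
	fmt.Println(child.Key("timeout").String()) // 30
}
```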
diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go
index 6bc70e4d..ad90300f 100644
--- a/vendor/gopkg.in/ini.v1/struct.go
+++ b/vendor/gopkg.in/ini.v1/struct.go
@@ -183,6 +183,10 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
if vt.Name() == "Duration" {
durationVal, err := key.Duration()
if err != nil {
+ if intVal, err := key.Int64(); err == nil {
+ field.SetInt(intVal)
+ return nil
+ }
return wrapStrictError(err, isStrict)
}
if isPtr {
@@ -254,13 +258,13 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
case reflect.Slice:
return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
default:
- return fmt.Errorf("unsupported type '%s'", t)
+ return fmt.Errorf("unsupported type %q", t)
}
return nil
}
-func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
- opts := strings.SplitN(tag, ",", 3)
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) {
+ opts := strings.SplitN(tag, ",", 4)
rawName = opts[0]
if len(opts) > 1 {
omitEmpty = opts[1] == "omitempty"
@@ -268,10 +272,15 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo
if len(opts) > 2 {
allowShadow = opts[2] == "allowshadow"
}
- return rawName, omitEmpty, allowShadow
+ if len(opts) > 3 {
+ allowNonUnique = opts[3] == "nonunique"
+ }
+ return rawName, omitEmpty, allowShadow, allowNonUnique
}
-func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
+// mapToField maps the given value to the matching field of the given section.
+// When non-unique sections are enabled, sectionIndex selects which occurrence of the section to map from.
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
@@ -286,7 +295,7 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
continue
}
- rawName, _, allowShadow := parseTagOptions(tag)
+ rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
@@ -300,56 +309,96 @@ func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
}
if isAnonymous || isStruct || isStructPtr {
- if sec, err := s.f.GetSection(fieldName); err == nil {
- // Only set the field to non-nil struct value if we have
- // a section for it. Otherwise, we end up with a non-nil
- // struct ptr even though there is no data.
+ if secs, err := s.f.SectionsByName(fieldName); err == nil {
+ if len(secs) <= sectionIndex {
+ return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
+ }
+ // Only set the field to non-nil struct value if we have a section for it.
+ // Otherwise, we end up with a non-nil struct ptr even though there is no data.
if isStructPtr && field.IsNil() {
field.Set(reflect.New(tpField.Type.Elem()))
}
- if err = sec.mapTo(field, isStrict); err != nil {
- return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
}
continue
}
}
+
+ // Map non-unique sections
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(fieldName, field, isStrict)
+ if err != nil {
+ return fmt.Errorf("map to slice %q: %v", fieldName, err)
+ }
+
+ field.Set(newField)
+ continue
+ }
+
if key, err := s.GetKey(fieldName); err == nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
- return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ return fmt.Errorf("set field %q: %v", fieldName, err)
}
}
}
return nil
}
-// MapTo maps section to given struct.
-func (s *Section) MapTo(v interface{}) error {
+// mapToSlice maps all sections with the same name and returns the new value.
+// The type of val must be a slice.
+func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
+ secs, err := s.f.SectionsByName(secName)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ typ := val.Type().Elem()
+ for i, sec := range secs {
+ elem := reflect.New(typ)
+ if err = sec.mapToField(elem, isStrict, i); err != nil {
+ return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
+ }
+
+ val = reflect.Append(val, elem.Elem())
+ }
+ return val, nil
+}
+
+// mapTo maps a section to object v.
+func (s *Section) mapTo(v interface{}, isStrict bool) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
- return errors.New("cannot map to non-pointer struct")
+ return errors.New("not a pointer to a struct")
}
- return s.mapTo(val, false)
+ if typ.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(s.name, val, isStrict)
+ if err != nil {
+ return err
+ }
+
+ val.Set(newField)
+ return nil
+ }
+
+ return s.mapToField(val, isStrict, 0)
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ return s.mapTo(v, false)
}
// StrictMapTo maps section to given struct in strict mode,
// which returns all possible error including value parsing error.
func (s *Section) StrictMapTo(v interface{}) error {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- val = val.Elem()
- } else {
- return errors.New("cannot map to non-pointer struct")
- }
-
- return s.mapTo(val, true)
+ return s.mapTo(v, true)
}
// MapTo maps file to given struct.
@@ -427,10 +476,10 @@ func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, all
if i == 0 {
keyWithShadows = newKey(key.s, key.name, val)
} else {
- keyWithShadows.AddShadow(val)
+ _ = keyWithShadows.AddShadow(val)
}
}
- key = keyWithShadows
+ *key = *keyWithShadows
return nil
}
@@ -480,7 +529,7 @@ func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim
return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
}
default:
- return fmt.Errorf("unsupported type '%s'", t)
+ return fmt.Errorf("unsupported type %q", t)
}
return nil
}
@@ -508,6 +557,11 @@ func isEmptyValue(v reflect.Value) bool {
return false
}
+// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
+type StructReflector interface {
+ ReflectINIStruct(*File) error
+}
+
func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
@@ -515,6 +569,10 @@ func (s *Section) reflectFrom(val reflect.Value) error {
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
+ if !val.Field(i).CanInterface() {
+ continue
+ }
+
field := val.Field(i)
tpField := typ.Field(i)
@@ -523,17 +581,21 @@ func (s *Section) reflectFrom(val reflect.Value) error {
continue
}
- rawName, omitEmpty, allowShadow := parseTagOptions(tag)
+ rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag)
if omitEmpty && isEmptyValue(field) {
continue
}
+ if r, ok := field.Interface().(StructReflector); ok {
+ return r.ReflectINIStruct(s.f)
+ }
+
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
- if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
@@ -548,12 +610,41 @@ func (s *Section) reflectFrom(val reflect.Value) error {
}
if err = sec.reflectFrom(field); err != nil {
- return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
}
continue
}
- // Note: Same reason as secion.
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ for i := 0; i < field.Len(); i++ {
+ if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
+ return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
+ }
+
+ sec, err := s.f.NewSection(fieldName)
+ if err != nil {
+ return err
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err := sec.reflectFrom(slice.Index(i)); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
@@ -564,23 +655,58 @@ func (s *Section) reflectFrom(val reflect.Value) error {
key.Comment = tpField.Tag.Get("comment")
}
- if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim")), allowShadow); err != nil {
- return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ return fmt.Errorf("reflect field %q: %v", fieldName, err)
}
}
return nil
}
-// ReflectFrom reflects secion from given struct.
+// ReflectFrom reflects section from given struct. It overwrites existing ones.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
+
+ if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
+ (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
+ // Clear sections to make sure none exists before adding the new ones
+ s.f.DeleteSection(s.name)
+
+ if typ.Kind() == reflect.Ptr {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+ return sec.reflectFrom(val.Elem())
+ }
+
+ slice := val.Slice(0, val.Len())
+ sliceOf := val.Type().Elem().Kind()
+ if sliceOf != reflect.Ptr {
+ return fmt.Errorf("not a slice of pointers")
+ }
+
+ for i := 0; i < slice.Len(); i++ {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+
+ err = sec.reflectFrom(slice.Index(i))
+ if err != nil {
+ return fmt.Errorf("reflect from %dth field: %v", i, err)
+ }
+ }
+
+ return nil
+ }
+
if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
val = val.Elem()
} else {
- return errors.New("cannot reflect from non-pointer struct")
+ return errors.New("not a pointer to a struct")
}
return s.reflectFrom(val)
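
Tying the struct.go changes together: the new fourth tag option `nonunique` lets a slice field absorb every section that shares a name via MapTo. Below is a hedged sketch; the Peer and Config types, section names, and values are illustrative only and not part of ini.v1.

```Go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// Peer and Config are illustrative types, not part of ini.v1.
type Peer struct {
	Addr string `ini:"addr"`
}

type Config struct {
	// The fourth tag option "nonunique" maps every [peer] section into the slice.
	Peers []Peer `ini:"peer,,,nonunique"`
}

func main() {
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowNonUniqueSections: true}, []byte(`
[peer]
addr = 10.0.0.1

[peer]
addr = 10.0.0.2
`))
	if err != nil {
		panic(err)
	}

	var c Config
	if err := cfg.MapTo(&c); err != nil {
		panic(err)
	}
	fmt.Println(len(c.Peers), c.Peers[1].Addr) // 2 10.0.0.2
}
```

Going the other way, the ReflectFrom branch above requires a slice of pointers (e.g. []*Peer) for non-unique sections and recreates one section per element after clearing any existing sections with that name.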
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
index 1f7e87e6..d2c2308f 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -86,6 +86,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) {
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
}
}
diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml
new file mode 100644
index 00000000..a130fe88
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - "1.4.x"
+ - "1.5.x"
+ - "1.6.x"
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+ - "1.12.x"
+ - "1.13.x"
+ - "1.14.x"
+ - "tip"
+
+go_import_path: gopkg.in/yaml.v3
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 00000000..2683e4bb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 00000000..866d74a7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 00000000..08eb1bab
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+ decoded into a typed bool value. Otherwise they behave as a string. Booleans
+ in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+ as specified in YAML 1.2, because most parsers still use the old format.
+ Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+ actually never supported by this package as it's clearly a poor choice.
+
+Beyond the points above, anchors, tags, and map merging are supported.
+Multi-document unmarshalling is not yet implemented.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
new file mode 100644
index 00000000..ae7d049f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 00000000..21c0dacf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,948 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+ textless bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ // Scanner errors do not advance the line before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors do not advance the line before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ n := &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ }
+ if !p.textless {
+ n.Line = p.event.start_mark.line + 1
+ n.Column = p.event.start_mark.column + 1
+ n.HeadComment = string(p.event.head_comment)
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ }
+ return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag || n.Kind == 0 && n.IsZero() {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
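+ // Guard against documents that abuse alias expansion: once enough nodes
+ // have been decoded, the fraction coming from alias expansion must stay
+ // below the ratio computed by allowedAliasRatio.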
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ case 0:
+ if n.IsZero() {
+ return d.null(out)
+ }
+ fallthrough
+ default:
+ failf("cannot decode node with unknown kind %d", n.Kind)
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return d.null(out)
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
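+ // Otherwise fall back to kind-specific conversions from the resolved
+ // value into the target type.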
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
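+ // Remember the concrete map types in use so nested interface values are
+ // decoded into maps of the same type; both are restored before returning.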
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ if n.Content[i].ShortTag() != strTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *Node, out reflect.Value) {
+ switch n.Kind {
+ case MappingNode:
+ d.unmarshal(n, out)
+ case AliasNode:
+ if n.Alias != nil && n.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case SequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.Content) - 1; i >= 0; i-- {
+ ni := n.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 00000000..c29217ef
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,2022 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
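+// The buffer helpers in this file flush once the buffer is within five
+// bytes of capacity, which comfortably covers the widest single write
+// (a multi-byte UTF-8 sequence or a CRLF break).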
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
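+// Incoming events are queued and, once yaml_emitter_need_more_events reports
+// enough context, run through yaml_emitter_state_machine one at a time.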
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ break
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ break
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ break
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
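+ // Walk the queued events tracking nesting depth; once it drops back to
+ // zero the current node is complete and no further events are needed.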
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ // [Go] This was changed so that indentations are more regular.
+ if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ // The first indent inside a sequence will just skip the "- " indicator.
+ emitter.indent += 2
+ } else {
+ // Everything else aligns to the chosen indentation.
+ emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
+ }
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if len(emitter.line_comment) > 0 {
+ // [Go] A line comment was provided for the key. That's unusual as the
+ // scanner associates line comments with the value. Either way,
+ // save the line comment and render it appropriately later.
+ emitter.key_line_comment = emitter.line_comment
+ emitter.line_comment = nil
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ if len(emitter.key_line_comment) > 0 {
+ // [Go] A line comment was previously provided for the key. Handle it before
+ // the value so the inline comments are placed correctly.
+ if yaml_emitter_silent_nil_event(emitter, event) && len(emitter.line_comment) == 0 {
+ // Nothing other than the line comment will be written on the line.
+ emitter.line_comment = emitter.key_line_comment
+ emitter.key_line_comment = nil
+ } else {
+ // An actual value is coming, so emit the comment line.
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ // Indent in unless it's a block that will reindent anyway.
+ if event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || (event.typ != yaml_MAPPING_START_EVENT && event.typ != yaml_SEQUENCE_START_EVENT) {
+ emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
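+ // Keys longer than this fall back to the explicit "?" form in the caller.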
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.line_comment) == 0 {
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
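The scalar analysis and style-selection logic above decides which YAML presentation a string may use. Below is a minimal, illustrative sketch (not part of the vendored diff) of how those restrictions surface through the package's public Marshal function; it assumes the gopkg.in/yaml.v3 API as vendored here, and the exact quoting details may differ between releases.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Each value trips a different rule in yaml_emitter_analyze_scalar:
	// plain text stays plain, a ": " sequence or a leading space rules out
	// the plain style, and embedded newlines steer the encoder towards a
	// literal block.
	samples := map[string]string{
		"plain":   "hello",
		"mapping": "key: value",
		"padded":  " leading space",
		"multi":   "line one\nline two",
	}
	out, err := yaml.Marshal(samples)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}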
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 00000000..45e8d1e1
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,572 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case Node:
+ e.nodev(in.Addr())
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+ // TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // Zero nodes behave as nil.
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value is streamed. The last tail is processed
+ // with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ default:
+ failf("cannot encode node with unknown kind %d", node.Kind)
+ }
+}
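For orientation, here is a short usage sketch (not part of the vendored sources) showing how the encoder above is reached through the public API: plain struct marshalling, the YAML 1.1 compatibility quoting guarded by isBase60Float and isOldBool, and comment emission via Node. The exact output layout is not guaranteed; treat this as an illustration only.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Config struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports,flow"`
}

func main() {
	// Struct marshalling goes through encoder.structv above.
	out, err := yaml.Marshal(Config{Name: "web", Ports: []int{80, 443}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))

	// Strings that YAML 1.1 reads as booleans or base-60 floats are
	// expected to come out quoted, so older parsers keep them as strings.
	legacy, err := yaml.Marshal(map[string]string{"answer": "yes", "time": "1:30"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(legacy))

	// Comments attached to a Node are carried through the head/line/foot
	// comment handling in the emitter.
	node := yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "name", HeadComment: "service name"},
			{Kind: yaml.ScalarNode, Value: "web", LineComment: "edit as needed"},
		},
	}
	out, err = yaml.Marshal(&node)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}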
diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
new file mode 100644
index 00000000..f407ea32
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v3"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 00000000..ac66fccc
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1249 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
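+//
+// For example (illustrative), the document
+//
+//      key: value
+//      list:
+//        - a
+//        - b
+//
+// is parsed into the event stream
+//
+//      STREAM-START
+//      DOCUMENT-START (implicit)
+//      MAPPING-START
+//      SCALAR("key") SCALAR("value")
+//      SCALAR("list")
+//      SEQUENCE-START
+//      SCALAR("a") SCALAR("b")
+//      SEQUENCE-END
+//      MAPPING-END
+//      DOCUMENT-END (implicit)
+//      STREAM-END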
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so the part before the last empty line goes into the
+ // document header, while the bottom of it goes into a follow up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not the individual
+// sequence or map entry as would be expected otherwise. To handle this case the
+// previous head comment is moved aside as the stem comment.
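+//
+// For example (illustrative), given
+//
+// # note
+// - key: value
+//
+// the "# note" head comment ends up attached to the inner mapping as a
+// whole rather than to the individual "key" entry.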
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+ if stem_len == 0 {
+ return
+ }
+
+ token := peek_token(parser)
+ if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ return
+ }
+
+ parser.stem_comment = parser.head_comment[:stem_len]
+ if len(parser.head_comment) == stem_len {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+ }
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING-START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left from the prior mapping value processed. Emit an event
+ // as it needs to be processed with that value and not the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING-START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
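+//
+// For example (illustrative), the directive "%TAG !e! tag:example.com,2000:app/"
+// arrives here as a value with handle "!e!" and prefix "tag:example.com,2000:app/".
+// A handle that is already present is rejected as a duplicate unless
+// allow_duplicates is set, as it is when the default directives are appended.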
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 00000000..b7de0a89
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
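+//
+// For example (illustrative), a stream that begins with the bytes 0xFF 0xFE is
+// read as UTF-16LE and those two BOM bytes are skipped, while a stream with no
+// recognizable BOM defaults to UTF-8.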
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above
+ // implies that this must be the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is either panicking (in Go) or accessing invalid memory (in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
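+ //
+ // As a worked example (illustrative), the three-byte sequence
+ // 0xE2 0x82 0xAC decodes as follows: the leading octet 0xE2
+ // matches 1110xxxx, so the width is 3 and the initial value is
+ // 0xE2&0x0F = 0x02; folding in the two trailing octets gives
+ // ((0x02<<6)|0x02)<<6 | 0x2C = 0x20AC, the Euro sign U+20AC.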
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
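+ //
+ // As a worked example (illustrative), U+1F600 is encoded as the
+ // surrogate pair W1=0xD83D, W2=0xDE00, and the decoding below yields
+ // 0x10000 + ((0xD83D&0x3FF)<<10) + (0xDE00&0x3FF) = 0x1F600.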
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is either panicking (in Go) or accessing invalid memory (in C).
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 00000000..64ae8880
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
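+
+// tagRoundTripSketch is an illustrative sketch (not used by the resolver
+// itself) showing that shortTag and longTag invert each other for the
+// built-in tags registered in init above, while unknown tags pass through.
+func tagRoundTripSketch() bool {
+ return longTag("!!str") == "tag:yaml.org,2002:str" &&
+ shortTag("tag:yaml.org,2002:str") == "!!str" &&
+ shortTag("!custom") == "!custom"
+}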
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
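+// resolve maps a scalar tag and its textual value to a resolved tag and Go
+// value. For example (illustrative), resolve("", "42") yields (intTag, 42),
+// resolve("", "3.14") yields (floatTag, 3.14), resolve("", "true") yields
+// (boolTag, true), and resolve("!!str", "42") keeps the string "42" as-is.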
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
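+
+// decodeBase64Sketch is an illustrative sketch (not used by the package
+// itself) showing how the line-wrapped output of encodeBase64 can be read
+// back: the inserted newlines must be stripped before standard decoding.
+func decodeBase64Sketch(enc string) (string, error) {
+ out, err := base64.StdEncoding.DecodeString(strings.ReplaceAll(enc, "\n", ""))
+ return string(out), err
+}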
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
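+//
+// For example (illustrative), parseTimestamp("2001-12-14 21:59:43.10") and
+// parseTimestamp("2002-12-14") succeed, while parseTimestamp("21:59:43")
+// fails the quick "YYYY-" prefix check below and reports false.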
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 00000000..d9a539c3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3028 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, by contrast,
+// is a straightforward implementation of a recursive descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues in scanning that might be called "clever"; the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens describe the '%YAML' and '%TAG' directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}',
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':', respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
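+// trace is a debugging helper; typical usage (illustrative) is
+// "defer trace("scan", parser.mark)()", which prints a "+++" line on entry
+// and the matching "---" line when the surrounding function returns.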
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // Keep fetching tokens for as long as more are needed.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+ // so that foot comments may be parsed in time to associate them
+ // with the tokens that are parsed before them, and also for line
+ // comments to be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_start into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+ // A sequence indicator on its own takes no line comment; such a
+ // comment becomes a head comment for whatever follows.
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we could not determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
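+
+// For example, in the document
+//
+//	foo
+//	: bar
+//
+// the potential simple key "foo" starts on the line before the ':' is seen,
+// so simple_key.mark.line < parser.mark.line and the key is invalidated
+// (or reported as an error if it was required).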
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
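+
+// For example, while scanning
+//
+//	a: 1
+//
+// the key is found at column 0 while parser.indent is still -1, so
+// roll_indent pushes -1 onto the indents stack, sets the indent to 0, and
+// inserts a BLOCK-MAPPING-START token ahead of the buffered KEY token.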
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each popped indentation
+// level, append a BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
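+
+// Conversely, once the scanner reaches a column at or below the stored
+// indentation level (or the end of the document, where the column is passed
+// as -1), unroll_indent emits one BLOCK-END token per popped level, closing
+// whatever block collections roll_indent opened.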
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
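+
+// For example, the document
+//
+//	- one
+//	- two
+//
+// triggers roll_indent once for the first '-' (inserting
+// BLOCK-SEQUENCE-START) and then emits a BLOCK-ENTRY token for each '-',
+// ahead of the SCALAR token of the corresponding item.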
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
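+
+// For instance, scanning the line
+//
+//	foo: bar
+//
+// first appends SCALAR("foo") while remembering it as a potential simple
+// key; when the ':' is reached, fetch_value inserts KEY (and, via
+// roll_indent, BLOCK-MAPPING-START) back at the saved token number and then
+// appends VALUE, so the queue after STREAM-START reads
+//
+//	BLOCK-MAPPING-START, KEY, SCALAR("foo"), VALUE, SCALAR("bar"), ...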
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
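+
+// For example, for the directive
+//
+//	%YAML 1.2
+//
+// the number scanner above runs twice and yields major = 1 and minor = 2;
+// each component may be at most max_number_length (2) digits long.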
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
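+
+// For example, for the directive
+//
+//	%TAG !e! tag:example.com,2000:app/
+//
+// the handle comes back as "!e!" and the prefix as
+// "tag:example.com,2000:app/".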
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
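+
+// For example, the escaped sequence "%C3%A9" inside a tag URI is decoded
+// into the two bytes 0xC3 0xA9 (UTF-8 for 'é'): the leading octet fixes the
+// sequence width via width(), and every trailing octet must match 10xxxxxx.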
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // TODO Test this and then re-enable it.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
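+
+// For example, the block scalar headers are interpreted as follows:
+//
+//	|	literal, clip chomping (chomping == 0): keep the final line break only
+//	|-	literal, strip chomping (chomping == -1): drop trailing breaks
+//	|+	literal, keep chomping (chomping == +1): keep trailing breaks
+//	>2	folded, explicit indentation indicator: indent becomes
+//		parser.indent + 2 (or plainly 2 while parser.indent is still -1)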
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
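
The escape handling above writes \x, \u and \U code points by hand-rolling the UTF-8 encoding after rejecting surrogate halves and out-of-range values. Below is a standalone sketch of that same logic, checked against the standard library; it is illustrative only and not part of the vendored file (encodeUTF8 is a hypothetical helper name).

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 mirrors the scanner's encoding of an escaped code point:
// surrogates and values above U+10FFFF are rejected, then 1-4 bytes are emitted.
func encodeUTF8(value int) ([]byte, bool) {
	if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
		return nil, false
	}
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s, true
}

func main() {
	b, ok := encodeUTF8(0x2028) // same bytes the scanner appends for the \L escape
	fmt.Printf("%v % x\n", ok, b)            // true e2 80 a8
	fmt.Println(utf8.ValidString(string(b))) // true, matching the hand-rolled encoding
}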
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
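
A small, illustrative consequence of the indicator checks above: in block context a plain scalar may contain ',', '[' and ']', while in flow context those characters terminate it. This sketch only exercises the public yaml.v3 API and is not part of the vendored file.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Block context: ',' and '[' stay inside the plain scalar.
	var m map[string]interface{}
	if err := yaml.Unmarshal([]byte("msg: hello, world [not a list]\n"), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", m["msg"]) // "hello, world [not a list]"

	// Flow context: ',' and ']' end each plain scalar.
	var f []string
	if err := yaml.Unmarshal([]byte("[hello, world]"), &f); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", f) // ["hello" "world"]
}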
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered as a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line-parser.newlines+1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') {
+ // Got line break or terminator.
+ if !recent_empty {
+ if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ if len(text) > 0 {
+ if start_mark.column-1 < parser.indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && column < parser.indent+1 && column != start_mark.column {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
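
The two comment scanners above are what make comment-preserving round trips possible. A minimal sketch using the public Node API (illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := "# build settings\nname: demo # inline note\n"

	// Decoding into a Node keeps the scanned comments attached to the tree,
	// so re-encoding reproduces them.
	var n yaml.Node
	if err := yaml.Unmarshal([]byte(src), &n); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // both comments survive the round trip
}
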
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 00000000..9210ece7
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
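
The comparator above is what the encoder uses to order map keys, so runs of digits inside keys compare by numeric value rather than byte-wise. A small sketch of the observable effect (not part of the vendored file):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 10, "item2": 2, "item1": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 1
	// item2: 2
	// item10: 10
}
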
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 00000000..b8a116bf
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 00000000..56e8a849
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,693 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
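
A short usage sketch for the Decoder path, showing how KnownFields turns an unrecognized key into a *TypeError instead of silently dropping it (illustrative only; Config is a made-up type):

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

type Config struct {
	Name string `yaml:"name"`
	Port int    `yaml:"port"`
}

func main() {
	dec := yaml.NewDecoder(strings.NewReader("name: demo\nporrt: 8080\n"))
	dec.KnownFields(true) // the misspelled key now fails instead of being ignored
	var c Config
	if err := dec.Decode(&c); err != nil {
		fmt.Println(err) // e.g. yaml: unmarshal errors: ... field porrt not found in type main.Config
	}
}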
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
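
The omitempty and flow options listed above in action; a brief sketch with illustrative struct names, not part of the vendored file:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Point struct {
	X int `yaml:"x"`
	Y int `yaml:"y"`
}

type Doc struct {
	Title  string  `yaml:"title,omitempty"` // zero value, so omitted
	Coords []Point `yaml:"coords,flow"`     // emitted in flow style
}

func main() {
	out, err := yaml.Marshal(Doc{Coords: []Point{{X: 1, Y: 2}}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // coords: [{x: 1, y: 2}]
}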
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
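
Node.Encode and Node.Decode together give an intermediate form between Go values and YAML text; a minimal round-trip sketch (not part of the vendored file):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var n yaml.Node
	if err := n.Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	fmt.Println(n.Kind == yaml.MappingNode) // true

	var out map[string]int
	if err := n.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["a"]) // 1
}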
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
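
Typical Encoder usage tying SetIndent and Close together; a sketch only, assuming the default indent of four spaces:

package main

import (
	"os"

	"gopkg.in/yaml.v3"
)

func main() {
	enc := yaml.NewEncoder(os.Stdout)
	enc.SetIndent(2) // nested mappings indent by two spaces instead of the default four
	defer enc.Close()

	if err := enc.Encode(map[string]map[string]int{"server": {"port": 8080}}); err != nil {
		panic(err)
	}
	// server:
	//   port: 8080
}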
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line where the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
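
SetString's style choice in practice: a multi-line value is given LiteralStyle, so it re-encodes as a block scalar rather than a quoted string. A sketch only (exact indentation depends on the encoder settings); not part of the vendored file.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var n yaml.Node
	n.SetString("line one\nline two")
	fmt.Println(n.Style == yaml.LiteralStyle, n.Tag) // true !!str

	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // rendered with the |- literal block indicator
}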
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
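
The ,inline handling above lifts an embedded struct's keys into the outer mapping; a short sketch with illustrative type names, not part of the vendored file:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Meta struct {
	ID   int    `yaml:"id"`
	Kind string `yaml:"kind"`
}

type Record struct {
	Meta  `yaml:",inline"` // id and kind live at the top level of the mapping
	Value string `yaml:"value"`
}

func main() {
	var r Record
	if err := yaml.Unmarshal([]byte("id: 7\nkind: demo\nvalue: ok\n"), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.ID, r.Kind, r.Value) // 7 demo ok
}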
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
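
How IsZeroer interacts with omitempty during marshalling: time.Time is the usual example, since its zero value is a non-empty struct but still reports IsZero. A sketch only, not part of the vendored file.

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3"
)

type Event struct {
	Name string    `yaml:"name"`
	At   time.Time `yaml:"at,omitempty"` // omitted while At.IsZero() is true
}

func main() {
	out, err := yaml.Marshal(Event{Name: "boot"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // name: boot
}
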
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 00000000..7c6d0077
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,807 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data A pointer to an application data specified by
+// yaml_parser_set_input().
+// [out] buffer The buffer to write the data from the source.
+// [in] size The size of the buffer.
+// [out] size_read The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write the whole buffer to the
+// output, returning nil on success or a non-nil error if the write failed.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
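+
+// exampleWriteHandler is an illustrative sketch, not part of the upstream
+// yaml.v3 sources: it shows one way a handler can satisfy the contract above
+// by appending the flushed bytes to the emitter's in-memory output buffer
+// (the library's own string output handler works along these lines).
+func exampleWriteHandler(emitter *yaml_emitter_t, buffer []byte) error {
+ // output_buffer is the *[]byte destination used for string output.
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}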
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 00000000..e88f9c54
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go
index ff039baa..2c036ed1 100644
--- a/vendor/honnef.co/go/tools/pattern/match.go
+++ b/vendor/honnef.co/go/tools/pattern/match.go
@@ -242,6 +242,28 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) {
}
}
+ {
+ ln, ok1 := l.([]*ast.Field)
+ rn, ok2 := r.([]*ast.Field)
+ if ok1 || ok2 {
+ if ok1 && !ok2 {
+ rn = []*ast.Field{r.(*ast.Field)}
+ } else if !ok1 && ok2 {
+ ln = []*ast.Field{l.(*ast.Field)}
+ }
+
+ if len(ln) != len(rn) {
+ return nil, false
+ }
+ for i, ll := range ln {
+ if _, ok := match(m, ll, rn[i]); !ok {
+ return nil, false
+ }
+ }
+ return r, true
+ }
+ }
+
panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r))
}
diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go
index 16b6dbd8..22b6bbc2 100644
--- a/vendor/honnef.co/go/tools/staticcheck/lint.go
+++ b/vendor/honnef.co/go/tools/staticcheck/lint.go
@@ -1721,6 +1721,10 @@ func CheckUnreadVariableValues(pass *analysis.Pass) (interface{}, error) {
continue
}
+ if _, ok := val.(*ir.Const); ok {
+ // a zero-valued constant, for example in 'foo := []string(nil)'
+ continue
+ }
if !hasUse(val, nil) {
report.Report(pass, assign, fmt.Sprintf("this value of %s is never used", lhs))
}
diff --git a/vendor/honnef.co/go/tools/version/version.go b/vendor/honnef.co/go/tools/version/version.go
index eed7b0de..79066e90 100644
--- a/vendor/honnef.co/go/tools/version/version.go
+++ b/vendor/honnef.co/go/tools/version/version.go
@@ -7,7 +7,7 @@ import (
"runtime"
)
-const Version = "2020.1.4"
+const Version = "2020.1.5"
// version returns a version descriptor and reports whether the
// version is a known release.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 707bee14..d8b3c01d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -13,30 +13,42 @@ cloud.google.com/go/storage
# github.com/BurntSushi/toml v0.3.1
## explicit
github.com/BurntSushi/toml
-# github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157
+# github.com/Djarvur/go-err113 v0.1.0
+## explicit
github.com/Djarvur/go-err113
+# github.com/Masterminds/semver v1.5.0
+github.com/Masterminds/semver
# github.com/OpenPeeDeeP/depguard v1.0.1
github.com/OpenPeeDeeP/depguard
-# github.com/bombsimon/wsl/v3 v3.0.0
+# github.com/bombsimon/wsl/v3 v3.1.0
github.com/bombsimon/wsl/v3
+# github.com/coreos/go-etcd v2.0.0+incompatible
+## explicit
+# github.com/cpuguy83/go-md2man v1.0.10
+## explicit
+# github.com/daixiang0/gci v0.2.4
+github.com/daixiang0/gci/pkg/gci
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
+# github.com/denis-tingajkin/go-header v0.3.1
+github.com/denis-tingajkin/go-header
# github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813
## explicit
github.com/dvyukov/go-fuzz/go-fuzz-build
github.com/dvyukov/go-fuzz/go-fuzz-defs
github.com/dvyukov/go-fuzz/go-fuzz-dep
github.com/dvyukov/go-fuzz/internal/go-fuzz-types
-# github.com/fatih/color v1.7.0
+# github.com/fatih/color v1.9.0
github.com/fatih/color
-# github.com/fsnotify/fsnotify v1.4.7
+# github.com/fsnotify/fsnotify v1.4.9
github.com/fsnotify/fsnotify
-# github.com/go-critic/go-critic v0.4.1
+# github.com/go-critic/go-critic v0.5.2
github.com/go-critic/go-critic/checkers
+github.com/go-critic/go-critic/checkers/internal/astwalk
github.com/go-critic/go-critic/checkers/internal/lintutil
+github.com/go-critic/go-critic/framework/linter
# github.com/go-lintpack/lintpack v0.5.2
-github.com/go-lintpack/lintpack
-github.com/go-lintpack/lintpack/astwalk
+## explicit
# github.com/go-toolsmith/astcast v1.0.0
github.com/go-toolsmith/astcast
# github.com/go-toolsmith/astcopy v1.0.0
@@ -49,8 +61,10 @@ github.com/go-toolsmith/astfmt
github.com/go-toolsmith/astp
# github.com/go-toolsmith/strparse v1.0.0
github.com/go-toolsmith/strparse
-# github.com/go-toolsmith/typep v1.0.0
+# github.com/go-toolsmith/typep v1.0.2
github.com/go-toolsmith/typep
+# github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b
+github.com/go-xmlfmt/xmlfmt
# github.com/gobwas/glob v0.2.3
github.com/gobwas/glob
github.com/gobwas/glob/compiler
@@ -60,10 +74,10 @@ github.com/gobwas/glob/syntax/ast
github.com/gobwas/glob/syntax/lexer
github.com/gobwas/glob/util/runes
github.com/gobwas/glob/util/strings
-# github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b
+# github.com/gofrs/flock v0.8.0
github.com/gofrs/flock
-# github.com/gogo/protobuf v1.2.1
-github.com/gogo/protobuf/proto
+# github.com/gogo/protobuf v1.3.1
+## explicit
# github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
## explicit
github.com/golang/groupcache/lru
@@ -94,12 +108,12 @@ github.com/golangci/errcheck/internal/errcheck
github.com/golangci/go-misc/deadcode
# github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3
github.com/golangci/goconst
-# github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee
+# github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d
github.com/golangci/gocyclo/pkg/gocyclo
# github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a
github.com/golangci/gofmt/gofmt
github.com/golangci/gofmt/goimports
-# github.com/golangci/golangci-lint v1.27.0
+# github.com/golangci/golangci-lint v1.31.0
## explicit
github.com/golangci/golangci-lint/cmd/golangci-lint
github.com/golangci/golangci-lint/internal/cache
@@ -125,6 +139,7 @@ github.com/golangci/golangci-lint/pkg/printers
github.com/golangci/golangci-lint/pkg/report
github.com/golangci/golangci-lint/pkg/result
github.com/golangci/golangci-lint/pkg/result/processors
+github.com/golangci/golangci-lint/pkg/sliceutil
github.com/golangci/golangci-lint/pkg/timeutils
# github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc
github.com/golangci/ineffassign
@@ -132,15 +147,17 @@ github.com/golangci/ineffassign
github.com/golangci/lint-1
# github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca
github.com/golangci/maligned
-# github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770
+# github.com/golangci/misspell v0.3.5
+## explicit
github.com/golangci/misspell
# github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21
github.com/golangci/prealloc
-# github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0
+# github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039
+## explicit
github.com/golangci/revgrep
# github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4
github.com/golangci/unconvert
-# github.com/google/go-cmp v0.5.0
+# github.com/google/go-cmp v0.5.2
## explicit
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
@@ -151,8 +168,12 @@ github.com/google/go-cmp/cmp/internal/value
## explicit
# github.com/googleapis/gax-go/v2 v2.0.5
github.com/googleapis/gax-go/v2
-# github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3
+# github.com/gostaticanalysis/analysisutil v0.2.1
+## explicit
github.com/gostaticanalysis/analysisutil
+# github.com/gostaticanalysis/comment v1.4.1
+github.com/gostaticanalysis/comment
+github.com/gostaticanalysis/comment/passes/commentmap
# github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/hcl
github.com/hashicorp/hcl/hcl/ast
@@ -171,7 +192,8 @@ github.com/ianlancetaylor/demangle
github.com/inconshreveable/mousetrap
# github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a
github.com/jingyugao/rowserrcheck/passes/rowserr
-# github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3
+# github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af
+## explicit
github.com/jirfag/go-printf-func-name/pkg/analyzer
# github.com/jstemmer/go-junit-report v0.9.2-0.20191008195320-984a47ca6b0a
## explicit
@@ -181,21 +203,28 @@ github.com/jstemmer/go-junit-report/parser
# github.com/kisielk/gotool v1.0.0
github.com/kisielk/gotool
github.com/kisielk/gotool/internal/load
-# github.com/konsorten/go-windows-terminal-sequences v1.0.1
+# github.com/klauspost/cpuid v1.2.0
+## explicit
+# github.com/konsorten/go-windows-terminal-sequences v1.0.3
github.com/konsorten/go-windows-terminal-sequences
-# github.com/magiconair/properties v1.8.1
+# github.com/kyoh86/exportloopref v0.1.7
+github.com/kyoh86/exportloopref
+# github.com/magiconair/properties v1.8.3
+## explicit
github.com/magiconair/properties
# github.com/maratori/testpackage v1.0.1
github.com/maratori/testpackage/pkg/testpackage
-# github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb
+# github.com/matoous/godox v0.0.0-20200801072554-4fb83dc2941e
+## explicit
github.com/matoous/godox
-# github.com/mattn/go-colorable v0.1.4
+# github.com/mattn/go-colorable v0.1.7
github.com/mattn/go-colorable
-# github.com/mattn/go-isatty v0.0.8
+# github.com/mattn/go-isatty v0.0.12
github.com/mattn/go-isatty
# github.com/mitchellh/go-homedir v1.1.0
github.com/mitchellh/go-homedir
-# github.com/mitchellh/mapstructure v1.1.2
+# github.com/mitchellh/mapstructure v1.3.3
+## explicit
github.com/mitchellh/mapstructure
# github.com/nakabonne/nestif v0.3.0
github.com/nakabonne/nestif
@@ -209,52 +238,83 @@ github.com/nbutton23/zxcvbn-go/match
github.com/nbutton23/zxcvbn-go/matching
github.com/nbutton23/zxcvbn-go/scoring
github.com/nbutton23/zxcvbn-go/utils/math
-# github.com/pelletier/go-toml v1.2.0
+# github.com/nishanths/exhaustive v0.0.0-20200811152831-6cf413ae40e0
+github.com/nishanths/exhaustive
+# github.com/pelletier/go-toml v1.8.1
+## explicit
github.com/pelletier/go-toml
-# github.com/pkg/errors v0.8.1
+# github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d
+github.com/phayes/checkstyle
+# github.com/pkg/errors v0.9.1
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib/difflib
-# github.com/ryancurrah/gomodguard v1.0.4
+# github.com/quasilyte/go-ruleguard v0.2.0
+github.com/quasilyte/go-ruleguard/dslgen
+github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep
+github.com/quasilyte/go-ruleguard/ruleguard
+github.com/quasilyte/go-ruleguard/ruleguard/typematch
+# github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c
+## explicit
+github.com/quasilyte/regex/syntax
+# github.com/ryancurrah/gomodguard v1.1.0
github.com/ryancurrah/gomodguard
-# github.com/securego/gosec/v2 v2.3.0
+# github.com/ryanrolds/sqlclosecheck v0.3.0
+github.com/ryanrolds/sqlclosecheck/pkg/analyzer
+# github.com/securego/gosec/v2 v2.4.0
github.com/securego/gosec/v2
github.com/securego/gosec/v2/rules
-# github.com/sirupsen/logrus v1.4.2
+# github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c
+github.com/shazow/go-diff/difflib
+# github.com/sirupsen/logrus v1.6.0
github.com/sirupsen/logrus
-# github.com/sourcegraph/go-diff v0.5.1
+# github.com/sonatard/noctx v0.0.1
+github.com/sonatard/noctx
+github.com/sonatard/noctx/ngfunc
+github.com/sonatard/noctx/reqwithoutctx
+# github.com/sourcegraph/go-diff v0.6.0
github.com/sourcegraph/go-diff/diff
-# github.com/spf13/afero v1.1.2
+# github.com/spf13/afero v1.4.0
+## explicit
github.com/spf13/afero
github.com/spf13/afero/mem
-# github.com/spf13/cast v1.3.0
+# github.com/spf13/cast v1.3.1
+## explicit
github.com/spf13/cast
-# github.com/spf13/cobra v0.0.5
+# github.com/spf13/cobra v1.0.0
github.com/spf13/cobra
-# github.com/spf13/jwalterweatherman v1.0.0
+# github.com/spf13/jwalterweatherman v1.1.0
+## explicit
github.com/spf13/jwalterweatherman
# github.com/spf13/pflag v1.0.5
github.com/spf13/pflag
-# github.com/spf13/viper v1.6.1
+# github.com/spf13/viper v1.7.1
github.com/spf13/viper
-# github.com/stretchr/objx v0.1.1
+# github.com/ssgreg/nlreturn/v2 v2.1.0
+github.com/ssgreg/nlreturn/v2/pkg/nlreturn
+# github.com/stretchr/objx v0.3.0
+## explicit
github.com/stretchr/objx
-# github.com/stretchr/testify v1.5.1
+# github.com/stretchr/testify v1.6.1
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
# github.com/subosito/gotenv v1.2.0
github.com/subosito/gotenv
-# github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2
+# github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b
+## explicit
github.com/tdakkota/asciicheck
-# github.com/tetafro/godot v0.3.7
+# github.com/tetafro/godot v0.4.8
github.com/tetafro/godot
-# github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e
+# github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94
+## explicit
github.com/timakin/bodyclose/passes/bodyclose
# github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa
github.com/tommy-muehle/go-mnd
github.com/tommy-muehle/go-mnd/checks
github.com/tommy-muehle/go-mnd/config
-# github.com/ultraware/funlen v0.0.2
+# github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8
+## explicit
+# github.com/ultraware/funlen v0.0.3
github.com/ultraware/funlen
# github.com/ultraware/whitespace v0.0.4
github.com/ultraware/whitespace
@@ -285,7 +345,7 @@ golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2
+# golang.org/x/net v0.0.0-20200822124328-c89045814202
## explicit
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@@ -302,18 +362,17 @@ golang.org/x/oauth2/google
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
-# golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae
+# golang.org/x/sys v0.0.0-20200915084602-288bc346aa39
## explicit
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
-# golang.org/x/text v0.3.3-0.20191230102452-929e72ca90de
-## explicit
+# golang.org/x/text v0.3.3
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
-# golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f
+# golang.org/x/tools v0.0.0-20200915031644-64986481280e
## explicit
golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/analysis
@@ -380,12 +439,13 @@ golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/lsp/diff
golang.org/x/tools/internal/lsp/diff/myers
+golang.org/x/tools/internal/lsp/fuzzy
golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/span
golang.org/x/tools/internal/testenv
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/txtar
-# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
+# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.28.0
@@ -507,11 +567,15 @@ google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/pluginpb
-# gopkg.in/ini.v1 v1.51.0
+# gopkg.in/ini.v1 v1.61.0
+## explicit
gopkg.in/ini.v1
-# gopkg.in/yaml.v2 v2.2.8
+# gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2
-# honnef.co/go/tools v0.0.1-2020.1.4
+# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
+## explicit
+gopkg.in/yaml.v3
+# honnef.co/go/tools v0.0.1-2020.1.5
honnef.co/go/tools/arg
honnef.co/go/tools/code
honnef.co/go/tools/config
@@ -540,11 +604,15 @@ honnef.co/go/tools/staticcheck
honnef.co/go/tools/stylecheck
honnef.co/go/tools/unused
honnef.co/go/tools/version
+# mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d
+## explicit
+mvdan.cc/gofumpt/format
# mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed
mvdan.cc/interfacer/check
# mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b
mvdan.cc/lint
-# mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f
+# mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7
+## explicit
mvdan.cc/unparam/check
-# sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4
-sourcegraph.com/sqs/pbtypes
+# sourcegraph.com/sqs/pbtypes v1.0.0
+## explicit
diff --git a/vendor/mvdan.cc/gofumpt/LICENSE b/vendor/mvdan.cc/gofumpt/LICENSE
new file mode 100644
index 00000000..03e3bfc0
--- /dev/null
+++ b/vendor/mvdan.cc/gofumpt/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2019, Daniel Martí. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/mvdan.cc/gofumpt/LICENSE.google b/vendor/mvdan.cc/gofumpt/LICENSE.google
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/mvdan.cc/gofumpt/LICENSE.google
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/mvdan.cc/gofumpt/format/format.go b/vendor/mvdan.cc/gofumpt/format/format.go
new file mode 100644
index 00000000..e65bbc46
--- /dev/null
+++ b/vendor/mvdan.cc/gofumpt/format/format.go
@@ -0,0 +1,652 @@
+// Copyright (c) 2019, Daniel Martí
+// See LICENSE for licensing information
+
+// Package format exposes gofumpt's formatting in an API similar to go/format.
+// In general, the APIs are only guaranteed to work well when the input source
+// is in canonical gofmt format.
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp"
+ "golang.org/x/mod/semver"
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+type Options struct {
+ // LangVersion corresponds to the Go language version a piece of code is
+ // written in. The version is used to decide whether to apply formatting
+ // rules which require new language features. When inside a Go module,
+ // LangVersion should generally be specified as the result of:
+ //
+ // go list -m -f {{.GoVersion}}
+ //
+ // LangVersion is treated as a semantic version, which might start with
+ // a "v" prefix. Like Go versions, it might also be incomplete; "1.14"
+ // is equivalent to "1.14.0". When empty, it is equivalent to "v1", to
+ // not use language features which could break programs.
+ LangVersion string
+
+ ExtraRules bool
+}
+
+// Source formats src in gofumpt's format, assuming that src holds a valid Go
+// source file.
+func Source(src []byte, opts Options) ([]byte, error) {
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+
+ File(fset, file, opts)
+
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, file); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
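+
+// exampleSource is a usage sketch only, not part of upstream gofumpt: it runs
+// Source over a tiny file. The "1.15" language version is an assumed value;
+// callers would normally pass the result of `go list -m -f {{.GoVersion}}`.
+func exampleSource() ([]byte, error) {
+ src := []byte("package main\n\nfunc main() {}\n")
+ return Source(src, Options{LangVersion: "1.15"})
+}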
+
+// File modifies a file and fset in place to follow gofumpt's format. The
+// changes might include adding or removing newlines in fset, modifying
+// the position of nodes, or modifying literal values.
+func File(fset *token.FileSet, file *ast.File, opts Options) {
+ if opts.LangVersion == "" {
+ opts.LangVersion = "v1"
+ } else if opts.LangVersion[0] != 'v' {
+ opts.LangVersion = "v" + opts.LangVersion
+ }
+ if !semver.IsValid(opts.LangVersion) {
+ panic(fmt.Sprintf("invalid semver string: %q", opts.LangVersion))
+ }
+ f := &fumpter{
+ File: fset.File(file.Pos()),
+ fset: fset,
+ astFile: file,
+ Options: opts,
+ }
+ pre := func(c *astutil.Cursor) bool {
+ f.applyPre(c)
+ if _, ok := c.Node().(*ast.BlockStmt); ok {
+ f.blockLevel++
+ }
+ return true
+ }
+ post := func(c *astutil.Cursor) bool {
+ if _, ok := c.Node().(*ast.BlockStmt); ok {
+ f.blockLevel--
+ }
+ return true
+ }
+ astutil.Apply(file, pre, post)
+}
+
+// Multiline nodes which could fit on a single line under this many
+// bytes may be collapsed onto a single line.
+const shortLineLimit = 60
+
+var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`)
+
+type fumpter struct {
+ Options
+
+ *token.File
+ fset *token.FileSet
+
+ astFile *ast.File
+
+ blockLevel int
+}
+
+func (f *fumpter) commentsBetween(p1, p2 token.Pos) []*ast.CommentGroup {
+ comments := f.astFile.Comments
+ i1 := sort.Search(len(comments), func(i int) bool {
+ return comments[i].Pos() >= p1
+ })
+ comments = comments[i1:]
+ i2 := sort.Search(len(comments), func(i int) bool {
+ return comments[i].Pos() >= p2
+ })
+ comments = comments[:i2]
+ return comments
+}
+
+func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment {
+ comments := f.astFile.Comments
+ i := sort.Search(len(comments), func(i int) bool {
+ return comments[i].Pos() >= pos
+ })
+ if i >= len(comments) {
+ return nil
+ }
+ line := f.Line(pos)
+ for _, comment := range comments[i].List {
+ if f.Line(comment.Pos()) == line {
+ return comment
+ }
+ }
+ return nil
+}
+
+// addNewline is a hack to let us force a newline at a certain position.
+func (f *fumpter) addNewline(at token.Pos) {
+ offset := f.Offset(at)
+
+ field := reflect.ValueOf(f.File).Elem().FieldByName("lines")
+ n := field.Len()
+ lines := make([]int, 0, n+1)
+ for i := 0; i < n; i++ {
+ cur := int(field.Index(i).Int())
+ if offset == cur {
+ // This newline already exists; do nothing. Duplicate
+ // newlines can't exist.
+ return
+ }
+ if offset >= 0 && offset < cur {
+ lines = append(lines, offset)
+ offset = -1
+ }
+ lines = append(lines, cur)
+ }
+ if offset >= 0 {
+ lines = append(lines, offset)
+ }
+ if !f.SetLines(lines) {
+ panic(fmt.Sprintf("could not set lines to %v", lines))
+ }
+}
+
+// removeLines removes all newlines between the two given lines, so that they
+// end up on the same line.
+func (f *fumpter) removeLines(fromLine, toLine int) {
+ for fromLine < toLine {
+ f.MergeLine(fromLine)
+ toLine--
+ }
+}
+
+// removeLinesBetween is like removeLines, but it leaves one newline between the
+// two positions.
+func (f *fumpter) removeLinesBetween(from, to token.Pos) {
+ f.removeLines(f.Line(from)+1, f.Line(to))
+}
+
+type byteCounter int
+
+func (b *byteCounter) Write(p []byte) (n int, err error) {
+ *b += byteCounter(len(p))
+ return len(p), nil
+}
+
+func (f *fumpter) printLength(node ast.Node) int {
+ var count byteCounter
+ if err := format.Node(&count, f.fset, node); err != nil {
+ panic(fmt.Sprintf("unexpected print error: %v", err))
+ }
+
+ // Add the space taken by an inline comment.
+ if c := f.inlineComment(node.End()); c != nil {
+ fmt.Fprintf(&count, " %s", c.Text)
+ }
+
+ // Add an approximation of the indentation level. We can't know the
+ // number of tabs go/printer will add ahead of time. Trying to print the
+ // entire top-level declaration would tell us that, but then it's near
+ // impossible to reliably find our node again.
+ return int(count) + (f.blockLevel * 8)
+}
+
+// rxCommentDirective covers all common Go comment directives:
+//
+// //go: | standard Go directives, like go:noinline
+// //someword: | similar to the syntax above, like lint:ignore
+// //line | inserted line information for cmd/compile
+// //export | to mark cgo funcs for exporting
+// //extern | C function declarations for gccgo
+// //sys(nb)? | syscall function wrapper prototypes
+// //nolint | nolint directive for golangci
+var rxCommentDirective = regexp.MustCompile(`^([a-z]+:|line\b|export\b|extern\b|sys(nb)?\b|nolint\b)`)
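+
+// exampleDirectiveMatch is an illustrative sketch, not upstream gofumpt code:
+// a body like "go:noinline" matches the directive regexp above and is left
+// untouched, while an ordinary sentence does not match and so receives the
+// "// " spacing fix applied during the comment pass below.
+func exampleDirectiveMatch() (directive, plain bool) {
+ directive = rxCommentDirective.MatchString("go:noinline") // true
+ plain = rxCommentDirective.MatchString("this is a note") // false
+ return directive, plain
+}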
+
+// applyPre applies gofumpt's rewrites to the node under the cursor, before its children are visited.
+func (f *fumpter) applyPre(c *astutil.Cursor) {
+ switch node := c.Node().(type) {
+ case *ast.File:
+ var lastMulti bool
+ var lastEnd token.Pos
+ for _, decl := range node.Decls {
+ pos := decl.Pos()
+ comments := f.commentsBetween(lastEnd, pos)
+ if len(comments) > 0 {
+ pos = comments[0].Pos()
+ }
+
+ // multiline top-level declarations should be separated
+ multi := f.Line(pos) < f.Line(decl.End())
+ if multi && lastMulti && f.Line(lastEnd)+1 == f.Line(pos) {
+ f.addNewline(lastEnd)
+ }
+
+ lastMulti = multi
+ lastEnd = decl.End()
+ }
+
+ // Join contiguous lone var/const/import lines; abort if there
+ // are empty lines or comments in between.
+ newDecls := make([]ast.Decl, 0, len(node.Decls))
+ for i := 0; i < len(node.Decls); {
+ newDecls = append(newDecls, node.Decls[i])
+ start, ok := node.Decls[i].(*ast.GenDecl)
+ if !ok {
+ i++
+ continue
+ }
+ lastPos := start.Pos()
+ for i++; i < len(node.Decls); {
+ cont, ok := node.Decls[i].(*ast.GenDecl)
+ if !ok || cont.Tok != start.Tok || cont.Lparen != token.NoPos ||
+ f.Line(lastPos) < f.Line(cont.Pos())-1 {
+ break
+ }
+ start.Specs = append(start.Specs, cont.Specs...)
+ if c := f.inlineComment(cont.End()); c != nil {
+ // don't move an inline comment outside
+ start.Rparen = c.End()
+ }
+ lastPos = cont.Pos()
+ i++
+ }
+ }
+ node.Decls = newDecls
+
+ // Comments aren't nodes, so they're not walked by default.
+ groupLoop:
+ for _, group := range node.Comments {
+ for _, comment := range group.List {
+ body := strings.TrimPrefix(comment.Text, "//")
+ if body == comment.Text {
+ // /*-style comment
+ continue groupLoop
+ }
+ if rxCommentDirective.MatchString(body) {
+ // this line is a directive
+ continue groupLoop
+ }
+ r, _ := utf8.DecodeRuneInString(body)
+ if !unicode.IsLetter(r) && !unicode.IsNumber(r) && !unicode.IsSpace(r) {
+ // this line could be code like "//{"
+ continue groupLoop
+ }
+ }
+ // If none of the comment group's lines look like a
+ // directive or code, add spaces, if needed.
+ for _, comment := range group.List {
+ body := strings.TrimPrefix(comment.Text, "//")
+ r, _ := utf8.DecodeRuneInString(body)
+ if !unicode.IsSpace(r) {
+ comment.Text = "// " + strings.TrimPrefix(comment.Text, "//")
+ }
+ }
+ }
+
+ case *ast.DeclStmt:
+ decl, ok := node.Decl.(*ast.GenDecl)
+ if !ok || decl.Tok != token.VAR || len(decl.Specs) != 1 {
+ break // e.g. const name = "value"
+ }
+ spec := decl.Specs[0].(*ast.ValueSpec)
+ if spec.Type != nil {
+ break // e.g. var name Type
+ }
+ tok := token.ASSIGN
+ names := make([]ast.Expr, len(spec.Names))
+ for i, name := range spec.Names {
+ names[i] = name
+ if name.Name != "_" {
+ tok = token.DEFINE
+ }
+ }
+ c.Replace(&ast.AssignStmt{
+ Lhs: names,
+ Tok: tok,
+ Rhs: spec.Values,
+ })
+
+ case *ast.GenDecl:
+ if node.Tok == token.IMPORT && node.Lparen.IsValid() {
+ f.joinStdImports(node)
+ }
+
+ // Single var declarations shouldn't use parentheses, unless
+ // there's a comment on the grouped declaration.
+ if node.Tok == token.VAR && len(node.Specs) == 1 &&
+ node.Lparen.IsValid() && node.Doc == nil {
+ specPos := node.Specs[0].Pos()
+ specEnd := node.Specs[0].End()
+
+ if len(f.commentsBetween(node.TokPos, specPos)) > 0 {
+ // If the single spec has any comment, it must
+ // go before the entire declaration now.
+ node.TokPos = specPos
+ } else {
+ f.removeLines(f.Line(node.TokPos), f.Line(specPos))
+ }
+ f.removeLines(f.Line(specEnd), f.Line(node.Rparen))
+
+ // Remove the parentheses. go/printer will automatically
+ // get rid of the newlines.
+ node.Lparen = token.NoPos
+ node.Rparen = token.NoPos
+ }
+
+ case *ast.BlockStmt:
+ f.stmts(node.List)
+ comments := f.commentsBetween(node.Lbrace, node.Rbrace)
+ if len(node.List) == 0 && len(comments) == 0 {
+ f.removeLinesBetween(node.Lbrace, node.Rbrace)
+ break
+ }
+
+ isFuncBody := false
+ switch c.Parent().(type) {
+ case *ast.FuncDecl:
+ isFuncBody = true
+ case *ast.FuncLit:
+ isFuncBody = true
+ }
+
+ if len(node.List) > 1 && !isFuncBody {
+ // only if we have a single statement, or if
+ // it's a func body.
+ break
+ }
+ var bodyPos, bodyEnd token.Pos
+
+ if len(node.List) > 0 {
+ bodyPos = node.List[0].Pos()
+ bodyEnd = node.List[len(node.List)-1].End()
+ }
+ if len(comments) > 0 {
+ if pos := comments[0].Pos(); !bodyPos.IsValid() || pos < bodyPos {
+ bodyPos = pos
+ }
+ if pos := comments[len(comments)-1].End(); !bodyEnd.IsValid() || pos > bodyEnd {
+ bodyEnd = pos
+ }
+ }
+
+ f.removeLinesBetween(node.Lbrace, bodyPos)
+ f.removeLinesBetween(bodyEnd, node.Rbrace)
+
+ case *ast.CompositeLit:
+ if len(node.Elts) == 0 {
+ // doesn't have elements
+ break
+ }
+ openLine := f.Line(node.Lbrace)
+ closeLine := f.Line(node.Rbrace)
+ if openLine == closeLine {
+ // all in a single line
+ break
+ }
+
+ newlineAroundElems := false
+ newlineBetweenElems := false
+ lastLine := openLine
+ for i, elem := range node.Elts {
+ if f.Line(elem.Pos()) > lastLine {
+ if i == 0 {
+ newlineAroundElems = true
+ } else {
+ newlineBetweenElems = true
+ }
+ }
+ lastLine = f.Line(elem.End())
+ }
+ if closeLine > lastLine {
+ newlineAroundElems = true
+ }
+
+ if newlineBetweenElems || newlineAroundElems {
+ first := node.Elts[0]
+ if openLine == f.Line(first.Pos()) {
+ // We want the newline right after the brace.
+ f.addNewline(node.Lbrace + 1)
+ closeLine = f.Line(node.Rbrace)
+ }
+ last := node.Elts[len(node.Elts)-1]
+ if closeLine == f.Line(last.End()) {
+ // We want the newline right before the brace.
+ f.addNewline(node.Rbrace)
+ }
+ }
+
+ // If there's a newline between any consecutive elements, there
+ // must be a newline between all composite literal elements.
+ if !newlineBetweenElems {
+ break
+ }
+ for i1, elem1 := range node.Elts {
+ i2 := i1 + 1
+ if i2 >= len(node.Elts) {
+ break
+ }
+ elem2 := node.Elts[i2]
+ // TODO: do we care about &{}?
+ _, ok1 := elem1.(*ast.CompositeLit)
+ _, ok2 := elem2.(*ast.CompositeLit)
+ if !ok1 && !ok2 {
+ continue
+ }
+ if f.Line(elem1.End()) == f.Line(elem2.Pos()) {
+ f.addNewline(elem1.End())
+ }
+ }
+
+ case *ast.CaseClause:
+ f.stmts(node.Body)
+ openLine := f.Line(node.Case)
+ closeLine := f.Line(node.Colon)
+ if openLine == closeLine {
+ // nothing to do
+ break
+ }
+ if len(f.commentsBetween(node.Case, node.Colon)) > 0 {
+ // don't move comments
+ break
+ }
+ if f.printLength(node) > shortLineLimit {
+ // too long to collapse
+ break
+ }
+ f.removeLines(openLine, closeLine)
+
+ case *ast.CommClause:
+ f.stmts(node.Body)
+
+ case *ast.FieldList:
+ // Merging adjacent fields (e.g. parameters) is disabled by default.
+ if !f.ExtraRules {
+ break
+ }
+ switch c.Parent().(type) {
+ case *ast.FuncDecl, *ast.FuncType, *ast.InterfaceType:
+ node.List = f.mergeAdjacentFields(node.List)
+ c.Replace(node)
+ case *ast.StructType:
+ // Do not merge adjacent fields in structs.
+ }
+
+ case *ast.BasicLit:
+ // Octal number literals were introduced in 1.13.
+ if semver.Compare(f.LangVersion, "v1.13") >= 0 {
+ if node.Kind == token.INT && rxOctalInteger.MatchString(node.Value) {
+ node.Value = "0o" + node.Value[1:]
+ c.Replace(node)
+ }
+ }
+ }
+}
+
+func (f *fumpter) stmts(list []ast.Stmt) {
+ for i, stmt := range list {
+ ifs, ok := stmt.(*ast.IfStmt)
+ if !ok || i < 1 {
+ continue // not an if following another statement
+ }
+ as, ok := list[i-1].(*ast.AssignStmt)
+ if !ok || as.Tok != token.DEFINE ||
+ !identEqual(as.Lhs[len(as.Lhs)-1], "err") {
+ continue // not "..., err := ..."
+ }
+ be, ok := ifs.Cond.(*ast.BinaryExpr)
+ if !ok || ifs.Init != nil || ifs.Else != nil {
+ continue // complex if
+ }
+ if be.Op != token.NEQ || !identEqual(be.X, "err") ||
+ !identEqual(be.Y, "nil") {
+ continue // not "err != nil"
+ }
+ f.removeLinesBetween(as.End(), ifs.Pos())
+ }
+}
+
+func identEqual(expr ast.Expr, name string) bool {
+ id, ok := expr.(*ast.Ident)
+ return ok && id.Name == name
+}
+
+// joinStdImports ensures that all standard library imports are together and at
+// the top of the imports list.
+func (f *fumpter) joinStdImports(d *ast.GenDecl) {
+ var std, other []ast.Spec
+ firstGroup := true
+ lastEnd := d.Pos()
+ needsSort := false
+ for i, spec := range d.Specs {
+ spec := spec.(*ast.ImportSpec)
+ if coms := f.commentsBetween(lastEnd, spec.Pos()); len(coms) > 0 {
+ lastEnd = coms[len(coms)-1].End()
+ }
+ if i > 0 && firstGroup && f.Line(spec.Pos()) > f.Line(lastEnd)+1 {
+ firstGroup = false
+ } else {
+ // We're still in the first group, update lastEnd.
+ lastEnd = spec.End()
+ }
+
+ path, _ := strconv.Unquote(spec.Path.Value)
+ switch {
+ // Imports with a period are definitely third party.
+ case strings.Contains(path, "."):
+ fallthrough
+ // "test" and "example" are reserved as per golang.org/issue/37641.
+ // "internal" is unreachable.
+ case strings.HasPrefix(path, "test/") ||
+ strings.HasPrefix(path, "example/") ||
+ strings.HasPrefix(path, "internal/"):
+ fallthrough
+ // To be conservative, if an import has a name or an inline
+ // comment, and isn't part of the top group, treat it as non-std.
+ case !firstGroup && (spec.Name != nil || spec.Comment != nil):
+ other = append(other, spec)
+ continue
+ }
+
+ // If we're moving this std import further up, reset its
+ // position, to avoid breaking comments.
+ if !firstGroup || len(other) > 0 {
+ setPos(reflect.ValueOf(spec), d.Pos())
+ needsSort = true
+ }
+ std = append(std, spec)
+ }
+ // Ensure there is an empty line between std imports and other imports.
+ if len(std) > 0 && len(other) > 0 && f.Line(std[len(std)-1].End())+1 >= f.Line(other[0].Pos()) {
+ // We add two newlines, as that's necessary in some edge cases.
+ // For example, if the std and non-std imports were together and
+ // without indentation, adding one newline isn't enough. Two
+ // empty lines will be printed as one by go/printer, anyway.
+ f.addNewline(other[0].Pos() - 1)
+ f.addNewline(other[0].Pos())
+ }
+ // Finally, join the imports, keeping std at the top.
+ d.Specs = append(std, other...)
+
+ // If we moved any std imports to the first group, we need to sort them
+ // again.
+ if needsSort {
+ ast.SortImports(f.fset, f.astFile)
+ }
+}
+
+// mergeAdjacentFields returns fields with adjacent fields merged if possible.
+func (f *fumpter) mergeAdjacentFields(fields []*ast.Field) []*ast.Field {
+ // If there are less than two fields then there is nothing to merge.
+ if len(fields) < 2 {
+ return fields
+ }
+
+ // Otherwise, iterate over adjacent pairs of fields, merging if possible,
+ // and mutating fields. Elements of fields may be mutated (if merged with
+ // following fields), discarded (if merged with a preceding field), or left
+ // unchanged.
+ i := 0
+ for j := 1; j < len(fields); j++ {
+ if f.shouldMergeAdjacentFields(fields[i], fields[j]) {
+ fields[i].Names = append(fields[i].Names, fields[j].Names...)
+ } else {
+ i++
+ fields[i] = fields[j]
+ }
+ }
+ return fields[:i+1]
+}
+
+func (f *fumpter) shouldMergeAdjacentFields(f1, f2 *ast.Field) bool {
+ if len(f1.Names) == 0 || len(f2.Names) == 0 {
+ // Both must have names for the merge to work.
+ return false
+ }
+ if f.Line(f1.Pos()) != f.Line(f2.Pos()) {
+ // Trust the user if they used separate lines.
+ return false
+ }
+
+ // Only merge if the types are equal.
+ opt := cmp.Comparer(func(x, y token.Pos) bool { return true })
+ return cmp.Equal(f1.Type, f2.Type, opt)
+}
+
+var posType = reflect.TypeOf(token.NoPos)
+
+// setPos recursively sets all position fields in the node v to pos.
+func setPos(v reflect.Value, pos token.Pos) {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if !v.IsValid() {
+ return
+ }
+ if v.Type() == posType {
+ v.Set(reflect.ValueOf(pos))
+ }
+ if v.Kind() == reflect.Struct {
+ for i := 0; i < v.NumField(); i++ {
+ setPos(v.Field(i), pos)
+ }
+ }
+}
diff --git a/vendor/mvdan.cc/unparam/check/check.go b/vendor/mvdan.cc/unparam/check/check.go
index 7e753063..b738c273 100644
--- a/vendor/mvdan.cc/unparam/check/check.go
+++ b/vendor/mvdan.cc/unparam/check/check.go
@@ -28,7 +28,7 @@ import (
// UnusedParams returns a list of human-readable issues that point out unused
// function parameters.
-func UnusedParams(tests bool, exported, debug bool, args ...string) ([]string, error) {
+func UnusedParams(tests, exported, debug bool, args ...string) ([]string, error) {
wd, err := os.Getwd()
if err != nil {
return nil, err
@@ -823,7 +823,7 @@ func dummyImpl(blk *ssa.BasicBlock) bool {
//
// Since this function parses all of the package's Go source files on disk, its
// results are cached.
-func (c *Checker) declCounts(pkgDir string, pkgName string) map[string]int {
+func (c *Checker) declCounts(pkgDir, pkgName string) map[string]int {
key := pkgDir + ":" + pkgName
if m, ok := c.cachedDeclCounts[key]; ok {
return m
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/LICENSE b/vendor/sourcegraph.com/sqs/pbtypes/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/README.md b/vendor/sourcegraph.com/sqs/pbtypes/README.md
deleted file mode 100644
index 270f5975..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# pbtypes
-
-protobuf helper types
-
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/doc.go b/vendor/sourcegraph.com/sqs/pbtypes/doc.go
deleted file mode 100644
index 8469fb70..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package pbtypes contains protocol buffer types (Timestamp, Void,
-// etc.) and related helpers.
-package pbtypes // import "sourcegraph.com/sqs/pbtypes"
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/gen.go b/vendor/sourcegraph.com/sqs/pbtypes/gen.go
deleted file mode 100644
index a4d81cee..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/gen.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package pbtypes
-
-//go:generate gopathexec protoc -I$GOPATH/src -I$GOPATH/src/github.com/gogo/protobuf/protobuf -I$GOPATH/src/github.com/gengo/grpc-gateway/third_party/googleapis -I. --gogo_out=. timestamp.proto void.proto html.proto
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/html.pb.go b/vendor/sourcegraph.com/sqs/pbtypes/html.pb.go
deleted file mode 100644
index 69add306..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/html.pb.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: html.proto
-// DO NOT EDIT!
-
-package pbtypes
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// HTML is a type which marshals into {__html: "html code here"} to designate
-// that this value is sanitized HTML code, see
-// https://facebook.github.io/react/tips/dangerously-set-inner-html.html
-type HTML struct {
- HTML string `protobuf:"bytes,1,opt,name=HTML,proto3" json:"__html"`
-}
-
-func (m *HTML) Reset() { *m = HTML{} }
-func (m *HTML) String() string { return proto.CompactTextString(m) }
-func (*HTML) ProtoMessage() {}
-
-func (m *HTML) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-func (m *HTML) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.HTML) > 0 {
- data[i] = 0xa
- i++
- i = encodeVarintHtml(data, i, uint64(len(m.HTML)))
- i += copy(data[i:], m.HTML)
- }
- return i, nil
-}
-
-func encodeFixed64Html(data []byte, offset int, v uint64) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- data[offset+4] = uint8(v >> 32)
- data[offset+5] = uint8(v >> 40)
- data[offset+6] = uint8(v >> 48)
- data[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Html(data []byte, offset int, v uint32) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintHtml(data []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- data[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- data[offset] = uint8(v)
- return offset + 1
-}
-func (m *HTML) Size() (n int) {
- var l int
- _ = l
- l = len(m.HTML)
- if l > 0 {
- n += 1 + l + sovHtml(uint64(l))
- }
- return n
-}
-
-func sovHtml(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozHtml(x uint64) (n int) {
- return sovHtml(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *HTML) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowHtml
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HTML: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HTML: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field HTML", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowHtml
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthHtml
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.HTML = string(data[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipHtml(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthHtml
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipHtml(data []byte) (n int, err error) {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHtml
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHtml
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if data[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHtml
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthHtml
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowHtml
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipHtml(data[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthHtml = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowHtml = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/html.proto b/vendor/sourcegraph.com/sqs/pbtypes/html.proto
deleted file mode 100644
index 09be501d..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/html.proto
+++ /dev/null
@@ -1,16 +0,0 @@
-syntax = "proto3";
-
-package pbtypes;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-
-// HTML is a type which marshals into {__html: "html code here"} to designate
-// that this value is sanitized HTML code, see
-// https://facebook.github.io/react/tips/dangerously-set-inner-html.html
-message HTML {
- string HTML = 1 [(gogoproto.jsontag) = "__html"];
-}
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/rawmessage.go b/vendor/sourcegraph.com/sqs/pbtypes/rawmessage.go
deleted file mode 100644
index 625b0693..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/rawmessage.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package pbtypes
-
-import "errors"
-
-// RawMessage is a raw encoded JSON object.
-// It implements json.Marshaler and json.Unmarshaler like json.RawMessage,
-// but also proto.Marshaler and proto.Unmarshaler.
-type RawMessage []byte
-
-// MarshalJSON returns *m as the JSON encoding of m.
-func (m *RawMessage) MarshalJSON() ([]byte, error) {
- return *m, nil
-}
-
-// UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
- if m == nil {
- return errors.New("pbtypes.RawMessage: UnmarshalJSON on nil pointer")
- }
- *m = append((*m)[0:0], data...)
- return nil
-}
-
-// Marshal implements proto.Marshaler.
-func (m *RawMessage) Marshal() ([]byte, error) {
- return *m, nil
-}
-
-// Unmarshal implements proto.Unmarshaler.
-func (m *RawMessage) Unmarshal(data []byte) error {
- if m == nil {
- return errors.New("pbtypes.RawMessage: Unmarshal on nil pointer")
- }
- *m = append((*m)[0:0], data...)
- return nil
-}
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/timestamp.go b/vendor/sourcegraph.com/sqs/pbtypes/timestamp.go
deleted file mode 100644
index 18ac6498..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/timestamp.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package pbtypes
-
-import (
- "encoding/json"
- "time"
-)
-
-// NewTimestamp creates a new Timestamp from a time.Time.
-func NewTimestamp(t time.Time) Timestamp {
- return Timestamp{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())}
-}
-
-func (t Timestamp) Time() time.Time {
- return time.Unix(t.Seconds, int64(t.Nanos))
-}
-
-func (t Timestamp) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.Time())
-}
-
-func (t *Timestamp) UnmarshalJSON(data []byte) error {
- var tm time.Time
- if err := json.Unmarshal(data, &tm); err != nil {
- return err
- }
- *t = NewTimestamp(tm)
- return nil
-}
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/timestamp.pb.go b/vendor/sourcegraph.com/sqs/pbtypes/timestamp.pb.go
deleted file mode 100644
index d4c6bd09..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/timestamp.pb.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: timestamp.proto
-// DO NOT EDIT!
-
-/*
- Package pbtypes is a generated protocol buffer package.
-
- It is generated from these files:
- timestamp.proto
- void.proto
- html.proto
-
- It has these top-level messages:
- Timestamp
- Void
- HTML
-*/
-package pbtypes
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type Timestamp struct {
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage() {}
-
-func (m *Timestamp) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-func (m *Timestamp) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Seconds != 0 {
- data[i] = 0x8
- i++
- i = encodeVarintTimestamp(data, i, uint64(m.Seconds))
- }
- if m.Nanos != 0 {
- data[i] = 0x10
- i++
- i = encodeVarintTimestamp(data, i, uint64(m.Nanos))
- }
- return i, nil
-}
-
-func encodeFixed64Timestamp(data []byte, offset int, v uint64) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- data[offset+4] = uint8(v >> 32)
- data[offset+5] = uint8(v >> 40)
- data[offset+6] = uint8(v >> 48)
- data[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Timestamp(data []byte, offset int, v uint32) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintTimestamp(data []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- data[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- data[offset] = uint8(v)
- return offset + 1
-}
-func (m *Timestamp) Size() (n int) {
- var l int
- _ = l
- if m.Seconds != 0 {
- n += 1 + sovTimestamp(uint64(m.Seconds))
- }
- if m.Nanos != 0 {
- n += 1 + sovTimestamp(uint64(m.Nanos))
- }
- return n
-}
-
-func sovTimestamp(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozTimestamp(x uint64) (n int) {
- return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Timestamp) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
- }
- m.Seconds = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.Seconds |= (int64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
- }
- m.Nanos = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- m.Nanos |= (int32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipTimestamp(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthTimestamp
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTimestamp(data []byte) (n int, err error) {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if data[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthTimestamp
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTimestamp
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipTimestamp(data[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/timestamp.proto b/vendor/sourcegraph.com/sqs/pbtypes/timestamp.proto
deleted file mode 100644
index 926aa4de..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/timestamp.proto
+++ /dev/null
@@ -1,55 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// NOTE: Copied from src/google/protobuf/timestamp.proto in the
-// protobuf repository.
-
-syntax = "proto3";
-
-package pbtypes;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-
-message Timestamp {
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- int64 seconds = 1;
-
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- int32 nanos = 2;
-}
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/void.pb.go b/vendor/sourcegraph.com/sqs/pbtypes/void.pb.go
deleted file mode 100644
index 6e371dc1..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/void.pb.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: void.proto
-// DO NOT EDIT!
-
-package pbtypes
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// Void is an empty type used as the protobuf RPC method arg or result
-// for methods that take no parameters or yield no results,
-// respectively.
-type Void struct {
-}
-
-func (m *Void) Reset() { *m = Void{} }
-func (m *Void) String() string { return proto.CompactTextString(m) }
-func (*Void) ProtoMessage() {}
-
-func (m *Void) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-func (m *Void) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- return i, nil
-}
-
-func encodeFixed64Void(data []byte, offset int, v uint64) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- data[offset+4] = uint8(v >> 32)
- data[offset+5] = uint8(v >> 40)
- data[offset+6] = uint8(v >> 48)
- data[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Void(data []byte, offset int, v uint32) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintVoid(data []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- data[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- data[offset] = uint8(v)
- return offset + 1
-}
-func (m *Void) Size() (n int) {
- var l int
- _ = l
- return n
-}
-
-func sovVoid(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozVoid(x uint64) (n int) {
- return sovVoid(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Void) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowVoid
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Void: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Void: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipVoid(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthVoid
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipVoid(data []byte) (n int, err error) {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoid
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoid
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if data[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoid
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthVoid
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowVoid
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipVoid(data[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthVoid = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowVoid = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/sourcegraph.com/sqs/pbtypes/void.proto b/vendor/sourcegraph.com/sqs/pbtypes/void.proto
deleted file mode 100644
index 1119a980..00000000
--- a/vendor/sourcegraph.com/sqs/pbtypes/void.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-syntax = "proto3";
-
-package pbtypes;
-
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-
-// Void is an empty type used as the protobuf RPC method arg or result
-// for methods that take no parameters or yield no results,
-// respectively.
-message Void {}