diff --git a/.promu.yml b/.promu.yml
index 9a17044b..5eb815cc 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -5,12 +5,11 @@ repository:
build:
flags: -a -tags netgo
ldflags: |
- -s
- -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}}
- -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}}
- -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}}
- -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
- -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
+ -X github.com/prometheus/common/version.Version={{.Version}}
+ -X github.com/prometheus/common/version.Revision={{.Revision}}
+ -X github.com/prometheus/common/version.Branch={{.Branch}}
+ -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
+ -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
tarball:
files:
- LICENSE
diff --git a/Makefile b/Makefile
index 8802c739..e5a74931 100644
--- a/Makefile
+++ b/Makefile
@@ -11,10 +11,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-GO := GO15VENDOREXPERIMENT=1 go
-PROMU := $(GOPATH)/bin/promu
-GOLINTER ?= $(GOPATH)/bin/gometalinter
-pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
+GO := GO111MODULE=on go
+PROMU := $(shell $(GO) env GOPATH)/bin/promu
+GOLINTER ?= $(shell $(GO) env GOPATH)/bin/gometalinter
+pkgs = $(shell $(GO) list ./...)
PREFIX ?= $(shell pwd)
BIN_DIR ?= $(shell pwd)
@@ -26,7 +26,7 @@ all: format build test
style:
@echo ">> checking code style"
- @! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
+ @! gofmt -d $(shell find . -name '*.go' -print) | grep '^'
test:
@echo ">> running tests"
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..f84f167a
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,18 @@
+module github.com/justwatchcom/elasticsearch_exporter
+
+go 1.13
+
+require (
+ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
+ github.com/aws/aws-sdk-go v1.31.3
+ github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible
+ github.com/go-kit/kit v0.10.0
+ github.com/golang/protobuf v1.4.2 // indirect
+ github.com/imdario/mergo v0.3.9
+ github.com/prometheus/client_golang v1.6.0
+ github.com/prometheus/common v0.10.0
+ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect
+ golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect
+ gopkg.in/alecthomas/kingpin.v2 v2.2.6
+ gopkg.in/yaml.v2 v2.3.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..a3324948
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,436 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.31.3 h1:vJDjoM+VlM/ZEmGyaIhUXaYAtB9lra7Qhr58SSHHjPE=
+github.com/aws/aws-sdk-go v1.31.3/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible h1:8fBbhRkI5/0ocLFbrhPgnGUm0ogc+Gko1cRodPWDKX4=
+github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
+github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A=
+github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/main.go b/main.go
index 47497837..1e075ced 100644
--- a/main.go
+++ b/main.go
@@ -7,12 +7,17 @@ import (
"os/signal"
"time"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+
+ "github.com/justwatchcom/elasticsearch_exporter/pkg/roundtripper"
+
"context"
"github.com/go-kit/kit/log/level"
"github.com/justwatchcom/elasticsearch_exporter/collector"
"github.com/justwatchcom/elasticsearch_exporter/pkg/clusterinfo"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
"gopkg.in/alecthomas/kingpin.v2"
)
@@ -68,6 +73,10 @@ func main() {
esInsecureSkipVerify = kingpin.Flag("es.ssl-skip-verify",
"Skip SSL verification when connecting to Elasticsearch.").
Default("false").Envar("ES_SSL_SKIP_VERIFY").Bool()
+	esAWS = kingpin.Flag("es.aws", "Use AWS request signing when connecting to Elasticsearch.").
+		Default("false").Envar("ES_AWS").Bool()
+	esAWSRegion = kingpin.Flag("es.aws-region", "AWS region used for request signing.").
+		Default("").Envar("ES_AWS_REGION").String()
logLevel = kingpin.Flag("log.level",
"Sets the loglevel. Valid levels are debug, info, warn, error").
Default("info").Envar("LOG_LEVEL").String()
@@ -97,12 +106,22 @@ func main() {
// returns nil if not provided and falls back to simple TCP.
tlsConfig := createTLSConfig(*esCA, *esClientCert, *esClientPrivateKey, *esInsecureSkipVerify)
+ defaultTransport := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ Proxy: http.ProxyFromEnvironment,
+ }
+
httpClient := &http.Client{
- Timeout: *esTimeout,
- Transport: &http.Transport{
- TLSClientConfig: tlsConfig,
- Proxy: http.ProxyFromEnvironment,
- },
+ Timeout: *esTimeout,
+ Transport: defaultTransport,
+ }
+
+	if *esAWS {
+ httpClient.Transport = &roundtripper.AWSSigningTransport{
+ DefaultTransport: defaultTransport,
+ Credentials: credentials.NewChainCredentials([]credentials.Provider{&credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{}}),
+ Region: *esAWSRegion,
+ }
}
// version metric
@@ -160,7 +179,7 @@ func main() {
prometheus.MustRegister(clusterInfoRetriever)
mux := http.DefaultServeMux
- mux.Handle(*metricsPath, prometheus.Handler())
+ mux.Handle(*metricsPath, promhttp.Handler())
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
_, err = w.Write([]byte(`
Elasticsearch Exporter
diff --git a/pkg/roundtripper/aws.go b/pkg/roundtripper/aws.go
new file mode 100644
index 00000000..dd0896af
--- /dev/null
+++ b/pkg/roundtripper/aws.go
@@ -0,0 +1,31 @@
+package roundtripper
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
+)
+
+const (
+ service = "es"
+)
+
+type AWSSigningTransport struct {
+ DefaultTransport *http.Transport
+ Credentials *credentials.Credentials
+ Region string
+}
+
+// RoundTrip signs the request with AWS Signature Version 4 and then delegates to the wrapped default transport.
+func (a AWSSigningTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ signer := v4.NewSigner(a.Credentials)
+
+	// body is nil because the exporter only issues GET requests and never sends a request body to Elasticsearch
+ if _, err := signer.Sign(req, nil, service, a.Region, time.Now()); err != nil {
+ return nil, err
+ }
+
+ return a.DefaultTransport.RoundTrip(req)
+}
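
A minimal usage sketch of the new signing transport (mirroring the wiring in main.go above); the endpoint URL, region, and credential source here are placeholders for illustration and are not part of this change:

```go
// Sketch only: shows how AWSSigningTransport plugs into a standard http.Client.
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"

	"github.com/justwatchcom/elasticsearch_exporter/pkg/roundtripper"
)

func main() {
	// Sign every outgoing request with SigV4 before it hits the wire.
	transport := &roundtripper.AWSSigningTransport{
		DefaultTransport: http.DefaultTransport.(*http.Transport),
		Credentials:      credentials.NewEnvCredentials(), // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
		Region:           "eu-west-1",                     // placeholder region
	}

	client := &http.Client{
		Timeout:   10 * time.Second,
		Transport: transport,
	}

	// Placeholder endpoint of an AWS-hosted Elasticsearch domain.
	resp, err := client.Get("https://example-domain.eu-west-1.es.amazonaws.com/_cluster/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

Every request sent through this client is signed for the `es` service in the configured region, which is what lets the exporter scrape an access-restricted AWS Elasticsearch domain.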
diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE
deleted file mode 100644
index 74487567..00000000
--- a/vendor/github.com/alecthomas/template/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md
deleted file mode 100644
index ef6a8ee3..00000000
--- a/vendor/github.com/alecthomas/template/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Go's `text/template` package with newline elision
-
-This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
-
-eg.
-
-```
-{{if true}}\
-hello
-{{end}}\
-```
-
-Will result in:
-
-```
-hello\n
-```
-
-Rather than:
-
-```
-\n
-hello\n
-\n
-```
diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go
deleted file mode 100644
index 223c595c..00000000
--- a/vendor/github.com/alecthomas/template/doc.go
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output.
-
-To generate HTML output, see package html/template, which has the same interface
-as this package but automatically secures HTML output against certain attacks.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Actions may not span newlines, although comments can.
-
-Once parsed, a template may be executed safely in parallel.
-
-Here is a trivial example that prints "17 items are made of wool".
-
- type Inventory struct {
- Material string
- Count uint
- }
- sweaters := Inventory{"wool", 17}
- tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
- if err != nil { panic(err) }
- err = tmpl.Execute(os.Stdout, sweaters)
- if err != nil { panic(err) }
-
-More intricate examples appear below.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail below.
-
-*/
-// {{/* a comment */}}
-// A comment; discarded. May contain newlines.
-// Comments do not nest and must start and end at the
-// delimiters, as shown here.
-/*
-
- {{pipeline}}
- The default textual representation of the value of the pipeline
- is copied to the output.
-
- {{if pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
- nil pointer or interface value, and any array, slice, map, or
- string of length zero.
- Dot is unaffected.
-
- {{if pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
-
- {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
- To simplify the appearance of if-else chains, the else action
- of an if may include another if directly; the effect is exactly
- the same as writing
- {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
-
- {{range pipeline}} T1 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, nothing is output;
- otherwise, dot is set to the successive elements of the array,
- slice, or map and T1 is executed. If the value is a map and the
- keys are of basic type with a defined order ("comparable"), the
- elements will be visited in sorted key order.
-
- {{range pipeline}} T1 {{else}} T0 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, dot is unaffected and
- T0 is executed; otherwise, dot is set to the successive elements
- of the array, slice, or map and T1 is executed.
-
- {{template "name"}}
- The template with the specified name is executed with nil data.
-
- {{template "name" pipeline}}
- The template with the specified name is executed with dot set
- to the value of the pipeline.
-
- {{with pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, dot is set to the value of the pipeline and T1 is
- executed.
-
- {{with pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, dot is unaffected and T0
- is executed; otherwise, dot is set to the value of the pipeline
- and T1 is executed.
-
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
- - A boolean, string, character, integer, floating-point, imaginary
- or complex constant in Go syntax. These behave like Go's untyped
- constants, although raw strings may not span newlines.
- - The keyword nil, representing an untyped Go nil.
- - The character '.' (period):
- .
- The result is the value of dot.
- - A variable name, which is a (possibly empty) alphanumeric string
- preceded by a dollar sign, such as
- $piOver2
- or
- $
- The result is the value of the variable.
- Variables are described below.
- - The name of a field of the data, which must be a struct, preceded
- by a period, such as
- .Field
- The result is the value of the field. Field invocations may be
- chained:
- .Field1.Field2
- Fields can also be evaluated on variables, including chaining:
- $x.Field1.Field2
- - The name of a key of the data, which must be a map, preceded
- by a period, such as
- .Key
- The result is the map element value indexed by the key.
- Key invocations may be chained and combined with fields to any
- depth:
- .Field1.Key1.Field2.Key2
- Although the key must be an alphanumeric identifier, unlike with
- field names they do not need to start with an upper case letter.
- Keys can also be evaluated on variables, including chaining:
- $x.key1.key2
- - The name of a niladic method of the data, preceded by a period,
- such as
- .Method
- The result is the value of invoking the method with dot as the
- receiver, dot.Method(). Such a method must have one return value (of
- any type) or two return values, the second of which is an error.
- If it has two and the returned error is non-nil, execution terminates
- and an error is returned to the caller as the value of Execute.
- Method invocations may be chained and combined with fields and keys
- to any depth:
- .Field1.Key1.Method1.Field2.Key2.Method2
- Methods can also be evaluated on variables, including chaining:
- $x.Method1.Field
- - The name of a niladic function, such as
- fun
- The result is the value of invoking the function, fun(). The return
- types and values behave as in methods. Functions and function
- names are described below.
- - A parenthesized instance of one the above, for grouping. The result
- may be accessed by a field or map key invocation.
- print (.F1 arg1) (.F2 arg2)
- (.StructValuedMethod "arg").Field
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-If an evaluation yields a function value, such as a function-valued
-field of a struct, the function is not invoked automatically, but it
-can be used as a truth value for an if action and the like. To invoke
-it, use the call function, defined below.
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
- Argument
- The result is the value of evaluating the argument.
- .Method [Argument...]
- The method can be alone or the last element of a chain but,
- unlike methods in the middle of a chain, it can take arguments.
- The result is the value of calling the method with the
- arguments:
- dot.Method(Argument1, etc.)
- functionName [Argument...]
- The result is the value of calling the function associated
- with the name:
- function(Argument1, etc.)
- Functions and function names are described below.
-
-Pipelines
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of the each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
- $variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
- range $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
- {{"\"output\""}}
- A string constant.
- {{`"output"`}}
- A raw string constant.
- {{printf "%q" "output"}}
- A function call.
- {{"output" | printf "%q"}}
- A function call whose final argument comes from the previous
- command.
- {{printf "%q" (print "out" "put")}}
- A parenthesized argument.
- {{"put" | printf "%s%s" "out" | printf "%q"}}
- A more elaborate call.
- {{"output" | printf "%s" | printf "%q"}}
- A longer chain.
- {{with "output"}}{{printf "%q" .}}{{end}}
- A with action using dot.
- {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
- A with action that creates and uses a variable.
- {{with $x := "output"}}{{printf "%q" $x}}{{end}}
- A with action that uses the variable in another action.
- {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
- The same, but pipelined.
-
-Functions
-
-During execution functions are found in two function maps: first in the
-template, then in the global function map. By default, no functions are defined
-in the template but the Funcs method can be used to add them.
-
-Predefined global functions are named as follows.
-
- and
- Returns the boolean AND of its arguments by returning the
- first empty argument or the last argument, that is,
- "and x y" behaves as "if x then y else x". All the
- arguments are evaluated.
- call
- Returns the result of calling the first argument, which
- must be a function, with the remaining arguments as parameters.
- Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
- Y is a func-valued field, map entry, or the like.
- The first argument must be the result of an evaluation
- that yields a value of function type (as distinct from
- a predefined function such as print). The function must
- return either one or two result values, the second of which
- is of type error. If the arguments don't match the function
- or the returned error value is non-nil, execution stops.
- html
- Returns the escaped HTML equivalent of the textual
- representation of its arguments.
- index
- Returns the result of indexing its first argument by the
- following arguments. Thus "index x 1 2 3" is, in Go syntax,
- x[1][2][3]. Each indexed item must be a map, slice, or array.
- js
- Returns the escaped JavaScript equivalent of the textual
- representation of its arguments.
- len
- Returns the integer length of its argument.
- not
- Returns the boolean negation of its single argument.
- or
- Returns the boolean OR of its arguments by returning the
- first non-empty argument or the last argument, that is,
- "or x y" behaves as "if x then x else y". All the
- arguments are evaluated.
- print
- An alias for fmt.Sprint
- printf
- An alias for fmt.Sprintf
- println
- An alias for fmt.Sprintln
- urlquery
- Returns the escaped value of the textual representation of
- its arguments in a form suitable for embedding in a URL query.
-
-The boolean functions take any zero value to be false and a non-zero
-value to be true.
-
-There is also a set of binary comparison operators defined as
-functions:
-
- eq
- Returns the boolean truth of arg1 == arg2
- ne
- Returns the boolean truth of arg1 != arg2
- lt
- Returns the boolean truth of arg1 < arg2
- le
- Returns the boolean truth of arg1 <= arg2
- gt
- Returns the boolean truth of arg1 > arg2
- ge
- Returns the boolean truth of arg1 >= arg2
-
-For simpler multi-way equality tests, eq (only) accepts two or more
-arguments and compares the second and subsequent to the first,
-returning in effect
-
- arg1==arg2 || arg1==arg3 || arg1==arg4 ...
-
-(Unlike with || in Go, however, eq is a function call and all the
-arguments will be evaluated.)
-
-The comparison functions work on basic types only (or named basic
-types, such as "type Celsius float32"). They implement the Go rules
-for comparison of values, except that size and exact type are
-ignored, so any integer value, signed or unsigned, may be compared
-with any other integer value. (The arithmetic value is compared,
-not the bit pattern, so all negative integers are less than all
-unsigned integers.) However, as usual, one may not compare an int
-with a float32 and so on.
-
-Associated templates
-
-Each template is named by a string specified when it is created. Also, each
-template is associated with zero or more other templates that it may invoke by
-name; such associations are transitive and form a name space of templates.
-
-A template may use a template invocation to instantiate another associated
-template; see the explanation of the "template" action above. The name must be
-that of a template associated with the template that contains the invocation.
-
-Nested template definitions
-
-When parsing a template, another template may be defined and associated with the
-template being parsed. Template definitions must appear at the top level of the
-template, much like global variables in a Go program.
-
-The syntax of such definitions is to surround each template declaration with a
-"define" and "end" action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example:
-
- `{{define "T1"}}ONE{{end}}
- {{define "T2"}}TWO{{end}}
- {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
- {{template "T3"}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed. Finally it invokes T3. If executed this template will
-produce the text
-
- ONE TWO
-
-By construction, a template may reside in only one association. If it's
-necessary to have a template addressable from multiple associations, the
-template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
-
-Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
-
-A template may be executed directly or through ExecuteTemplate, which executes
-an associated template identified by name. To invoke our example above, we
-might write,
-
- err := tmpl.Execute(os.Stdout, "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-or to invoke a particular template explicitly by name,
-
- err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-*/
-package template
diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go
deleted file mode 100644
index c3078e5d..00000000
--- a/vendor/github.com/alecthomas/template/exec.go
+++ /dev/null
@@ -1,845 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "runtime"
- "sort"
- "strings"
-
- "github.com/alecthomas/template/parse"
-)
-
-// state represents the state of an execution. It's not part of the
-// template so that multiple executions of the same template
-// can execute in parallel.
-type state struct {
- tmpl *Template
- wr io.Writer
- node parse.Node // current node, for errors
- vars []variable // push-down stack of variable values.
-}
-
-// variable holds the dynamic value of a variable such as $, $x etc.
-type variable struct {
- name string
- value reflect.Value
-}
-
-// push pushes a new variable on the stack.
-func (s *state) push(name string, value reflect.Value) {
- s.vars = append(s.vars, variable{name, value})
-}
-
-// mark returns the length of the variable stack.
-func (s *state) mark() int {
- return len(s.vars)
-}
-
-// pop pops the variable stack up to the mark.
-func (s *state) pop(mark int) {
- s.vars = s.vars[0:mark]
-}
-
-// setVar overwrites the top-nth variable on the stack. Used by range iterations.
-func (s *state) setVar(n int, value reflect.Value) {
- s.vars[len(s.vars)-n].value = value
-}
-
-// varValue returns the value of the named variable.
-func (s *state) varValue(name string) reflect.Value {
- for i := s.mark() - 1; i >= 0; i-- {
- if s.vars[i].name == name {
- return s.vars[i].value
- }
- }
- s.errorf("undefined variable: %s", name)
- return zero
-}
-
-var zero reflect.Value
-
-// at marks the state to be on node n, for error reporting.
-func (s *state) at(node parse.Node) {
- s.node = node
-}
-
-// doublePercent returns the string with %'s replaced by %%, if necessary,
-// so it can be used safely inside a Printf format string.
-func doublePercent(str string) string {
- if strings.Contains(str, "%") {
- str = strings.Replace(str, "%", "%%", -1)
- }
- return str
-}
-
-// errorf formats the error and terminates processing.
-func (s *state) errorf(format string, args ...interface{}) {
- name := doublePercent(s.tmpl.Name())
- if s.node == nil {
- format = fmt.Sprintf("template: %s: %s", name, format)
- } else {
- location, context := s.tmpl.ErrorContext(s.node)
- format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
- }
- panic(fmt.Errorf(format, args...))
-}
-
-// errRecover is the handler that turns panics into returns from the top
-// level of Parse.
-func errRecover(errp *error) {
- e := recover()
- if e != nil {
- switch err := e.(type) {
- case runtime.Error:
- panic(e)
- case error:
- *errp = err
- default:
- panic(e)
- }
- }
-}
-
-// ExecuteTemplate applies the template associated with t that has the given name
-// to the specified data object and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
- tmpl := t.tmpl[name]
- if tmpl == nil {
- return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
- }
- return tmpl.Execute(wr, data)
-}
-
-// Execute applies a parsed template to the specified data object,
-// and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
- defer errRecover(&err)
- value := reflect.ValueOf(data)
- state := &state{
- tmpl: t,
- wr: wr,
- vars: []variable{{"$", value}},
- }
- t.init()
- if t.Tree == nil || t.Root == nil {
- var b bytes.Buffer
- for name, tmpl := range t.tmpl {
- if tmpl.Tree == nil || tmpl.Root == nil {
- continue
- }
- if b.Len() > 0 {
- b.WriteString(", ")
- }
- fmt.Fprintf(&b, "%q", name)
- }
- var s string
- if b.Len() > 0 {
- s = "; defined templates are: " + b.String()
- }
- state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
- }
- state.walk(value, t.Root)
- return
-}
-
-// Walk functions step through the major pieces of the template structure,
-// generating output as they go.
-func (s *state) walk(dot reflect.Value, node parse.Node) {
- s.at(node)
- switch node := node.(type) {
- case *parse.ActionNode:
- // Do not pop variables so they persist until next end.
- // Also, if the action declares variables, don't print the result.
- val := s.evalPipeline(dot, node.Pipe)
- if len(node.Pipe.Decl) == 0 {
- s.printValue(node, val)
- }
- case *parse.IfNode:
- s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
- case *parse.ListNode:
- for _, node := range node.Nodes {
- s.walk(dot, node)
- }
- case *parse.RangeNode:
- s.walkRange(dot, node)
- case *parse.TemplateNode:
- s.walkTemplate(dot, node)
- case *parse.TextNode:
- if _, err := s.wr.Write(node.Text); err != nil {
- s.errorf("%s", err)
- }
- case *parse.WithNode:
- s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
- default:
- s.errorf("unknown node: %s", node)
- }
-}
-
-// walkIfOrWith walks an 'if' or 'with' node. The two control structures
-// are identical in behavior except that 'with' sets dot.
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
- defer s.pop(s.mark())
- val := s.evalPipeline(dot, pipe)
- truth, ok := isTrue(val)
- if !ok {
- s.errorf("if/with can't use %v", val)
- }
- if truth {
- if typ == parse.NodeWith {
- s.walk(val, list)
- } else {
- s.walk(dot, list)
- }
- } else if elseList != nil {
- s.walk(dot, elseList)
- }
-}
-
-// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value.
-func isTrue(val reflect.Value) (truth, ok bool) {
- if !val.IsValid() {
- // Something like var x interface{}, never set. It's a form of nil.
- return false, true
- }
- switch val.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- truth = val.Len() > 0
- case reflect.Bool:
- truth = val.Bool()
- case reflect.Complex64, reflect.Complex128:
- truth = val.Complex() != 0
- case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
- truth = !val.IsNil()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- truth = val.Int() != 0
- case reflect.Float32, reflect.Float64:
- truth = val.Float() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- truth = val.Uint() != 0
- case reflect.Struct:
- truth = true // Struct values are always true.
- default:
- return
- }
- return truth, true
-}
-
-func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
- s.at(r)
- defer s.pop(s.mark())
- val, _ := indirect(s.evalPipeline(dot, r.Pipe))
- // mark top of stack before any variables in the body are pushed.
- mark := s.mark()
- oneIteration := func(index, elem reflect.Value) {
- // Set top var (lexically the second if there are two) to the element.
- if len(r.Pipe.Decl) > 0 {
- s.setVar(1, elem)
- }
- // Set next var (lexically the first if there are two) to the index.
- if len(r.Pipe.Decl) > 1 {
- s.setVar(2, index)
- }
- s.walk(elem, r.List)
- s.pop(mark)
- }
- switch val.Kind() {
- case reflect.Array, reflect.Slice:
- if val.Len() == 0 {
- break
- }
- for i := 0; i < val.Len(); i++ {
- oneIteration(reflect.ValueOf(i), val.Index(i))
- }
- return
- case reflect.Map:
- if val.Len() == 0 {
- break
- }
- for _, key := range sortKeys(val.MapKeys()) {
- oneIteration(key, val.MapIndex(key))
- }
- return
- case reflect.Chan:
- if val.IsNil() {
- break
- }
- i := 0
- for ; ; i++ {
- elem, ok := val.Recv()
- if !ok {
- break
- }
- oneIteration(reflect.ValueOf(i), elem)
- }
- if i == 0 {
- break
- }
- return
- case reflect.Invalid:
- break // An invalid value is likely a nil map, etc. and acts like an empty map.
- default:
- s.errorf("range can't iterate over %v", val)
- }
- if r.ElseList != nil {
- s.walk(dot, r.ElseList)
- }
-}
-
-func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
- s.at(t)
- tmpl := s.tmpl.tmpl[t.Name]
- if tmpl == nil {
- s.errorf("template %q not defined", t.Name)
- }
- // Variables declared by the pipeline persist.
- dot = s.evalPipeline(dot, t.Pipe)
- newState := *s
- newState.tmpl = tmpl
- // No dynamic scoping: template invocations inherit no variables.
- newState.vars = []variable{{"$", dot}}
- newState.walk(dot, tmpl.Root)
-}
-
-// Eval functions evaluate pipelines, commands, and their elements and extract
-// values from the data structure by examining fields, calling methods, and so on.
-// The printing of those values happens only through walk functions.
-
-// evalPipeline returns the value acquired by evaluating a pipeline. If the
-// pipeline has a variable declaration, the variable will be pushed on the
-// stack. Callers should therefore pop the stack after they are finished
-// executing commands depending on the pipeline value.
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
- if pipe == nil {
- return
- }
- s.at(pipe)
- for _, cmd := range pipe.Cmds {
- value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
- // If the object has type interface{}, dig down one level to the thing inside.
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
- value = reflect.ValueOf(value.Interface()) // lovely!
- }
- }
- for _, variable := range pipe.Decl {
- s.push(variable.Ident[0], value)
- }
- return value
-}
-
-func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
- if len(args) > 1 || final.IsValid() {
- s.errorf("can't give argument to non-function %s", args[0])
- }
-}
-
-func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
- firstWord := cmd.Args[0]
- switch n := firstWord.(type) {
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, cmd.Args, final)
- case *parse.ChainNode:
- return s.evalChainNode(dot, n, cmd.Args, final)
- case *parse.IdentifierNode:
- // Must be a function.
- return s.evalFunction(dot, n, cmd, cmd.Args, final)
- case *parse.PipeNode:
- // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
- return s.evalPipeline(dot, n)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, cmd.Args, final)
- }
- s.at(firstWord)
- s.notAFunction(cmd.Args, final)
- switch word := firstWord.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(word.True)
- case *parse.DotNode:
- return dot
- case *parse.NilNode:
- s.errorf("nil is not a command")
- case *parse.NumberNode:
- return s.idealConstant(word)
- case *parse.StringNode:
- return reflect.ValueOf(word.Text)
- }
- s.errorf("can't evaluate command %q", firstWord)
- panic("not reached")
-}
-
-// idealConstant is called to return the value of a number in a context where
-// we don't know the type. In that case, the syntax of the number tells us
-// its type, and we use Go rules to resolve. Note there is no such thing as
-// a uint ideal constant in this situation - the value must be of int type.
-func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
- // These are ideal constants but we don't know the type
- // and we have no context. (If it was a method argument,
- // we'd know what we need.) The syntax guides us to some extent.
- s.at(constant)
- switch {
- case constant.IsComplex:
- return reflect.ValueOf(constant.Complex128) // incontrovertible.
- case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
- return reflect.ValueOf(constant.Float64)
- case constant.IsInt:
- n := int(constant.Int64)
- if int64(n) != constant.Int64 {
- s.errorf("%s overflows int", constant.Text)
- }
- return reflect.ValueOf(n)
- case constant.IsUint:
- s.errorf("%s overflows int", constant.Text)
- }
- return zero
-}
-
-func isHexConstant(s string) bool {
- return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
-}
-
-func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(field)
- return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
-}
-
-func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(chain)
- // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
- pipe := s.evalArg(dot, nil, chain.Node)
- if len(chain.Field) == 0 {
- s.errorf("internal error: no fields in evalChainNode")
- }
- return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
-}
-
-func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
- // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
- s.at(variable)
- value := s.varValue(variable.Ident[0])
- if len(variable.Ident) == 1 {
- s.notAFunction(args, final)
- return value
- }
- return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
-}
-
-// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
-// dot is the environment in which to evaluate arguments, while
-// receiver is the value being walked along the chain.
-func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
- n := len(ident)
- for i := 0; i < n-1; i++ {
- receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
- }
- // Now if it's a method, it gets the arguments.
- return s.evalField(dot, ident[n-1], node, args, final, receiver)
-}
-
-func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(node)
- name := node.Ident
- function, ok := findFunction(name, s.tmpl)
- if !ok {
- s.errorf("%q is not a defined function", name)
- }
- return s.evalCall(dot, function, cmd, name, args, final)
-}
-
-// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
-// The 'final' argument represents the return value from the preceding
-// value of the pipeline, if any.
-func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
- if !receiver.IsValid() {
- return zero
- }
- typ := receiver.Type()
- receiver, _ = indirect(receiver)
- // Unless it's an interface, need to get to a value of type *T to guarantee
- // we see all methods of T and *T.
- ptr := receiver
- if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
- ptr = ptr.Addr()
- }
- if method := ptr.MethodByName(fieldName); method.IsValid() {
- return s.evalCall(dot, method, node, fieldName, args, final)
- }
- hasArgs := len(args) > 1 || final.IsValid()
- // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
- receiver, isNil := indirect(receiver)
- if isNil {
- s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
- }
- switch receiver.Kind() {
- case reflect.Struct:
- tField, ok := receiver.Type().FieldByName(fieldName)
- if ok {
- field := receiver.FieldByIndex(tField.Index)
- if tField.PkgPath != "" { // field is unexported
- s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
- }
- // If it's a function, we must call it.
- if hasArgs {
- s.errorf("%s has arguments but cannot be invoked as function", fieldName)
- }
- return field
- }
- s.errorf("%s is not a field of struct type %s", fieldName, typ)
- case reflect.Map:
- // If it's a map, attempt to use the field name as a key.
- nameVal := reflect.ValueOf(fieldName)
- if nameVal.Type().AssignableTo(receiver.Type().Key()) {
- if hasArgs {
- s.errorf("%s is not a method but has arguments", fieldName)
- }
- return receiver.MapIndex(nameVal)
- }
- }
- s.errorf("can't evaluate field %s in type %s", fieldName, typ)
- panic("not reached")
-}
-
-var (
- errorType = reflect.TypeOf((*error)(nil)).Elem()
- fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-)
-
-// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
-// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
-// as the function itself.
-func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
- if args != nil {
- args = args[1:] // Zeroth arg is function name/node; not passed to function.
- }
- typ := fun.Type()
- numIn := len(args)
- if final.IsValid() {
- numIn++
- }
- numFixed := len(args)
- if typ.IsVariadic() {
- numFixed = typ.NumIn() - 1 // last arg is the variadic one.
- if numIn < numFixed {
- s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
- }
- } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
- s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
- }
- if !goodFunc(typ) {
- // TODO: This could still be a confusing error; maybe goodFunc should provide info.
- s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
- }
- // Build the arg list.
- argv := make([]reflect.Value, numIn)
- // Args must be evaluated. Fixed args first.
- i := 0
- for ; i < numFixed && i < len(args); i++ {
- argv[i] = s.evalArg(dot, typ.In(i), args[i])
- }
- // Now the ... args.
- if typ.IsVariadic() {
- argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
- for ; i < len(args); i++ {
- argv[i] = s.evalArg(dot, argType, args[i])
- }
- }
- // Add final value if necessary.
- if final.IsValid() {
- t := typ.In(typ.NumIn() - 1)
- if typ.IsVariadic() {
- t = t.Elem()
- }
- argv[i] = s.validateType(final, t)
- }
- result := fun.Call(argv)
- // If we have an error that is not nil, stop execution and return that error to the caller.
- if len(result) == 2 && !result[1].IsNil() {
- s.at(node)
- s.errorf("error calling %s: %s", name, result[1].Interface().(error))
- }
- return result[0]
-}
-
-// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
-func canBeNil(typ reflect.Type) bool {
- switch typ.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return true
- }
- return false
-}
-
-// validateType guarantees that the value is valid and assignable to the type.
-func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
- if !value.IsValid() {
- if typ == nil || canBeNil(typ) {
- // An untyped nil interface{}. Accept as a proper nil value.
- return reflect.Zero(typ)
- }
- s.errorf("invalid value; expected %s", typ)
- }
- if typ != nil && !value.Type().AssignableTo(typ) {
- if value.Kind() == reflect.Interface && !value.IsNil() {
- value = value.Elem()
- if value.Type().AssignableTo(typ) {
- return value
- }
- // fallthrough
- }
- // Does one dereference or indirection work? We could do more, as we
- // do with method receivers, but that gets messy and method receivers
- // are much more constrained, so it makes more sense there than here.
- // Besides, one is almost always all you need.
- switch {
- case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
- value = value.Elem()
- if !value.IsValid() {
- s.errorf("dereference of nil pointer of type %s", typ)
- }
- case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
- value = value.Addr()
- default:
- s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
- }
- }
- return value
-}
-
-func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- switch arg := n.(type) {
- case *parse.DotNode:
- return s.validateType(dot, typ)
- case *parse.NilNode:
- if canBeNil(typ) {
- return reflect.Zero(typ)
- }
- s.errorf("cannot assign nil to %s", typ)
- case *parse.FieldNode:
- return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
- case *parse.VariableNode:
- return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
- case *parse.PipeNode:
- return s.validateType(s.evalPipeline(dot, arg), typ)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, arg, arg, nil, zero)
- case *parse.ChainNode:
- return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
- }
- switch typ.Kind() {
- case reflect.Bool:
- return s.evalBool(typ, n)
- case reflect.Complex64, reflect.Complex128:
- return s.evalComplex(typ, n)
- case reflect.Float32, reflect.Float64:
- return s.evalFloat(typ, n)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return s.evalInteger(typ, n)
- case reflect.Interface:
- if typ.NumMethod() == 0 {
- return s.evalEmptyInterface(dot, n)
- }
- case reflect.String:
- return s.evalString(typ, n)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return s.evalUnsignedInteger(typ, n)
- }
- s.errorf("can't handle %s for arg of type %s", n, typ)
- panic("not reached")
-}
-
-func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.BoolNode); ok {
- value := reflect.New(typ).Elem()
- value.SetBool(n.True)
- return value
- }
- s.errorf("expected bool; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.StringNode); ok {
- value := reflect.New(typ).Elem()
- value.SetString(n.Text)
- return value
- }
- s.errorf("expected string; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
- value := reflect.New(typ).Elem()
- value.SetInt(n.Int64)
- return value
- }
- s.errorf("expected integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
- value := reflect.New(typ).Elem()
- value.SetUint(n.Uint64)
- return value
- }
- s.errorf("expected unsigned integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
- value := reflect.New(typ).Elem()
- value.SetFloat(n.Float64)
- return value
- }
- s.errorf("expected float; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
- value := reflect.New(typ).Elem()
- value.SetComplex(n.Complex128)
- return value
- }
- s.errorf("expected complex; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
- s.at(n)
- switch n := n.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(n.True)
- case *parse.DotNode:
- return dot
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, nil, zero)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, n, n, nil, zero)
- case *parse.NilNode:
- // NilNode is handled in evalArg, the only place that calls here.
- s.errorf("evalEmptyInterface: nil (can't happen)")
- case *parse.NumberNode:
- return s.idealConstant(n)
- case *parse.StringNode:
- return reflect.ValueOf(n.Text)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, nil, zero)
- case *parse.PipeNode:
- return s.evalPipeline(dot, n)
- }
- s.errorf("can't handle assignment of %s to empty interface argument", n)
- panic("not reached")
-}
-
-// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
-// We indirect through pointers and empty interfaces (only) because
-// non-empty interfaces have methods we might need.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
- for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
- if v.IsNil() {
- return v, true
- }
- if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
- break
- }
- }
- return v, false
-}
-
-// printValue writes the textual representation of the value to the output of
-// the template.
-func (s *state) printValue(n parse.Node, v reflect.Value) {
- s.at(n)
- iface, ok := printableValue(v)
- if !ok {
- s.errorf("can't print %s of type %s", n, v.Type())
- }
- fmt.Fprint(s.wr, iface)
-}
-
-// printableValue returns the, possibly indirected, interface value inside v that
-// is best for a call to formatted printer.
-func printableValue(v reflect.Value) (interface{}, bool) {
- if v.Kind() == reflect.Ptr {
- v, _ = indirect(v) // fmt.Fprint handles nil.
- }
- if !v.IsValid() {
- return "", true
- }
-
- if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
- if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
- v = v.Addr()
- } else {
- switch v.Kind() {
- case reflect.Chan, reflect.Func:
- return nil, false
- }
- }
- }
- return v.Interface(), true
-}
-
-// Types to help sort the keys in a map for reproducible output.
-
-type rvs []reflect.Value
-
-func (x rvs) Len() int { return len(x) }
-func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-type rvInts struct{ rvs }
-
-func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
-
-type rvUints struct{ rvs }
-
-func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
-
-type rvFloats struct{ rvs }
-
-func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
-
-type rvStrings struct{ rvs }
-
-func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
-
-// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
-func sortKeys(v []reflect.Value) []reflect.Value {
- if len(v) <= 1 {
- return v
- }
- switch v[0].Kind() {
- case reflect.Float32, reflect.Float64:
- sort.Sort(rvFloats{v})
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- sort.Sort(rvInts{v})
- case reflect.String:
- sort.Sort(rvStrings{v})
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- sort.Sort(rvUints{v})
- }
- return v
-}
diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go
deleted file mode 100644
index 39ee5ed6..00000000
--- a/vendor/github.com/alecthomas/template/funcs.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net/url"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-type FuncMap map[string]interface{}
-
-var builtins = FuncMap{
- "and": and,
- "call": call,
- "html": HTMLEscaper,
- "index": index,
- "js": JSEscaper,
- "len": length,
- "not": not,
- "or": or,
- "print": fmt.Sprint,
- "printf": fmt.Sprintf,
- "println": fmt.Sprintln,
- "urlquery": URLQueryEscaper,
-
- // Comparisons
- "eq": eq, // ==
- "ge": ge, // >=
- "gt": gt, // >
- "le": le, // <=
- "lt": lt, // <
- "ne": ne, // !=
-}
-
-var builtinFuncs = createValueFuncs(builtins)
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
- addValueFuncs(m, funcMap)
- return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
- for name, fn := range in {
- v := reflect.ValueOf(fn)
- if v.Kind() != reflect.Func {
- panic("value for " + name + " not a function")
- }
- if !goodFunc(v.Type()) {
- panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
- }
- out[name] = v
- }
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
- for name, fn := range in {
- out[name] = fn
- }
-}
-
-// goodFunc checks that the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
- // We allow functions with 1 result or 2 results where the second is an error.
- switch {
- case typ.NumOut() == 1:
- return true
- case typ.NumOut() == 2 && typ.Out(1) == errorType:
- return true
- }
- return false
-}
-
-// findFunction looks for a function in the template, and global map.
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
- if tmpl != nil && tmpl.common != nil {
- if fn := tmpl.execFuncs[name]; fn.IsValid() {
- return fn, true
- }
- }
- if fn := builtinFuncs[name]; fn.IsValid() {
- return fn, true
- }
- return reflect.Value{}, false
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item interface{}, indices ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(item)
- for _, i := range indices {
- index := reflect.ValueOf(i)
- var isNil bool
- if v, isNil = indirect(v); isNil {
- return nil, fmt.Errorf("index of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- var x int64
- switch index.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- x = index.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- x = int64(index.Uint())
- default:
- return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
- }
- if x < 0 || x >= int64(v.Len()) {
- return nil, fmt.Errorf("index out of range: %d", x)
- }
- v = v.Index(int(x))
- case reflect.Map:
- if !index.IsValid() {
- index = reflect.Zero(v.Type().Key())
- }
- if !index.Type().AssignableTo(v.Type().Key()) {
- return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
- }
- if x := v.MapIndex(index); x.IsValid() {
- v = x
- } else {
- v = reflect.Zero(v.Type().Elem())
- }
- default:
- return nil, fmt.Errorf("can't index item of type %s", v.Type())
- }
- }
- return v.Interface(), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item interface{}) (int, error) {
- v, isNil := indirect(reflect.ValueOf(item))
- if isNil {
- return 0, fmt.Errorf("len of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.Len(), nil
- }
- return 0, fmt.Errorf("len of type %s", v.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn interface{}, args ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(fn)
- typ := v.Type()
- if typ.Kind() != reflect.Func {
- return nil, fmt.Errorf("non-function of type %s", typ)
- }
- if !goodFunc(typ) {
- return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
- }
- numIn := typ.NumIn()
- var dddType reflect.Type
- if typ.IsVariadic() {
- if len(args) < numIn-1 {
- return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
- }
- dddType = typ.In(numIn - 1).Elem()
- } else {
- if len(args) != numIn {
- return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
- }
- }
- argv := make([]reflect.Value, len(args))
- for i, arg := range args {
- value := reflect.ValueOf(arg)
- // Compute the expected type. Clumsy because of variadics.
- var argType reflect.Type
- if !typ.IsVariadic() || i < numIn-1 {
- argType = typ.In(i)
- } else {
- argType = dddType
- }
- if !value.IsValid() && canBeNil(argType) {
- value = reflect.Zero(argType)
- }
- if !value.Type().AssignableTo(argType) {
- return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
- }
- argv[i] = value
- }
- result := v.Call(argv)
- if len(result) == 2 && !result[1].IsNil() {
- return result[0].Interface(), result[1].Interface().(error)
- }
- return result[0].Interface(), nil
-}
-
-// Boolean logic.
-
-func truth(a interface{}) bool {
- t, _ := isTrue(reflect.ValueOf(a))
- return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 interface{}, args ...interface{}) interface{} {
- if !truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if !truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 interface{}, args ...interface{}) interface{} {
- if truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg interface{}) (truth bool) {
- truth, _ = isTrue(reflect.ValueOf(arg))
- return !truth
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
- errBadComparisonType = errors.New("invalid type for comparison")
- errBadComparison = errors.New("incompatible types for comparison")
- errNoComparison = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
- invalidKind kind = iota
- boolKind
- complexKind
- intKind
- floatKind
- integerKind
- stringKind
- uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
- switch v.Kind() {
- case reflect.Bool:
- return boolKind, nil
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return intKind, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return uintKind, nil
- case reflect.Float32, reflect.Float64:
- return floatKind, nil
- case reflect.Complex64, reflect.Complex128:
- return complexKind, nil
- case reflect.String:
- return stringKind, nil
- }
- return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- if len(arg2) == 0 {
- return false, errNoComparison
- }
- for _, arg := range arg2 {
- v2 := reflect.ValueOf(arg)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind:
- truth = v1.Bool() == v2.Bool()
- case complexKind:
- truth = v1.Complex() == v2.Complex()
- case floatKind:
- truth = v1.Float() == v2.Float()
- case intKind:
- truth = v1.Int() == v2.Int()
- case stringKind:
- truth = v1.String() == v2.String()
- case uintKind:
- truth = v1.Uint() == v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- if truth {
- return true, nil
- }
- }
- return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 interface{}) (bool, error) {
- // != is the inverse of ==.
- equal, err := eq(arg1, arg2)
- return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- v2 := reflect.ValueOf(arg2)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind, complexKind:
- return false, errBadComparisonType
- case floatKind:
- truth = v1.Float() < v2.Float()
- case intKind:
- truth = v1.Int() < v2.Int()
- case stringKind:
- truth = v1.String() < v2.String()
- case uintKind:
- truth = v1.Uint() < v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- return truth, nil
-}
-
-// le evaluates the comparison <= b.
-func le(arg1, arg2 interface{}) (bool, error) {
- // <= is < or ==.
- lessThan, err := lt(arg1, arg2)
- if lessThan || err != nil {
- return lessThan, err
- }
- return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
- // > is the inverse of <=.
- lessOrEqual, err := le(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
- // >= is the inverse of <.
- lessThan, err := lt(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
-	htmlQuot = []byte("&#34;") // shorter than "&quot;"
-	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
-	htmlAmp = []byte("&amp;")
-	htmlLt = []byte("&lt;")
-	htmlGt = []byte("&gt;")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexAny(s, `'"&<>`) < 0 {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
- return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\x3C`)
- jsGt = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
- return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
- return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-// fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
- ok := false
- var s string
- // Fast path for simple common case.
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- for i, arg := range args {
- a, ok := printableValue(reflect.ValueOf(arg))
- if ok {
- args[i] = a
-			} // else let fmt do its thing
- }
- s = fmt.Sprint(args...)
- }
- return s
-}
diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go
deleted file mode 100644
index 3636fb54..00000000
--- a/vendor/github.com/alecthomas/template/helper.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates easier.
-
-package template
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
-)
-
-// Functions and methods to parse templates.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the (base) name and
-// (parsed) contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-func ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(nil, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(t, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, filenames ...string) (*Template, error) {
- if len(filenames) == 0 {
- // Not really a problem, but be consistent.
- return nil, fmt.Errorf("template: no files named in call to ParseFiles")
- }
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- s := string(b)
- name := filepath.Base(filename)
- // First template becomes return value if not already defined,
- // and we use that one for subsequent New calls to associate
- // all the templates together. Also, if this file has the same name
- // as t, this file becomes the contents of t, so
- // t, err := New(name).Funcs(xxx).ParseFiles(name)
- // works. Otherwise we create a new template associated with t.
- var tmpl *Template
- if t == nil {
- t = New(name)
- }
- if name == t.Name() {
- tmpl = t
- } else {
- tmpl = t.New(name)
- }
- _, err = tmpl.Parse(s)
- if err != nil {
- return nil, err
- }
- }
- return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from the
-// files identified by the pattern, which must match at least one file. The
-// returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-func ParseGlob(pattern string) (*Template, error) {
- return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The pattern is
-// processed by filepath.Glob and must match at least one file. ParseGlob is
-// equivalent to calling t.ParseFiles with the list of files matched by the
-// pattern.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
- return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
- }
- return parseFiles(t, filenames...)
-}
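
helper.go above only wraps the parser with convenience entry points (Must, ParseFiles, ParseGlob). A hedged sketch of how they are typically used; the template file names are hypothetical, and Execute/ExecuteTemplate belong to the execution half of the fork's text/template-style API, which sits outside this hunk:

package main

import (
	"log"
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// Must panics on a parse error, which keeps package-level template
	// variables to a single expression.
	t := template.Must(template.New("greeting").Parse("Hello, {{.}}!\n"))
	if err := t.Execute(os.Stdout, "world"); err != nil {
		log.Fatal(err)
	}

	// ParseGlob loads every matching file and associates the resulting
	// templates with each other; the pattern must match at least one file.
	set := template.Must(template.ParseGlob("templates/*.tmpl"))
	if err := set.ExecuteTemplate(os.Stdout, "index.tmpl", nil); err != nil {
		log.Fatal(err)
	}
}
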
diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go
deleted file mode 100644
index 55f1c051..00000000
--- a/vendor/github.com/alecthomas/template/parse/lex.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
- typ itemType // The type of this item.
- pos Pos // The starting position, in bytes, of this item in the input string.
- val string // The value of this item.
-}
-
-func (i item) String() string {
- switch {
- case i.typ == itemEOF:
- return "EOF"
- case i.typ == itemError:
- return i.val
- case i.typ > itemKeyword:
- return fmt.Sprintf("<%s>", i.val)
- case len(i.val) > 10:
- return fmt.Sprintf("%.10q...", i.val)
- }
- return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
- itemError itemType = iota // error occurred; value is text of error
- itemBool // boolean constant
- itemChar // printable ASCII character; grab bag for comma etc.
- itemCharConstant // character constant
- itemComplex // complex constant (1+2i); imaginary is just a number
- itemColonEquals // colon-equals (':=') introducing a declaration
- itemEOF
- itemField // alphanumeric identifier starting with '.'
- itemIdentifier // alphanumeric identifier not starting with '.'
- itemLeftDelim // left action delimiter
- itemLeftParen // '(' inside action
- itemNumber // simple number, including imaginary
- itemPipe // pipe symbol
- itemRawString // raw quoted string (includes quotes)
- itemRightDelim // right action delimiter
- itemElideNewline // elide newline after right delim
- itemRightParen // ')' inside action
- itemSpace // run of spaces separating arguments
- itemString // quoted string (includes quotes)
- itemText // plain text
- itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
- // Keywords appear after all the rest.
- itemKeyword // used only to delimit the keywords
- itemDot // the cursor, spelled '.'
- itemDefine // define keyword
- itemElse // else keyword
- itemEnd // end keyword
- itemIf // if keyword
- itemNil // the untyped nil constant, easiest to treat as a keyword
- itemRange // range keyword
- itemTemplate // template keyword
- itemWith // with keyword
-)
-
-var key = map[string]itemType{
- ".": itemDot,
- "define": itemDefine,
- "else": itemElse,
- "end": itemEnd,
- "if": itemIf,
- "range": itemRange,
- "nil": itemNil,
- "template": itemTemplate,
- "with": itemWith,
-}
-
-const eof = -1
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
- name string // the name of the input; used only for error reports
- input string // the string being scanned
- leftDelim string // start of action
- rightDelim string // end of action
- state stateFn // the next lexing function to enter
- pos Pos // current position in the input
- start Pos // start position of this item
- width Pos // width of last rune read from input
- lastPos Pos // position of most recent item returned by nextItem
- items chan item // channel of scanned items
- parenDepth int // nesting depth of ( ) exprs
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
- if int(l.pos) >= len(l.input) {
- l.width = 0
- return eof
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = Pos(w)
- l.pos += l.width
- return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
- l.pos -= l.width
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.start, l.input[l.start:l.pos]}
- l.start = l.pos
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
- l.start = l.pos
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
- if strings.IndexRune(valid, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
- for strings.IndexRune(valid, l.next()) >= 0 {
- }
- l.backup()
-}
-
-// lineNumber reports which line we're on, based on the position of
-// the previous item returned by nextItem. Doing it this way
-// means we don't have to worry about peek double counting.
-func (l *lexer) lineNumber() int {
- return 1 + strings.Count(l.input[:l.lastPos], "\n")
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
- l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
- return nil
-}
-
-// nextItem returns the next item from the input.
-func (l *lexer) nextItem() item {
- item := <-l.items
- l.lastPos = item.pos
- return item
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string) *lexer {
- if left == "" {
- left = leftDelim
- }
- if right == "" {
- right = rightDelim
- }
- l := &lexer{
- name: name,
- input: input,
- leftDelim: left,
- rightDelim: right,
- items: make(chan item),
- }
- go l.run()
- return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
- for l.state = lexText; l.state != nil; {
- l.state = l.state(l)
- }
-}
-
-// state functions
-
-const (
- leftDelim = "{{"
- rightDelim = "}}"
- leftComment = "/*"
- rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
- for {
- if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
- if l.pos > l.start {
- l.emit(itemText)
- }
- return lexLeftDelim
- }
- if l.next() == eof {
- break
- }
- }
- // Correctly reached EOF.
- if l.pos > l.start {
- l.emit(itemText)
- }
- l.emit(itemEOF)
- return nil
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present.
-func lexLeftDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.leftDelim))
- if strings.HasPrefix(l.input[l.pos:], leftComment) {
- return lexComment
- }
- l.emit(itemLeftDelim)
- l.parenDepth = 0
- return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
- l.pos += Pos(len(leftComment))
- i := strings.Index(l.input[l.pos:], rightComment)
- if i < 0 {
- return l.errorf("unclosed comment")
- }
- l.pos += Pos(i + len(rightComment))
- if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- return l.errorf("comment ends before closing delimiter")
-
- }
- l.pos += Pos(len(l.rightDelim))
- l.ignore()
- return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present.
-func lexRightDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.rightDelim))
- l.emit(itemRightDelim)
- if l.peek() == '\\' {
- l.pos++
- l.emit(itemElideNewline)
- }
- return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
- // Either number, quoted string, or identifier.
- // Spaces separate arguments; runs of spaces turn into itemSpace.
- // Pipe symbols separate and are emitted.
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- if l.parenDepth == 0 {
- return lexRightDelim
- }
- return l.errorf("unclosed left paren")
- }
- switch r := l.next(); {
- case r == eof || isEndOfLine(r):
- return l.errorf("unclosed action")
- case isSpace(r):
- return lexSpace
- case r == ':':
- if l.next() != '=' {
- return l.errorf("expected :=")
- }
- l.emit(itemColonEquals)
- case r == '|':
- l.emit(itemPipe)
- case r == '"':
- return lexQuote
- case r == '`':
- return lexRawQuote
- case r == '$':
- return lexVariable
- case r == '\'':
- return lexChar
- case r == '.':
- // special look-ahead for ".field" so we don't break l.backup().
- if l.pos < Pos(len(l.input)) {
- r := l.input[l.pos]
- if r < '0' || '9' < r {
- return lexField
- }
- }
- fallthrough // '.' can start a number.
- case r == '+' || r == '-' || ('0' <= r && r <= '9'):
- l.backup()
- return lexNumber
- case isAlphaNumeric(r):
- l.backup()
- return lexIdentifier
- case r == '(':
- l.emit(itemLeftParen)
- l.parenDepth++
- return lexInsideAction
- case r == ')':
- l.emit(itemRightParen)
- l.parenDepth--
- if l.parenDepth < 0 {
- return l.errorf("unexpected right paren %#U", r)
- }
- return lexInsideAction
- case r <= unicode.MaxASCII && unicode.IsPrint(r):
- l.emit(itemChar)
- return lexInsideAction
- default:
- return l.errorf("unrecognized character in action: %#U", r)
- }
- return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// One space has already been seen.
-func lexSpace(l *lexer) stateFn {
- for isSpace(l.peek()) {
- l.next()
- }
- l.emit(itemSpace)
- return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
- for {
- switch r := l.next(); {
- case isAlphaNumeric(r):
- // absorb.
- default:
- l.backup()
- word := l.input[l.start:l.pos]
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- switch {
- case key[word] > itemKeyword:
- l.emit(key[word])
- case word[0] == '.':
- l.emit(itemField)
- case word == "true", word == "false":
- l.emit(itemBool)
- default:
- l.emit(itemIdentifier)
- }
- break Loop
- }
- }
- return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
- return lexFieldOrVariable(l, itemField)
-}
-
-// lexVariable scans a Variable: $Alphanumeric.
-// The $ has been scanned.
-func lexVariable(l *lexer) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "$".
- l.emit(itemVariable)
- return lexInsideAction
- }
- return lexFieldOrVariable(l, itemVariable)
-}
-
-// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
-// The . or $ has been scanned.
-func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "." or "$".
- if typ == itemVariable {
- l.emit(itemVariable)
- } else {
- l.emit(itemDot)
- }
- return lexInsideAction
- }
- var r rune
- for {
- r = l.next()
- if !isAlphaNumeric(r) {
- l.backup()
- break
- }
- }
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- l.emit(typ)
- return lexInsideAction
-}
-
-// atTerminator reports whether the input is at valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
- r := l.peek()
- if isSpace(r) || isEndOfLine(r) {
- return true
- }
- switch r {
- case eof, '.', ',', '|', ':', ')', '(':
- return true
- }
- // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
- // succeed but should fail) but only in extremely rare cases caused by willfully
- // bad choice of delimiter.
- if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
- return true
- }
- return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated character constant")
- case '\'':
- break Loop
- }
- }
- l.emit(itemCharConstant)
- return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
- if !l.scanNumber() {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- if sign := l.peek(); sign == '+' || sign == '-' {
- // Complex: 1+2i. No spaces, must end in 'i'.
- if !l.scanNumber() || l.input[l.pos-1] != 'i' {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(itemComplex)
- } else {
- l.emit(itemNumber)
- }
- return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
- // Optional leading sign.
- l.accept("+-")
- // Is it hex?
- digits := "0123456789"
- if l.accept("0") && l.accept("xX") {
- digits = "0123456789abcdefABCDEF"
- }
- l.acceptRun(digits)
- if l.accept(".") {
- l.acceptRun(digits)
- }
- if l.accept("eE") {
- l.accept("+-")
- l.acceptRun("0123456789")
- }
- // Is it imaginary?
- l.accept("i")
- // Next thing mustn't be alphanumeric.
- if isAlphaNumeric(l.peek()) {
- l.next()
- return false
- }
- return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated quoted string")
- case '"':
- break Loop
- }
- }
- l.emit(itemString)
- return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case eof, '\n':
- return l.errorf("unterminated raw quoted string")
- case '`':
- break Loop
- }
- }
- l.emit(itemRawString)
- return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
- return r == ' ' || r == '\t'
-}
-
-// isEndOfLine reports whether r is an end-of-line character.
-func isEndOfLine(r rune) bool {
- return r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
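
One detail worth noting in the lexer above: unlike the standard library, this fork emits an itemElideNewline token when a backslash immediately follows the closing "}}" (see lexRightDelim), and the parser then strips the newline that follows. A minimal sketch of that behaviour through the exported parse API; the template text and name are illustrative only:

package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// "}}\" elides the newline after the action, so the text node inside the
	// if-body starts at "details" rather than "\ndetails".
	const text = "{{if .Verbose}}\\\ndetails{{end}}\n"

	trees, err := parse.Parse("elide", text, "", "")
	if err != nil {
		panic(err)
	}

	// The reconstructed tree no longer contains the elided newline.
	fmt.Println(trees["elide"].Root.String())
}
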
diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go
deleted file mode 100644
index 55c37f6d..00000000
--- a/vendor/github.com/alecthomas/template/parse/node.go
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse nodes.
-
-package parse
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-)
-
-var textFormat = "%s" // Changed to "%q" in tests for better error messages.
-
-// A Node is an element in the parse tree. The interface is trivial.
-// The interface contains an unexported method so that only
-// types local to this package can satisfy it.
-type Node interface {
- Type() NodeType
- String() string
- // Copy does a deep copy of the Node and all its components.
- // To avoid type assertions, some XxxNodes also have specialized
- // CopyXxx methods that return *XxxNode.
- Copy() Node
- Position() Pos // byte position of start of node in full original input string
- // tree returns the containing *Tree.
- // It is unexported so all implementations of Node are in this package.
- tree() *Tree
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-// Pos represents a byte position in the original input text from which
-// this template was parsed.
-type Pos int
-
-func (p Pos) Position() Pos {
- return p
-}
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-const (
- NodeText NodeType = iota // Plain text.
- NodeAction // A non-control action such as a field evaluation.
- NodeBool // A boolean constant.
- NodeChain // A sequence of field accesses.
- NodeCommand // An element of a pipeline.
- NodeDot // The cursor, dot.
- nodeElse // An else action. Not added to tree.
- nodeEnd // An end action. Not added to tree.
- NodeField // A field or method name.
- NodeIdentifier // An identifier; always a function name.
- NodeIf // An if action.
- NodeList // A list of Nodes.
- NodeNil // An untyped nil constant.
- NodeNumber // A numerical constant.
- NodePipe // A pipeline of commands.
- NodeRange // A range action.
- NodeString // A string constant.
- NodeTemplate // A template invocation action.
- NodeVariable // A $ variable.
- NodeWith // A with action.
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
- NodeType
- Pos
- tr *Tree
- Nodes []Node // The element nodes in lexical order.
-}
-
-func (t *Tree) newList(pos Pos) *ListNode {
- return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
-}
-
-func (l *ListNode) append(n Node) {
- l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) tree() *Tree {
- return l.tr
-}
-
-func (l *ListNode) String() string {
- b := new(bytes.Buffer)
- for _, n := range l.Nodes {
- fmt.Fprint(b, n)
- }
- return b.String()
-}
-
-func (l *ListNode) CopyList() *ListNode {
- if l == nil {
- return l
- }
- n := l.tr.newList(l.Pos)
- for _, elem := range l.Nodes {
- n.append(elem.Copy())
- }
- return n
-}
-
-func (l *ListNode) Copy() Node {
- return l.CopyList()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
- NodeType
- Pos
- tr *Tree
- Text []byte // The text; may span newlines.
-}
-
-func (t *Tree) newText(pos Pos, text string) *TextNode {
- return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
-}
-
-func (t *TextNode) String() string {
- return fmt.Sprintf(textFormat, t.Text)
-}
-
-func (t *TextNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TextNode) Copy() Node {
- return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
-}
-
-// PipeNode holds a pipeline with optional declaration
-type PipeNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Decl []*VariableNode // Variable declarations in lexical order.
- Cmds []*CommandNode // The commands in lexical order.
-}
-
-func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
- return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
-}
-
-func (p *PipeNode) append(command *CommandNode) {
- p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
- s := ""
- if len(p.Decl) > 0 {
- for i, v := range p.Decl {
- if i > 0 {
- s += ", "
- }
- s += v.String()
- }
- s += " := "
- }
- for i, c := range p.Cmds {
- if i > 0 {
- s += " | "
- }
- s += c.String()
- }
- return s
-}
-
-func (p *PipeNode) tree() *Tree {
- return p.tr
-}
-
-func (p *PipeNode) CopyPipe() *PipeNode {
- if p == nil {
- return p
- }
- var decl []*VariableNode
- for _, d := range p.Decl {
- decl = append(decl, d.Copy().(*VariableNode))
- }
- n := p.tr.newPipeline(p.Pos, p.Line, decl)
- for _, c := range p.Cmds {
- n.append(c.Copy().(*CommandNode))
- }
- return n
-}
-
-func (p *PipeNode) Copy() Node {
- return p.CopyPipe()
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations and parenthesized pipelines.
-type ActionNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline in the action.
-}
-
-func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
- return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
-}
-
-func (a *ActionNode) String() string {
- return fmt.Sprintf("{{%s}}", a.Pipe)
-
-}
-
-func (a *ActionNode) tree() *Tree {
- return a.tr
-}
-
-func (a *ActionNode) Copy() Node {
- return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
-
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
- NodeType
- Pos
- tr *Tree
- Args []Node // Arguments in lexical order: Identifier, field, or constant.
-}
-
-func (t *Tree) newCommand(pos Pos) *CommandNode {
- return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
-}
-
-func (c *CommandNode) append(arg Node) {
- c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
- s := ""
- for i, arg := range c.Args {
- if i > 0 {
- s += " "
- }
- if arg, ok := arg.(*PipeNode); ok {
- s += "(" + arg.String() + ")"
- continue
- }
- s += arg.String()
- }
- return s
-}
-
-func (c *CommandNode) tree() *Tree {
- return c.tr
-}
-
-func (c *CommandNode) Copy() Node {
- if c == nil {
- return c
- }
- n := c.tr.newCommand(c.Pos)
- for _, c := range c.Args {
- n.append(c.Copy())
- }
- return n
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
- NodeType
- Pos
- tr *Tree
- Ident string // The identifier's name.
-}
-
-// NewIdentifier returns a new IdentifierNode with the given identifier name.
-func NewIdentifier(ident string) *IdentifierNode {
- return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
-}
-
-// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
- i.Pos = pos
- return i
-}
-
-// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
- i.tr = t
- return i
-}
-
-func (i *IdentifierNode) String() string {
- return i.Ident
-}
-
-func (i *IdentifierNode) tree() *Tree {
- return i.tr
-}
-
-func (i *IdentifierNode) Copy() Node {
- return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
-}
-
-// VariableNode holds a list of variable names, possibly with chained field
-// accesses. The dollar sign is part of the (first) name.
-type VariableNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // Variable name and fields in lexical order.
-}
-
-func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
- return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
-}
-
-func (v *VariableNode) String() string {
- s := ""
- for i, id := range v.Ident {
- if i > 0 {
- s += "."
- }
- s += id
- }
- return s
-}
-
-func (v *VariableNode) tree() *Tree {
- return v.tr
-}
-
-func (v *VariableNode) Copy() Node {
- return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
-}
-
-// DotNode holds the special identifier '.'.
-type DotNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newDot(pos Pos) *DotNode {
- return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
-}
-
-func (d *DotNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeDot
-}
-
-func (d *DotNode) String() string {
- return "."
-}
-
-func (d *DotNode) tree() *Tree {
- return d.tr
-}
-
-func (d *DotNode) Copy() Node {
- return d.tr.newDot(d.Pos)
-}
-
-// NilNode holds the special identifier 'nil' representing an untyped nil constant.
-type NilNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newNil(pos Pos) *NilNode {
- return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
-}
-
-func (n *NilNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeNil
-}
-
-func (n *NilNode) String() string {
- return "nil"
-}
-
-func (n *NilNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NilNode) Copy() Node {
- return n.tr.newNil(n.Pos)
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newField(pos Pos, ident string) *FieldNode {
- return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
-}
-
-func (f *FieldNode) String() string {
- s := ""
- for _, id := range f.Ident {
- s += "." + id
- }
- return s
-}
-
-func (f *FieldNode) tree() *Tree {
- return f.tr
-}
-
-func (f *FieldNode) Copy() Node {
- return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
-}
-
-// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The periods are dropped from each ident.
-type ChainNode struct {
- NodeType
- Pos
- tr *Tree
- Node Node
- Field []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
- return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
-}
-
-// Add adds the named field (which should start with a period) to the end of the chain.
-func (c *ChainNode) Add(field string) {
- if len(field) == 0 || field[0] != '.' {
- panic("no dot in field")
- }
- field = field[1:] // Remove leading dot.
- if field == "" {
- panic("empty field")
- }
- c.Field = append(c.Field, field)
-}
-
-func (c *ChainNode) String() string {
- s := c.Node.String()
- if _, ok := c.Node.(*PipeNode); ok {
- s = "(" + s + ")"
- }
- for _, field := range c.Field {
- s += "." + field
- }
- return s
-}
-
-func (c *ChainNode) tree() *Tree {
- return c.tr
-}
-
-func (c *ChainNode) Copy() Node {
- return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
- NodeType
- Pos
- tr *Tree
- True bool // The value of the boolean constant.
-}
-
-func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
- return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
-}
-
-func (b *BoolNode) String() string {
- if b.True {
- return "true"
- }
- return "false"
-}
-
-func (b *BoolNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BoolNode) Copy() Node {
- return b.tr.newBool(b.Pos, b.True)
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
- NodeType
- Pos
- tr *Tree
- IsInt bool // Number has an integral value.
- IsUint bool // Number has an unsigned integral value.
- IsFloat bool // Number has a floating-point value.
- IsComplex bool // Number is complex.
- Int64 int64 // The signed integer value.
- Uint64 uint64 // The unsigned integer value.
- Float64 float64 // The floating-point value.
- Complex128 complex128 // The complex value.
- Text string // The original textual representation from the input.
-}
-
-func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
- n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
- switch typ {
- case itemCharConstant:
- rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
- if err != nil {
- return nil, err
- }
- if tail != "'" {
- return nil, fmt.Errorf("malformed character constant: %s", text)
- }
- n.Int64 = int64(rune)
- n.IsInt = true
- n.Uint64 = uint64(rune)
- n.IsUint = true
- n.Float64 = float64(rune) // odd but those are the rules.
- n.IsFloat = true
- return n, nil
- case itemComplex:
- // fmt.Sscan can parse the pair, so let it do the work.
- if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
- return nil, err
- }
- n.IsComplex = true
- n.simplifyComplex()
- return n, nil
- }
- // Imaginary constants can only be complex unless they are zero.
- if len(text) > 0 && text[len(text)-1] == 'i' {
- f, err := strconv.ParseFloat(text[:len(text)-1], 64)
- if err == nil {
- n.IsComplex = true
- n.Complex128 = complex(0, f)
- n.simplifyComplex()
- return n, nil
- }
- }
- // Do integer test first so we get 0x123 etc.
- u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
- if err == nil {
- n.IsUint = true
- n.Uint64 = u
- }
- i, err := strconv.ParseInt(text, 0, 64)
- if err == nil {
- n.IsInt = true
- n.Int64 = i
- if i == 0 {
- n.IsUint = true // in case of -0.
- n.Uint64 = u
- }
- }
- // If an integer extraction succeeded, promote the float.
- if n.IsInt {
- n.IsFloat = true
- n.Float64 = float64(n.Int64)
- } else if n.IsUint {
- n.IsFloat = true
- n.Float64 = float64(n.Uint64)
- } else {
- f, err := strconv.ParseFloat(text, 64)
- if err == nil {
- n.IsFloat = true
- n.Float64 = f
- // If a floating-point extraction succeeded, extract the int if needed.
- if !n.IsInt && float64(int64(f)) == f {
- n.IsInt = true
- n.Int64 = int64(f)
- }
- if !n.IsUint && float64(uint64(f)) == f {
- n.IsUint = true
- n.Uint64 = uint64(f)
- }
- }
- }
- if !n.IsInt && !n.IsUint && !n.IsFloat {
- return nil, fmt.Errorf("illegal number syntax: %q", text)
- }
- return n, nil
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
- n.IsFloat = imag(n.Complex128) == 0
- if n.IsFloat {
- n.Float64 = real(n.Complex128)
- n.IsInt = float64(int64(n.Float64)) == n.Float64
- if n.IsInt {
- n.Int64 = int64(n.Float64)
- }
- n.IsUint = float64(uint64(n.Float64)) == n.Float64
- if n.IsUint {
- n.Uint64 = uint64(n.Float64)
- }
- }
-}
-
-func (n *NumberNode) String() string {
- return n.Text
-}
-
-func (n *NumberNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NumberNode) Copy() Node {
- nn := new(NumberNode)
- *nn = *n // Easy, fast, correct.
- return nn
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
- NodeType
- Pos
- tr *Tree
- Quoted string // The original text of the string, with quotes.
- Text string // The string, after quote processing.
-}
-
-func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
- return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
-}
-
-func (s *StringNode) String() string {
- return s.Quoted
-}
-
-func (s *StringNode) tree() *Tree {
- return s.tr
-}
-
-func (s *StringNode) Copy() Node {
- return s.tr.newString(s.Pos, s.Quoted, s.Text)
-}
-
-// endNode represents an {{end}} action.
-// It does not appear in the final parse tree.
-type endNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newEnd(pos Pos) *endNode {
- return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
-}
-
-func (e *endNode) String() string {
- return "{{end}}"
-}
-
-func (e *endNode) tree() *Tree {
- return e.tr
-}
-
-func (e *endNode) Copy() Node {
- return e.tr.newEnd(e.Pos)
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
-}
-
-func (t *Tree) newElse(pos Pos, line int) *elseNode {
- return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
-}
-
-func (e *elseNode) Type() NodeType {
- return nodeElse
-}
-
-func (e *elseNode) String() string {
- return "{{else}}"
-}
-
-func (e *elseNode) tree() *Tree {
- return e.tr
-}
-
-func (e *elseNode) Copy() Node {
- return e.tr.newElse(e.Pos, e.Line)
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline to be evaluated.
- List *ListNode // What to execute if the value is non-empty.
- ElseList *ListNode // What to execute if the value is empty (nil if absent).
-}
-
-func (b *BranchNode) String() string {
- name := ""
- switch b.NodeType {
- case NodeIf:
- name = "if"
- case NodeRange:
- name = "range"
- case NodeWith:
- name = "with"
- default:
- panic("unknown branch type")
- }
- if b.ElseList != nil {
- return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
- }
- return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
-}
-
-func (b *BranchNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BranchNode) Copy() Node {
- switch b.NodeType {
- case NodeIf:
- return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeRange:
- return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeWith:
- return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- default:
- panic("unknown branch type")
- }
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
- BranchNode
-}
-
-func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
- return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (i *IfNode) Copy() Node {
- return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
-}
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
- BranchNode
-}
-
-func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
- return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (r *RangeNode) Copy() Node {
- return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
-}
-
-// WithNode represents a {{with}} action and its commands.
-type WithNode struct {
- BranchNode
-}
-
-func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
- return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (w *WithNode) Copy() Node {
- return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
-}
-
-// TemplateNode represents a {{template}} action.
-type TemplateNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Name string // The name of the template (unquoted).
- Pipe *PipeNode // The command to evaluate as dot for the template.
-}
-
-func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
- return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
-}
-
-func (t *TemplateNode) String() string {
- if t.Pipe == nil {
- return fmt.Sprintf("{{template %q}}", t.Name)
- }
- return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
-}
-
-func (t *TemplateNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TemplateNode) Copy() Node {
- return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
-}
diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go
deleted file mode 100644
index 0d77ade8..00000000
--- a/vendor/github.com/alecthomas/template/parse/parse.go
+++ /dev/null
@@ -1,700 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates as defined by text/template
-// and html/template. Clients should use those packages to construct templates
-// rather than this one, which provides shared internal data structures not
-// intended for general use.
-package parse
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Tree is the representation of a single parsed template.
-type Tree struct {
- Name string // name of the template represented by the tree.
- ParseName string // name of the top-level template during parsing, for error messages.
- Root *ListNode // top-level root of the tree.
- text string // text parsed to create the template (or its parent)
- // Parsing only; cleared after parse.
- funcs []map[string]interface{}
- lex *lexer
- token [3]item // three-token lookahead for parser.
- peekCount int
- vars []string // variables defined at the moment.
-}
-
-// Copy returns a copy of the Tree. Any parsing state is discarded.
-func (t *Tree) Copy() *Tree {
- if t == nil {
- return nil
- }
- return &Tree{
- Name: t.Name,
- ParseName: t.ParseName,
- Root: t.Root.CopyList(),
- text: t.text,
- }
-}
-
-// Parse returns a map from template name to parse.Tree, created by parsing the
-// templates described in the argument string. The top-level template will be
-// given the specified name. If an error is encountered, parsing stops and an
-// empty map is returned with the error.
-func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
- treeSet = make(map[string]*Tree)
- t := New(name)
- t.text = text
- _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
- return
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
- if t.peekCount > 0 {
- t.peekCount--
- } else {
- t.token[0] = t.lex.nextItem()
- }
- return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
- t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Tree) backup2(t1 item) {
- t.token[1] = t1
- t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens
-// The zeroth token is already there.
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
- t.token[1] = t1
- t.token[2] = t2
- t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
- if t.peekCount > 0 {
- return t.token[t.peekCount-1]
- }
- t.peekCount = 1
- t.token[0] = t.lex.nextItem()
- return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Tree) nextNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Tree) peekNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- t.backup()
- return token
-}
-
-// Parsing.
-
-// New allocates a new parse tree with the given name.
-func New(name string, funcs ...map[string]interface{}) *Tree {
- return &Tree{
- Name: name,
- funcs: funcs,
- }
-}
-
-// ErrorContext returns a textual representation of the location of the node in the input text.
-// The receiver is only used when the node does not have a pointer to the tree inside,
-// which can occur in old code.
-func (t *Tree) ErrorContext(n Node) (location, context string) {
- pos := int(n.Position())
- tree := n.tree()
- if tree == nil {
- tree = t
- }
- text := tree.text[:pos]
- byteNum := strings.LastIndex(text, "\n")
- if byteNum == -1 {
- byteNum = pos // On first line.
- } else {
- byteNum++ // After the newline.
- byteNum = pos - byteNum
- }
- lineNum := 1 + strings.Count(text, "\n")
- context = n.String()
- if len(context) > 20 {
- context = fmt.Sprintf("%.20s...", context)
- }
- return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...interface{}) {
- t.Root = nil
- format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
- t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected {
- t.unexpected(token, context)
- }
- return token
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected1 && token.typ != expected2 {
- t.unexpected(token, context)
- }
- return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
- t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- if t != nil {
- t.stopParse()
- }
- *errp = e.(error)
- }
- return
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
- t.Root = nil
- t.lex = lex
- t.vars = []string{"$"}
- t.funcs = funcs
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
- t.lex = nil
- t.vars = nil
- t.funcs = nil
-}
-
-// Parse parses the template definition string to construct a representation of
-// the template for execution. If either action delimiter string is empty, the
-// default ("{{" or "}}") is used. Embedded template definitions are added to
-// the treeSet map.
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
- defer t.recover(&err)
- t.ParseName = t.Name
- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
- t.text = text
- t.parse(treeSet)
- t.add(treeSet)
- t.stopParse()
- return t, nil
-}
-
-// add adds tree to the treeSet.
-func (t *Tree) add(treeSet map[string]*Tree) {
- tree := treeSet[t.Name]
- if tree == nil || IsEmptyTree(tree.Root) {
- treeSet[t.Name] = t
- return
- }
- if !IsEmptyTree(t.Root) {
- t.errorf("template: multiple definition of template %q", t.Name)
- }
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
-func IsEmptyTree(n Node) bool {
- switch n := n.(type) {
- case nil:
- return true
- case *ActionNode:
- case *IfNode:
- case *ListNode:
- for _, node := range n.Nodes {
- if !IsEmptyTree(node) {
- return false
- }
- }
- return true
- case *RangeNode:
- case *TemplateNode:
- case *TextNode:
- return len(bytes.TrimSpace(n.Text)) == 0
- case *WithNode:
- default:
- panic("unknown node: " + n.String())
- }
- return false
-}
-
-// parse is the top-level parser for a template, essentially the same
-// as itemList except it also parses {{define}} actions.
-// It runs to EOF.
-func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
- t.Root = t.newList(t.peek().pos)
- for t.peek().typ != itemEOF {
- if t.peek().typ == itemLeftDelim {
- delim := t.next()
- if t.nextNonSpace().typ == itemDefine {
- newT := New("definition") // name will be updated once we know it.
- newT.text = t.text
- newT.ParseName = t.ParseName
- newT.startParse(t.funcs, t.lex)
- newT.parseDefinition(treeSet)
- continue
- }
- t.backup2(delim)
- }
- n := t.textOrAction()
- if n.Type() == nodeEnd {
- t.errorf("unexpected %s", n)
- }
- t.Root.append(n)
- }
- return nil
-}
-
-// parseDefinition parses a {{define}} ... {{end}} template definition and
-// installs the definition in the treeSet map. The "define" keyword has already
-// been scanned.
-func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
- const context = "define clause"
- name := t.expectOneOf(itemString, itemRawString, context)
- var err error
- t.Name, err = strconv.Unquote(name.val)
- if err != nil {
- t.error(err)
- }
- t.expect(itemRightDelim, context)
- var end Node
- t.Root, end = t.itemList()
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- t.add(treeSet)
- t.stopParse()
-}
-
-// itemList:
-// textOrAction*
-// Terminates at {{end}} or {{else}}, returned separately.
-func (t *Tree) itemList() (list *ListNode, next Node) {
- list = t.newList(t.peekNonSpace().pos)
- for t.peekNonSpace().typ != itemEOF {
- n := t.textOrAction()
- switch n.Type() {
- case nodeEnd, nodeElse:
- return list, n
- }
- list.append(n)
- }
- t.errorf("unexpected EOF")
- return
-}
-
-// textOrAction:
-// text | action
-func (t *Tree) textOrAction() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemElideNewline:
- return t.elideNewline()
- case itemText:
- return t.newText(token.pos, token.val)
- case itemLeftDelim:
- return t.action()
- default:
- t.unexpected(token, "input")
- }
- return nil
-}
-
-// elideNewline:
-// Remove newlines trailing rightDelim if \\ is present.
-func (t *Tree) elideNewline() Node {
- token := t.peek()
- if token.typ != itemText {
- t.unexpected(token, "input")
- return nil
- }
-
- t.next()
- stripped := strings.TrimLeft(token.val, "\n\r")
- diff := len(token.val) - len(stripped)
- if diff > 0 {
- // This is a bit nasty. We mutate the token in-place to remove
- // preceding newlines.
- token.pos += Pos(diff)
- token.val = stripped
- }
- return t.newText(token.pos, token.val)
-}
-
-// Action:
-// control
-// command ("|" command)*
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
- switch token := t.nextNonSpace(); token.typ {
- case itemElse:
- return t.elseControl()
- case itemEnd:
- return t.endControl()
- case itemIf:
- return t.ifControl()
- case itemRange:
- return t.rangeControl()
- case itemTemplate:
- return t.templateControl()
- case itemWith:
- return t.withControl()
- }
- t.backup()
- // Do not pop variables; they persist until "end".
- return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
-}
-
-// Pipeline:
-// declarations? command ('|' command)*
-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
- var decl []*VariableNode
- pos := t.peekNonSpace().pos
- // Are there declarations?
- for {
- if v := t.peekNonSpace(); v.typ == itemVariable {
- t.next()
- // Since space is a token, we need 3-token look-ahead here in the worst case:
- // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
- // argument variable rather than a declaration. So remember the token
- // adjacent to the variable so we can push it back if necessary.
- tokenAfterVariable := t.peek()
- if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
- t.nextNonSpace()
- variable := t.newVariable(v.pos, v.val)
- decl = append(decl, variable)
- t.vars = append(t.vars, v.val)
- if next.typ == itemChar && next.val == "," {
- if context == "range" && len(decl) < 2 {
- continue
- }
- t.errorf("too many declarations in %s", context)
- }
- } else if tokenAfterVariable.typ == itemSpace {
- t.backup3(v, tokenAfterVariable)
- } else {
- t.backup2(v)
- }
- }
- break
- }
- pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
- for {
- switch token := t.nextNonSpace(); token.typ {
- case itemRightDelim, itemRightParen:
- if len(pipe.Cmds) == 0 {
- t.errorf("missing value for %s", context)
- }
- if token.typ == itemRightParen {
- t.backup()
- }
- return
- case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
- itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
- t.backup()
- pipe.append(t.command())
- default:
- t.unexpected(token, context)
- }
- }
-}
-
-func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
- defer t.popVars(len(t.vars))
- line = t.lex.lineNumber()
- pipe = t.pipeline(context)
- var next Node
- list, next = t.itemList()
- switch next.Type() {
- case nodeEnd: //done
- case nodeElse:
- if allowElseIf {
- // Special case for "else if". If the "else" is followed immediately by an "if",
- // the elseControl will have left the "if" token pending. Treat
- // {{if a}}_{{else if b}}_{{end}}
- // as
- // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
-		// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
- // is assumed. This technique works even for long if-else-if chains.
- // TODO: Should we allow else-if in with and range?
- if t.peek().typ == itemIf {
- t.next() // Consume the "if" token.
- elseList = t.newList(next.Position())
- elseList.append(t.ifControl())
- // Do not consume the next item - only one {{end}} required.
- break
- }
- }
- elseList, next = t.itemList()
- if next.Type() != nodeEnd {
- t.errorf("expected end; found %s", next)
- }
- }
- return pipe.Position(), line, pipe, list, elseList
-}
-
-// If:
-// {{if pipeline}} itemList {{end}}
-// {{if pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) ifControl() Node {
- return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-// {{range pipeline}} itemList {{end}}
-// {{range pipeline}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
- return t.newRange(t.parseControl(false, "range"))
-}
-
-// With:
-// {{with pipeline}} itemList {{end}}
-// {{with pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) withControl() Node {
- return t.newWith(t.parseControl(false, "with"))
-}
-
-// End:
-// {{end}}
-// End keyword is past.
-func (t *Tree) endControl() Node {
- return t.newEnd(t.expect(itemRightDelim, "end").pos)
-}
-
-// Else:
-// {{else}}
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
- // Special case for "else if".
- peek := t.peekNonSpace()
- if peek.typ == itemIf {
- // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
- return t.newElse(peek.pos, t.lex.lineNumber())
- }
- return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
-}
-
-// Template:
-// {{template stringValue pipeline}}
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
- var name string
- token := t.nextNonSpace()
- switch token.typ {
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- name = s
- default:
- t.unexpected(token, "template invocation")
- }
- var pipe *PipeNode
- if t.nextNonSpace().typ != itemRightDelim {
- t.backup()
- // Do not pop variables; they persist until "end".
- pipe = t.pipeline("template")
- }
- return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
-}
-
-// command:
-// operand (space operand)*
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
- cmd := t.newCommand(t.peekNonSpace().pos)
- for {
- t.peekNonSpace() // skip leading spaces.
- operand := t.operand()
- if operand != nil {
- cmd.append(operand)
- }
- switch token := t.next(); token.typ {
- case itemSpace:
- continue
- case itemError:
- t.errorf("%s", token.val)
- case itemRightDelim, itemRightParen:
- t.backup()
- case itemPipe:
- default:
- t.errorf("unexpected %s in operand; missing space?", token)
- }
- break
- }
- if len(cmd.Args) == 0 {
- t.errorf("empty command")
- }
- return cmd
-}
-
-// operand:
-// term .Field*
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Tree) operand() Node {
- node := t.term()
- if node == nil {
- return nil
- }
- if t.peek().typ == itemField {
- chain := t.newChain(t.peek().pos, node)
- for t.peek().typ == itemField {
- chain.Add(t.next().val)
- }
- // Compatibility with original API: If the term is of type NodeField
- // or NodeVariable, just put more fields on the original.
- // Otherwise, keep the Chain node.
- // TODO: Switch to Chains always when we can.
- switch node.Type() {
- case NodeField:
- node = t.newField(chain.Position(), chain.String())
- case NodeVariable:
- node = t.newVariable(chain.Position(), chain.String())
- default:
- node = chain
- }
- }
- return node
-}
-
-// term:
-// literal (number, string, nil, boolean)
-// function (identifier)
-// .
-// .Field
-// $
-// '(' pipeline ')'
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Tree) term() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemError:
- t.errorf("%s", token.val)
- case itemIdentifier:
- if !t.hasFunction(token.val) {
- t.errorf("function %q not defined", token.val)
- }
- return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
- case itemDot:
- return t.newDot(token.pos)
- case itemNil:
- return t.newNil(token.pos)
- case itemVariable:
- return t.useVar(token.pos, token.val)
- case itemField:
- return t.newField(token.pos, token.val)
- case itemBool:
- return t.newBool(token.pos, token.val == "true")
- case itemCharConstant, itemComplex, itemNumber:
- number, err := t.newNumber(token.pos, token.val, token.typ)
- if err != nil {
- t.error(err)
- }
- return number
- case itemLeftParen:
- pipe := t.pipeline("parenthesized pipeline")
- if token := t.next(); token.typ != itemRightParen {
- t.errorf("unclosed right paren: unexpected %s", token)
- }
- return pipe
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- return t.newString(token.pos, token.val, s)
- }
- t.backup()
- return nil
-}
-
-// hasFunction reports if a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
- for _, funcMap := range t.funcs {
- if funcMap == nil {
- continue
- }
- if funcMap[name] != nil {
- return true
- }
- }
- return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
- t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(pos Pos, name string) Node {
- v := t.newVariable(pos, name)
- for _, varName := range t.vars {
- if varName == v.Ident[0] {
- return v
- }
- }
- t.errorf("undefined variable %q", v.Ident[0])
- return nil
-}
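
parse.go above is the package's public entry point: Parse runs the lexer and parser and returns the trees keyed by template name, built from the node types defined earlier in this diff. A small, self-contained sketch (again illustrative, not project code):

package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// Empty delimiters select the defaults "{{" and "}}"; no function maps
	// are needed because the template uses only fields, not identifiers.
	trees, err := parse.Parse("page", "Hi {{.Name}}, you have {{.Count}} messages.", "", "")
	if err != nil {
		panic(err)
	}

	// Root is a *parse.ListNode; Nodes holds the text and action nodes in
	// lexical order, and every node can print itself back as template source.
	for _, n := range trees["page"].Root.Nodes {
		switch n := n.(type) {
		case *parse.TextNode:
			fmt.Printf("text:   %q\n", n.Text)
		case *parse.ActionNode:
			fmt.Printf("action: %s\n", n.Pipe)
		default:
			fmt.Printf("other:  %s\n", n)
		}
	}
}
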
diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go
deleted file mode 100644
index 447ed2ab..00000000
--- a/vendor/github.com/alecthomas/template/template.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "reflect"
-
- "github.com/alecthomas/template/parse"
-)
-
-// common holds the information shared by related templates.
-type common struct {
- tmpl map[string]*Template
- // We use two maps, one for parsing and one for execution.
- // This separation makes the API cleaner since it doesn't
- // expose reflection to the client.
- parseFuncs FuncMap
- execFuncs map[string]reflect.Value
-}
-
-// Template is the representation of a parsed template. The *parse.Tree
-// field is exported only for use by html/template and should be treated
-// as unexported by all other clients.
-type Template struct {
- name string
- *parse.Tree
- *common
- leftDelim string
- rightDelim string
-}
-
-// New allocates a new template with the given name.
-func New(name string) *Template {
- return &Template{
- name: name,
- }
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
- return t.name
-}
-
-// New allocates a new template associated with the given one and with the same
-// delimiters. The association, which is transitive, allows one template to
-// invoke another with a {{template}} action.
-func (t *Template) New(name string) *Template {
- t.init()
- return &Template{
- name: name,
- common: t.common,
- leftDelim: t.leftDelim,
- rightDelim: t.rightDelim,
- }
-}
-
-func (t *Template) init() {
- if t.common == nil {
- t.common = new(common)
- t.tmpl = make(map[string]*Template)
- t.parseFuncs = make(FuncMap)
- t.execFuncs = make(map[string]reflect.Value)
- }
-}
-
-// Clone returns a duplicate of the template, including all associated
-// templates. The actual representation is not copied, but the name space of
-// associated templates is, so further calls to Parse in the copy will add
-// templates to the copy but not to the original. Clone can be used to prepare
-// common templates and use them with variant definitions for other templates
-// by adding the variants after the clone is made.
-func (t *Template) Clone() (*Template, error) {
- nt := t.copy(nil)
- nt.init()
- nt.tmpl[t.name] = nt
- for k, v := range t.tmpl {
- if k == t.name { // Already installed.
- continue
- }
- // The associated templates share nt's common structure.
- tmpl := v.copy(nt.common)
- nt.tmpl[k] = tmpl
- }
- for k, v := range t.parseFuncs {
- nt.parseFuncs[k] = v
- }
- for k, v := range t.execFuncs {
- nt.execFuncs[k] = v
- }
- return nt, nil
-}
-
-// copy returns a shallow copy of t, with common set to the argument.
-func (t *Template) copy(c *common) *Template {
- nt := New(t.name)
- nt.Tree = t.Tree
- nt.common = c
- nt.leftDelim = t.leftDelim
- nt.rightDelim = t.rightDelim
- return nt
-}
-
-// AddParseTree creates a new template with the name and parse tree
-// and associates it with t.
-func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
- if t.common != nil && t.tmpl[name] != nil {
- return nil, fmt.Errorf("template: redefinition of template %q", name)
- }
- nt := t.New(name)
- nt.Tree = tree
- t.tmpl[name] = nt
- return nt, nil
-}
-
-// Templates returns a slice of the templates associated with t, including t
-// itself.
-func (t *Template) Templates() []*Template {
- if t.common == nil {
- return nil
- }
- // Return a slice so we don't expose the map.
- m := make([]*Template, 0, len(t.tmpl))
- for _, v := range t.tmpl {
- m = append(m, v)
- }
- return m
-}
-
-// Delims sets the action delimiters to the specified strings, to be used in
-// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
-// definitions will inherit the settings. An empty delimiter stands for the
-// corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
- t.leftDelim = left
- t.rightDelim = right
- return t
-}
-
-// Funcs adds the elements of the argument map to the template's function map.
-// It panics if a value in the map is not a function with appropriate return
-// type. However, it is legal to overwrite elements of the map. The return
-// value is the template, so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
- t.init()
- addValueFuncs(t.execFuncs, funcMap)
- addFuncs(t.parseFuncs, funcMap)
- return t
-}
-
-// Lookup returns the template with the given name that is associated with t,
-// or nil if there is no such template.
-func (t *Template) Lookup(name string) *Template {
- if t.common == nil {
- return nil
- }
- return t.tmpl[name]
-}
-
-// Parse parses a string into a template. Nested template definitions will be
-// associated with the top-level template t. Parse may be called multiple times
-// to parse definitions of templates to associate with t. It is an error if a
-// resulting template is non-empty (contains content other than template
-// definitions) and would replace a non-empty template with the same name.
-// (In multiple calls to Parse with the same receiver template, only one call
-// can contain text other than space, comments, and template definitions.)
-func (t *Template) Parse(text string) (*Template, error) {
- t.init()
- trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
- if err != nil {
- return nil, err
- }
- // Add the newly parsed trees, including the one for t, into our common structure.
- for name, tree := range trees {
- // If the name we parsed is the name of this template, overwrite this template.
- // The associate method checks it's not a redefinition.
- tmpl := t
- if name != t.name {
- tmpl = t.New(name)
- }
- // Even if t == tmpl, we need to install it in the common.tmpl map.
- if replace, err := t.associate(tmpl, tree); err != nil {
- return nil, err
- } else if replace {
- tmpl.Tree = tree
- }
- tmpl.leftDelim = t.leftDelim
- tmpl.rightDelim = t.rightDelim
- }
- return t, nil
-}
-
-// associate installs the new template into the group of templates associated
-// with t. It is an error to reuse a name except to overwrite an empty
-// template. The two are already known to share the common structure.
-// The boolean return value reports whether to store this tree as t.Tree.
-func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
- if new.common != t.common {
- panic("internal error: associate not common")
- }
- name := new.name
- if old := t.tmpl[name]; old != nil {
- oldIsEmpty := parse.IsEmptyTree(old.Root)
- newIsEmpty := parse.IsEmptyTree(tree.Root)
- if newIsEmpty {
- // Whether old is empty or not, new is empty; no reason to replace old.
- return false, nil
- }
- if !oldIsEmpty {
- return false, fmt.Errorf("template: redefinition of template %q", name)
- }
- }
- t.tmpl[name] = new
- return true, nil
-}
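A minimal sketch of the Clone semantics documented above, written against the standard library `text/template` (of which this vendored package is a fork); the template names and text are illustrative. The base defines an empty "body" block, and the clone overrides it without touching the original namespace.

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Base template with an empty, overridable "body" definition.
	base := template.Must(template.New("page").Parse(
		`body: {{template "body" .}}{{define "body"}}{{end}}`))

	// Clone first, then add the variant definition; the base is unaffected.
	variant := template.Must(template.Must(base.Clone()).Parse(
		`{{define "body"}}hello {{.}}{{end}}`))

	_ = variant.Execute(os.Stdout, "world") // body: hello world
	_ = base.Execute(os.Stdout, "world")    // body:
}
```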
diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING
deleted file mode 100644
index 2993ec08..00000000
--- a/vendor/github.com/alecthomas/units/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2014 Alec Thomas
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md
deleted file mode 100644
index bee884e3..00000000
--- a/vendor/github.com/alecthomas/units/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Units - Helpful unit multipliers and functions for Go
-
-The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
-
-It allows for code like this:
-
-```go
-n, err := ParseBase2Bytes("1KB")
-// n == 1024
-n = units.Mebibyte * 512
-```
diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go
deleted file mode 100644
index eaadeb80..00000000
--- a/vendor/github.com/alecthomas/units/bytes.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package units
-
-// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
-// etc.).
-type Base2Bytes int64
-
-// Base-2 byte units.
-const (
- Kibibyte Base2Bytes = 1024
- KiB = Kibibyte
- Mebibyte = Kibibyte * 1024
- MiB = Mebibyte
- Gibibyte = Mebibyte * 1024
- GiB = Gibibyte
- Tebibyte = Gibibyte * 1024
- TiB = Tebibyte
- Pebibyte = Tebibyte * 1024
- PiB = Pebibyte
- Exbibyte = Pebibyte * 1024
- EiB = Exbibyte
-)
-
-var (
- bytesUnitMap = MakeUnitMap("iB", "B", 1024)
- oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
-)
-
-// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
-// and KiB are both 1024.
-func ParseBase2Bytes(s string) (Base2Bytes, error) {
- n, err := ParseUnit(s, bytesUnitMap)
- if err != nil {
- n, err = ParseUnit(s, oldBytesUnitMap)
- }
- return Base2Bytes(n), err
-}
-
-func (b Base2Bytes) String() string {
- return ToString(int64(b), 1024, "iB", "B")
-}
-
-var (
- metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
-)
-
-// MetricBytes are SI byte units (1000 bytes in a kilobyte).
-type MetricBytes SI
-
-// SI base-10 byte units.
-const (
- Kilobyte MetricBytes = 1000
- KB = Kilobyte
- Megabyte = Kilobyte * 1000
- MB = Megabyte
- Gigabyte = Megabyte * 1000
- GB = Gigabyte
- Terabyte = Gigabyte * 1000
- TB = Terabyte
- Petabyte = Terabyte * 1000
- PB = Petabyte
- Exabyte = Petabyte * 1000
- EB = Exabyte
-)
-
-// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
-func ParseMetricBytes(s string) (MetricBytes, error) {
- n, err := ParseUnit(s, metricBytesUnitMap)
- return MetricBytes(n), err
-}
-
-func (m MetricBytes) String() string {
- return ToString(int64(m), 1000, "B", "B")
-}
-
-// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
-// respectively. That is, KiB represents 1024 and KB represents 1000.
-func ParseStrictBytes(s string) (int64, error) {
- n, err := ParseUnit(s, bytesUnitMap)
- if err != nil {
- n, err = ParseUnit(s, metricBytesUnitMap)
- }
- return int64(n), err
-}
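Since `github.com/alecthomas/units` is still pulled in as a module dependency rather than vendored, the binary-vs-metric split above can be exercised directly; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// ParseBase2Bytes treats both "KB" and "KiB" as 1024.
	n, _ := units.ParseBase2Bytes("1KB")
	fmt.Println(int64(n)) // 1024

	// ParseStrictBytes keeps the two suffix families apart.
	si, _ := units.ParseStrictBytes("1KB")   // 1000
	bin, _ := units.ParseStrictBytes("1KiB") // 1024
	fmt.Println(si, bin)

	// The typed constants stringify with their binary suffix.
	fmt.Println(units.Mebibyte * 512) // 512MiB
}
```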
diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go
deleted file mode 100644
index 156ae386..00000000
--- a/vendor/github.com/alecthomas/units/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package units provides helpful unit multipliers and functions for Go.
-//
-// The goal of this package is to have functionality similar to the time [1] package.
-//
-//
-// [1] http://golang.org/pkg/time/
-//
-// It allows for code like this:
-//
-// n, err := ParseBase2Bytes("1KB")
-// // n == 1024
-// n = units.Mebibyte * 512
-package units
diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go
deleted file mode 100644
index 8234a9d5..00000000
--- a/vendor/github.com/alecthomas/units/si.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package units
-
-// SI units.
-type SI int64
-
-// SI unit multiples.
-const (
- Kilo SI = 1000
- Mega = Kilo * 1000
- Giga = Mega * 1000
- Tera = Giga * 1000
- Peta = Tera * 1000
- Exa = Peta * 1000
-)
-
-func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
- return map[string]float64{
- shortSuffix: 1,
- "K" + suffix: float64(scale),
- "M" + suffix: float64(scale * scale),
- "G" + suffix: float64(scale * scale * scale),
- "T" + suffix: float64(scale * scale * scale * scale),
- "P" + suffix: float64(scale * scale * scale * scale * scale),
- "E" + suffix: float64(scale * scale * scale * scale * scale * scale),
- }
-}
diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go
deleted file mode 100644
index 6527e92d..00000000
--- a/vendor/github.com/alecthomas/units/util.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package units
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
-)
-
-func ToString(n int64, scale int64, suffix, baseSuffix string) string {
- mn := len(siUnits)
- out := make([]string, mn)
- for i, m := range siUnits {
- if n%scale != 0 || i == 0 && n == 0 {
- s := suffix
- if i == 0 {
- s = baseSuffix
- }
- out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
- }
- n /= scale
- if n == 0 {
- break
- }
- }
- return strings.Join(out, "")
-}
-
-// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
-var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
-
-// leadingInt consumes the leading [0-9]* from s.
-func leadingInt(s string) (x int64, rem string, err error) {
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c < '0' || c > '9' {
- break
- }
- if x >= (1<<63-10)/10 {
- // overflow
- return 0, "", errLeadingInt
- }
- x = x*10 + int64(c) - '0'
- }
- return x, s[i:], nil
-}
-
-func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
- // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
- orig := s
- f := float64(0)
- neg := false
-
- // Consume [-+]?
- if s != "" {
- c := s[0]
- if c == '-' || c == '+' {
- neg = c == '-'
- s = s[1:]
- }
- }
- // Special case: if all that is left is "0", this is zero.
- if s == "0" {
- return 0, nil
- }
- if s == "" {
- return 0, errors.New("units: invalid " + orig)
- }
- for s != "" {
- g := float64(0) // this element of the sequence
-
- var x int64
- var err error
-
- // The next character must be [0-9.]
- if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
- return 0, errors.New("units: invalid " + orig)
- }
- // Consume [0-9]*
- pl := len(s)
- x, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("units: invalid " + orig)
- }
- g = float64(x)
- pre := pl != len(s) // whether we consumed anything before a period
-
- // Consume (\.[0-9]*)?
- post := false
- if s != "" && s[0] == '.' {
- s = s[1:]
- pl := len(s)
- x, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("units: invalid " + orig)
- }
- scale := 1.0
- for n := pl - len(s); n > 0; n-- {
- scale *= 10
- }
- g += float64(x) / scale
- post = pl != len(s)
- }
- if !pre && !post {
- // no digits (e.g. ".s" or "-.s")
- return 0, errors.New("units: invalid " + orig)
- }
-
- // Consume unit.
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c == '.' || ('0' <= c && c <= '9') {
- break
- }
- }
- u := s[:i]
- s = s[i:]
- unit, ok := unitMap[u]
- if !ok {
- return 0, errors.New("units: unknown unit " + u + " in " + orig)
- }
-
- f += g * unit
- }
-
- if neg {
- f = -f
- }
- if f < float64(-1<<63) || f > float64(1<<63-1) {
- return 0, errors.New("units: overflow parsing unit")
- }
- return int64(f), nil
-}
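`MakeUnitMap` and `ParseUnit` are exported, so the same machinery can drive unit families other than bytes; a sketch with a hypothetical frequency table (the `freq` map and the "Hz" suffix are illustrative, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// Hypothetical frequency units with a 1000x step per SI prefix:
	// "Hz", "KHz", "MHz", ... mapped to their multipliers.
	freq := units.MakeUnitMap("Hz", "Hz", 1000)

	n, err := units.ParseUnit("2.5MHz", freq)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2500000
}
```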
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
deleted file mode 100644
index 339177be..00000000
--- a/vendor/github.com/beorn7/perks/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287d..00000000
--- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
-8
-5
-26
-12
-5
-235
-13
-6
-28
-30
-3
-3
-3
-3
-5
-2
-33
-7
-2
-4
-7
-12
-14
-5
-8
-3
-10
-4
-5
-3
-6
-6
-209
-20
-3
-10
-14
-3
-4
-6
-8
-5
-11
-7
-3
-2
-3
-3
-212
-5
-222
-4
-10
-10
-5
-6
-3
-8
-3
-10
-254
-220
-2
-3
-5
-24
-5
-4
-222
-7
-3
-3
-223
-8
-15
-12
-14
-14
-3
-2
-2
-3
-13
-3
-11
-4
-4
-6
-5
-7
-13
-5
-3
-5
-2
-5
-3
-5
-2
-7
-15
-17
-14
-3
-6
-6
-3
-17
-5
-4
-7
-6
-4
-4
-8
-6
-8
-3
-9
-3
-6
-3
-4
-5
-3
-3
-660
-4
-6
-10
-3
-6
-3
-2
-5
-13
-2
-4
-4
-10
-4
-8
-4
-3
-7
-9
-9
-3
-10
-37
-3
-13
-4
-12
-3
-6
-10
-8
-5
-21
-2
-3
-8
-3
-2
-3
-3
-4
-12
-2
-4
-8
-8
-4
-3
-2
-20
-1
-6
-32
-2
-11
-6
-18
-3
-8
-11
-3
-212
-3
-4
-2
-6
-7
-12
-11
-3
-2
-16
-10
-6
-4
-6
-3
-2
-7
-3
-2
-2
-2
-2
-5
-6
-4
-3
-10
-3
-4
-6
-5
-3
-4
-4
-5
-6
-4
-3
-4
-4
-5
-7
-5
-5
-3
-2
-7
-2
-4
-12
-4
-5
-6
-2
-4
-4
-8
-4
-15
-13
-7
-16
-5
-3
-23
-5
-5
-7
-3
-2
-9
-8
-7
-5
-8
-11
-4
-10
-76
-4
-47
-4
-3
-2
-7
-4
-2
-3
-37
-10
-4
-2
-20
-5
-4
-4
-10
-10
-4
-3
-7
-23
-240
-7
-13
-5
-5
-3
-3
-2
-5
-4
-2
-8
-7
-19
-2
-23
-8
-7
-2
-5
-3
-8
-3
-8
-13
-5
-5
-5
-2
-3
-23
-4
-9
-8
-4
-3
-3
-5
-220
-2
-3
-4
-6
-14
-3
-53
-6
-2
-5
-18
-6
-3
-219
-6
-5
-2
-5
-3
-6
-5
-15
-4
-3
-17
-3
-2
-4
-7
-2
-3
-3
-4
-4
-3
-2
-664
-6
-3
-23
-5
-5
-16
-5
-8
-2
-4
-2
-24
-12
-3
-2
-3
-5
-8
-3
-5
-4
-3
-14
-3
-5
-8
-2
-3
-7
-9
-4
-2
-3
-6
-8
-4
-3
-4
-6
-5
-3
-3
-6
-3
-19
-4
-4
-6
-3
-6
-3
-5
-22
-5
-4
-4
-3
-8
-11
-4
-9
-7
-6
-13
-4
-4
-4
-6
-17
-9
-3
-3
-3
-4
-3
-221
-5
-11
-3
-4
-2
-12
-6
-3
-5
-7
-5
-7
-4
-9
-7
-14
-37
-19
-217
-16
-3
-5
-2
-2
-7
-19
-7
-6
-7
-4
-24
-5
-11
-4
-7
-7
-9
-13
-3
-4
-3
-6
-28
-4
-4
-5
-5
-2
-5
-6
-4
-4
-6
-10
-5
-4
-3
-2
-3
-3
-6
-5
-5
-4
-3
-2
-3
-7
-4
-6
-18
-16
-8
-16
-4
-5
-8
-6
-9
-13
-1545
-6
-215
-6
-5
-6
-3
-45
-31
-5
-2
-2
-4
-3
-3
-2
-5
-4
-3
-5
-7
-7
-4
-5
-8
-5
-4
-749
-2
-31
-9
-11
-2
-11
-5
-4
-4
-7
-9
-11
-4
-5
-4
-7
-3
-4
-6
-2
-15
-3
-4
-3
-4
-3
-5
-2
-13
-5
-5
-3
-3
-23
-4
-4
-5
-7
-4
-13
-2
-4
-3
-4
-2
-6
-2
-7
-3
-5
-5
-3
-29
-5
-4
-4
-3
-10
-2
-3
-79
-16
-6
-6
-7
-7
-3
-5
-5
-7
-4
-3
-7
-9
-5
-6
-5
-9
-6
-3
-6
-4
-17
-2
-10
-9
-3
-6
-2
-3
-21
-22
-5
-11
-4
-2
-17
-2
-224
-2
-14
-3
-4
-4
-2
-4
-4
-4
-4
-5
-3
-4
-4
-10
-2
-6
-3
-3
-5
-7
-2
-7
-5
-6
-3
-218
-2
-2
-5
-2
-6
-3
-5
-222
-14
-6
-33
-3
-2
-5
-3
-3
-3
-9
-5
-3
-3
-2
-7
-4
-3
-4
-3
-5
-6
-5
-26
-4
-13
-9
-7
-3
-221
-3
-3
-4
-4
-4
-4
-2
-18
-5
-3
-7
-9
-6
-8
-3
-10
-3
-11
-9
-5
-4
-17
-5
-5
-6
-6
-3
-2
-4
-12
-17
-6
-7
-218
-4
-2
-4
-10
-3
-5
-15
-3
-9
-4
-3
-3
-6
-29
-3
-3
-4
-5
-5
-3
-8
-5
-6
-6
-7
-5
-3
-5
-3
-29
-2
-31
-5
-15
-24
-16
-5
-207
-4
-3
-3
-2
-15
-4
-4
-13
-5
-5
-4
-6
-10
-2
-7
-8
-4
-6
-20
-5
-3
-4
-3
-12
-12
-5
-17
-7
-3
-3
-3
-6
-10
-3
-5
-25
-80
-4
-9
-3
-2
-11
-3
-3
-2
-3
-8
-7
-5
-5
-19
-5
-3
-3
-12
-11
-2
-6
-5
-5
-5
-3
-3
-3
-4
-209
-14
-3
-2
-5
-19
-4
-4
-3
-4
-14
-5
-6
-4
-13
-9
-7
-4
-7
-10
-2
-9
-5
-7
-2
-8
-4
-6
-5
-5
-222
-8
-7
-12
-5
-216
-3
-4
-4
-6
-3
-14
-8
-7
-13
-4
-3
-3
-3
-3
-17
-5
-4
-3
-33
-6
-6
-33
-7
-5
-3
-8
-7
-5
-2
-9
-4
-2
-233
-24
-7
-4
-8
-10
-3
-4
-15
-2
-16
-3
-3
-13
-12
-7
-5
-4
-207
-4
-2
-4
-27
-15
-2
-5
-2
-25
-6
-5
-5
-6
-13
-6
-18
-6
-4
-12
-225
-10
-7
-5
-2
-2
-11
-4
-14
-21
-8
-10
-3
-5
-4
-232
-2
-5
-5
-3
-7
-17
-11
-6
-6
-23
-4
-6
-3
-5
-4
-2
-17
-3
-6
-5
-8
-3
-2
-2
-14
-9
-4
-4
-2
-5
-5
-3
-7
-6
-12
-6
-10
-3
-6
-2
-2
-19
-5
-4
-4
-9
-2
-4
-13
-3
-5
-6
-3
-6
-5
-4
-9
-6
-3
-5
-7
-3
-6
-6
-4
-3
-10
-6
-3
-221
-3
-5
-3
-6
-4
-8
-5
-3
-6
-4
-4
-2
-54
-5
-6
-11
-3
-3
-4
-4
-4
-3
-7
-3
-11
-11
-7
-10
-6
-13
-223
-213
-15
-231
-7
-3
-7
-228
-2
-3
-4
-4
-5
-6
-7
-4
-13
-3
-4
-5
-3
-6
-4
-6
-7
-2
-4
-3
-4
-3
-3
-6
-3
-7
-3
-5
-18
-5
-6
-8
-10
-3
-3
-3
-2
-4
-2
-4
-4
-5
-6
-6
-4
-10
-13
-3
-12
-5
-12
-16
-8
-4
-19
-11
-2
-4
-5
-6
-8
-5
-6
-4
-18
-10
-4
-2
-216
-6
-6
-6
-2
-4
-12
-8
-3
-11
-5
-6
-14
-5
-3
-13
-4
-5
-4
-5
-3
-28
-6
-3
-7
-219
-3
-9
-7
-3
-10
-6
-3
-4
-19
-5
-7
-11
-6
-15
-19
-4
-13
-11
-3
-7
-5
-10
-2
-8
-11
-2
-6
-4
-6
-24
-6
-3
-3
-3
-3
-6
-18
-4
-11
-4
-2
-5
-10
-8
-3
-9
-5
-3
-4
-5
-6
-2
-5
-7
-4
-4
-14
-6
-4
-4
-5
-5
-7
-2
-4
-3
-7
-3
-3
-6
-4
-5
-4
-4
-4
-3
-3
-3
-3
-8
-14
-2
-3
-5
-3
-2
-4
-5
-3
-7
-3
-3
-18
-3
-4
-4
-5
-7
-3
-3
-3
-13
-5
-4
-8
-211
-5
-5
-3
-5
-2
-5
-4
-2
-655
-6
-3
-5
-11
-2
-5
-3
-12
-9
-15
-11
-5
-12
-217
-2
-6
-17
-3
-3
-207
-5
-5
-4
-5
-9
-3
-2
-8
-5
-4
-3
-2
-5
-12
-4
-14
-5
-4
-2
-13
-5
-8
-4
-225
-4
-3
-4
-5
-4
-3
-3
-6
-23
-9
-2
-6
-7
-233
-4
-4
-6
-18
-3
-4
-6
-3
-4
-4
-2
-3
-7
-4
-13
-227
-4
-3
-5
-4
-2
-12
-9
-17
-3
-7
-14
-6
-4
-5
-21
-4
-8
-9
-2
-9
-25
-16
-3
-6
-4
-7
-8
-5
-2
-3
-5
-4
-3
-3
-5
-3
-3
-3
-2
-3
-19
-2
-4
-3
-4
-2
-3
-4
-4
-2
-4
-3
-3
-3
-2
-6
-3
-17
-5
-6
-4
-3
-13
-5
-3
-3
-3
-4
-9
-4
-2
-14
-12
-4
-5
-24
-4
-3
-37
-12
-11
-21
-3
-4
-3
-13
-4
-2
-3
-15
-4
-11
-4
-4
-3
-8
-3
-4
-4
-12
-8
-5
-3
-3
-4
-2
-220
-3
-5
-223
-3
-3
-3
-10
-3
-15
-4
-241
-9
-7
-3
-6
-6
-23
-4
-13
-7
-3
-4
-7
-4
-9
-3
-3
-4
-10
-5
-5
-1
-5
-24
-2
-4
-5
-5
-6
-14
-3
-8
-2
-3
-5
-13
-13
-3
-5
-2
-3
-15
-3
-4
-2
-10
-4
-4
-4
-5
-5
-3
-5
-3
-4
-7
-4
-27
-3
-6
-4
-15
-3
-5
-6
-6
-5
-4
-8
-3
-9
-2
-6
-3
-4
-3
-7
-4
-18
-3
-11
-3
-3
-8
-9
-7
-24
-3
-219
-7
-10
-4
-5
-9
-12
-2
-5
-4
-4
-4
-3
-3
-19
-5
-8
-16
-8
-6
-22
-3
-23
-3
-242
-9
-4
-3
-3
-5
-7
-3
-3
-5
-8
-3
-7
-5
-14
-8
-10
-3
-4
-3
-7
-4
-6
-7
-4
-10
-4
-3
-11
-3
-7
-10
-3
-13
-6
-8
-12
-10
-5
-7
-9
-3
-4
-7
-7
-10
-8
-30
-9
-19
-4
-3
-19
-15
-4
-13
-3
-215
-223
-4
-7
-4
-8
-17
-16
-3
-7
-6
-5
-5
-4
-12
-3
-7
-4
-4
-13
-4
-5
-2
-5
-6
-5
-6
-6
-7
-10
-18
-23
-9
-3
-3
-6
-5
-2
-4
-2
-7
-3
-3
-2
-5
-5
-14
-10
-224
-6
-3
-4
-3
-7
-5
-9
-3
-6
-4
-2
-5
-11
-4
-3
-3
-2
-8
-4
-7
-4
-10
-7
-3
-3
-18
-18
-17
-3
-3
-3
-4
-5
-3
-3
-4
-12
-7
-3
-11
-13
-5
-4
-7
-13
-5
-4
-11
-3
-12
-3
-6
-4
-4
-21
-4
-6
-9
-5
-3
-10
-8
-4
-6
-4
-4
-6
-5
-4
-8
-6
-4
-6
-4
-4
-5
-9
-6
-3
-4
-2
-9
-3
-18
-2
-4
-3
-13
-3
-6
-6
-8
-7
-9
-3
-2
-16
-3
-4
-6
-3
-2
-33
-22
-14
-4
-9
-12
-4
-5
-6
-3
-23
-9
-4
-3
-5
-5
-3
-4
-5
-3
-5
-3
-10
-4
-5
-5
-8
-4
-4
-6
-8
-5
-4
-3
-4
-6
-3
-3
-3
-5
-9
-12
-6
-5
-9
-3
-5
-3
-2
-2
-2
-18
-3
-2
-21
-2
-5
-4
-6
-4
-5
-10
-3
-9
-3
-2
-10
-7
-3
-6
-6
-4
-4
-8
-12
-7
-3
-7
-3
-3
-9
-3
-4
-5
-4
-4
-5
-5
-10
-15
-4
-4
-14
-6
-227
-3
-14
-5
-216
-22
-5
-4
-2
-2
-6
-3
-4
-2
-9
-9
-4
-3
-28
-13
-11
-4
-5
-3
-3
-2
-3
-3
-5
-3
-4
-3
-5
-23
-26
-3
-4
-5
-6
-4
-6
-3
-5
-5
-3
-4
-3
-2
-2
-2
-7
-14
-3
-6
-7
-17
-2
-2
-15
-14
-16
-4
-6
-7
-13
-6
-4
-5
-6
-16
-3
-3
-28
-3
-6
-15
-3
-9
-2
-4
-6
-3
-3
-22
-4
-12
-6
-7
-2
-5
-4
-10
-3
-16
-6
-9
-2
-5
-12
-7
-5
-5
-5
-5
-2
-11
-9
-17
-4
-3
-11
-7
-3
-5
-15
-4
-3
-4
-211
-8
-7
-5
-4
-7
-6
-7
-6
-3
-6
-5
-6
-5
-3
-4
-4
-26
-4
-6
-10
-4
-4
-3
-2
-3
-3
-4
-5
-9
-3
-9
-4
-4
-5
-5
-8
-2
-4
-2
-3
-8
-4
-11
-19
-5
-8
-6
-3
-5
-6
-12
-3
-2
-4
-16
-12
-3
-4
-4
-8
-6
-5
-6
-6
-219
-8
-222
-6
-16
-3
-13
-19
-5
-4
-3
-11
-6
-10
-4
-7
-7
-12
-5
-3
-3
-5
-6
-10
-3
-8
-2
-5
-4
-7
-2
-4
-4
-2
-12
-9
-6
-4
-2
-40
-2
-4
-10
-4
-223
-4
-2
-20
-6
-7
-24
-5
-4
-5
-2
-20
-16
-6
-5
-13
-2
-3
-3
-19
-3
-2
-4
-5
-6
-7
-11
-12
-5
-6
-7
-7
-3
-5
-3
-5
-3
-14
-3
-4
-4
-2
-11
-1
-7
-3
-9
-6
-11
-12
-5
-8
-6
-221
-4
-2
-12
-4
-3
-15
-4
-5
-226
-7
-218
-7
-5
-4
-5
-18
-4
-5
-9
-4
-4
-2
-9
-18
-18
-9
-5
-6
-6
-3
-3
-7
-3
-5
-4
-4
-4
-12
-3
-6
-31
-5
-4
-7
-3
-6
-5
-6
-5
-11
-2
-2
-11
-11
-6
-7
-5
-8
-7
-10
-5
-23
-7
-4
-3
-5
-34
-2
-5
-23
-7
-3
-6
-8
-4
-4
-4
-2
-5
-3
-8
-5
-4
-8
-25
-2
-3
-17
-8
-3
-4
-8
-7
-3
-15
-6
-5
-7
-21
-9
-5
-6
-6
-5
-3
-2
-3
-10
-3
-6
-3
-14
-7
-4
-4
-8
-7
-8
-2
-6
-12
-4
-213
-6
-5
-21
-8
-2
-5
-23
-3
-11
-2
-3
-6
-25
-2
-3
-6
-7
-6
-6
-4
-4
-6
-3
-17
-9
-7
-6
-4
-3
-10
-7
-2
-3
-3
-3
-11
-8
-3
-7
-6
-4
-14
-36
-3
-4
-3
-3
-22
-13
-21
-4
-2
-7
-4
-4
-17
-15
-3
-7
-11
-2
-4
-7
-6
-209
-6
-3
-2
-2
-24
-4
-9
-4
-3
-3
-3
-29
-2
-2
-4
-3
-3
-5
-4
-6
-3
-3
-2
-4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index 587b1fc5..00000000
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
- "math"
- "sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
- Value float64 `json:",string"`
- Width float64 `json:",string"`
- Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- return 2 * epsilon * r
- }
- return newStream(Æ’)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- return 2 * epsilon * (s.n - r)
- }
- return newStream(Æ’)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- var m = math.MaxFloat64
- var f float64
- for quantile, epsilon := range targets {
- if quantile*s.n <= r {
- f = (2 * epsilon * r) / quantile
- } else {
- f = (2 * epsilon * (s.n - r)) / (1 - quantile)
- }
- if f < m {
- m = f
- }
- }
- return m
- }
- return newStream(Æ’)
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
- *stream
- b Samples
- sorted bool
-}
-
-func newStream(Æ’ invariant) *Stream {
- x := &stream{Æ’: Æ’}
- return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
- s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
- s.b = append(s.b, sample)
- s.sorted = false
- if len(s.b) == cap(s.b) {
- s.flush()
- }
-}
-
-// Query returns the computed qth percentiles value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
- if !s.flushed() {
- // Fast path when there hasn't been enough data for a flush;
- // this also yields better accuracy for small sets of data.
- l := len(s.b)
- if l == 0 {
- return 0
- }
- i := int(float64(l) * q)
- if i > 0 {
- i -= 1
- }
- s.maybeSort()
- return s.b[i].Value
- }
- s.flush()
- return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying streams samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
- sort.Sort(samples)
- s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list reusing the samples buffer memory.
-func (s *Stream) Reset() {
- s.stream.reset()
- s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
- if !s.flushed() {
- return s.b
- }
- s.flush()
- return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
- return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
- s.maybeSort()
- s.stream.merge(s.b)
- s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
- if !s.sorted {
- s.sorted = true
- sort.Sort(s.b)
- }
-}
-
-func (s *Stream) flushed() bool {
- return len(s.stream.l) > 0
-}
-
-type stream struct {
- n float64
- l []Sample
- Æ’ invariant
-}
-
-func (s *stream) reset() {
- s.l = s.l[:0]
- s.n = 0
-}
-
-func (s *stream) insert(v float64) {
- s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
- // TODO(beorn7): This tries to merge not only individual samples, but
- // whole summaries. The paper doesn't mention merging summaries at
- // all. Unittests show that the merging is inaccurate. Find out how to
- // do merges properly.
- var r float64
- i := 0
- for _, sample := range samples {
- for ; i < len(s.l); i++ {
- c := s.l[i]
- if c.Value > sample.Value {
- // Insert at position i.
- s.l = append(s.l, Sample{})
- copy(s.l[i+1:], s.l[i:])
- s.l[i] = Sample{
- sample.Value,
- sample.Width,
- math.Max(sample.Delta, math.Floor(s.Æ’(s, r))-1),
- // TODO(beorn7): How to calculate delta correctly?
- }
- i++
- goto inserted
- }
- r += c.Width
- }
- s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
- i++
- inserted:
- s.n += sample.Width
- r += sample.Width
- }
- s.compress()
-}
-
-func (s *stream) count() int {
- return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
- t := math.Ceil(q * s.n)
- t += math.Ceil(s.Æ’(s, t) / 2)
- p := s.l[0]
- var r float64
- for _, c := range s.l[1:] {
- r += p.Width
- if r+c.Width+c.Delta > t {
- return p.Value
- }
- p = c
- }
- return p.Value
-}
-
-func (s *stream) compress() {
- if len(s.l) < 2 {
- return
- }
- x := s.l[len(s.l)-1]
- xi := len(s.l) - 1
- r := s.n - 1 - x.Width
-
- for i := len(s.l) - 2; i >= 0; i-- {
- c := s.l[i]
- if c.Width+x.Width+x.Delta <= s.Æ’(s, r) {
- x.Width += c.Width
- s.l[xi] = x
- // Remove element at i.
- copy(s.l[i:], s.l[i+1:])
- s.l = s.l[:len(s.l)-1]
- xi -= 1
- } else {
- x = c
- xi = i
- }
- r -= c.Width
- }
-}
-
-func (s *stream) samples() Samples {
- samples := make(Samples, len(s.l))
- copy(samples, s.l)
- return samples
-}
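This package backs Prometheus summary metrics; a minimal sketch of the targeted-quantile API above (the targets and sample values are illustrative):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with given absolute errors.
	s := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})

	for i := 0; i < 10000; i++ {
		s.Insert(rand.Float64())
	}

	fmt.Printf("count=%d p50=%.3f p99=%.3f\n", s.Count(), s.Query(0.5), s.Query(0.99))
}
```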
diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE
deleted file mode 100644
index 5ba5c86f..00000000
--- a/vendor/github.com/blang/semver/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License
-
-Copyright (c) 2014 Benedikt Lang
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md
deleted file mode 100644
index 6af4dde7..00000000
--- a/vendor/github.com/blang/semver/README.md
+++ /dev/null
@@ -1,194 +0,0 @@
-semver for golang [](https://travis-ci.org/blang/semver) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master) [](https://goreportcard.com/report/github.com/blang/semver)
-======
-
-semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
-
-Usage
------
-```bash
-$ go get github.com/blang/semver
-```
-Note: Always vendor your dependencies or fix on a specific version tag.
-
-```go
-import github.com/blang/semver
-v1, err := semver.Make("1.0.0-beta")
-v2, err := semver.Make("2.0.0-beta")
-v1.Compare(v2)
-```
-
-Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
-
-Why should I use this lib?
------
-
-- Fully spec compatible
-- No reflection
-- No regex
-- Fully tested (Coverage >99%)
-- Readable parsing/validation errors
-- Fast (See [Benchmarks](#benchmarks))
-- Only Stdlib
-- Uses values instead of pointers
-- Many features, see below
-
-
-Features
------
-
-- Parsing and validation at all levels
-- Comparator-like comparisons
-- Compare Helper Methods
-- InPlace manipulation
-- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
-- Wildcards `>=1.x`, `<=2.5.x`
-- Sortable (implements sort.Interface)
-- database/sql compatible (sql.Scanner/Valuer)
-- encoding/json compatible (json.Marshaler/Unmarshaler)
-
-Ranges
-------
-
-A `Range` is a set of conditions which specify which versions satisfy the range.
-
-A condition is composed of an operator and a version. The supported operators are:
-
-- `<1.0.0` Less than `1.0.0`
-- `<=1.0.0` Less than or equal to `1.0.0`
-- `>1.0.0` Greater than `1.0.0`
-- `>=1.0.0` Greater than or equal to `1.0.0`
-- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
-- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
-
-Note that spaces between the operator and the version will be gracefully tolerated.
-
-A `Range` can link multiple `Ranges` separated by space:
-
-Ranges can be linked by logical AND:
-
- - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
-
-Ranges can also be linked by logical OR:
-
- - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
-
-AND has a higher precedence than OR. It's not possible to use brackets.
-
-Ranges can be combined by both AND and OR
-
- - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-
-Range usage:
-
-```
-v, err := semver.Parse("1.2.3")
-range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
-if range(v) {
- //valid
-}
-
-```
-
-Example
------
-
-Have a look at full examples in [examples/main.go](examples/main.go)
-
-```go
-import github.com/blang/semver
-
-v, err := semver.Make("0.0.1-alpha.preview+123.github")
-fmt.Printf("Major: %d\n", v.Major)
-fmt.Printf("Minor: %d\n", v.Minor)
-fmt.Printf("Patch: %d\n", v.Patch)
-fmt.Printf("Pre: %s\n", v.Pre)
-fmt.Printf("Build: %s\n", v.Build)
-
-// Prerelease versions array
-if len(v.Pre) > 0 {
- fmt.Println("Prerelease versions:")
- for i, pre := range v.Pre {
- fmt.Printf("%d: %q\n", i, pre)
- }
-}
-
-// Build meta data array
-if len(v.Build) > 0 {
- fmt.Println("Build meta data:")
- for i, build := range v.Build {
- fmt.Printf("%d: %q\n", i, build)
- }
-}
-
-v001, err := semver.Make("0.0.1")
-// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
-v001.GT(v) == true
-v.LT(v001) == true
-v.GTE(v) == true
-v.LTE(v) == true
-
-// Or use v.Compare(v2) for comparisons (-1, 0, 1):
-v001.Compare(v) == 1
-v.Compare(v001) == -1
-v.Compare(v) == 0
-
-// Manipulate Version in place:
-v.Pre[0], err = semver.NewPRVersion("beta")
-if err != nil {
- fmt.Printf("Error parsing pre release version: %q", err)
-}
-
-fmt.Println("\nValidate versions:")
-v.Build[0] = "?"
-
-err = v.Validate()
-if err != nil {
- fmt.Printf("Validation failed: %s\n", err)
-}
-```
-
-
-Benchmarks
------
-
- BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
- BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
- BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
- BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
- BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
- BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
- BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
- BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
- BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
- BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
- BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
- BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
- BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
- BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
- BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
- BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
- BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
- BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
-
-See benchmark cases at [semver_test.go](semver_test.go)
-
-
-Motivation
------
-
-I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
-
-
-Contribution
------
-
-Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
-
-
-License
------
-
-See [LICENSE](LICENSE) file.
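The README's range snippet uses `range` as a variable name, which Go reserves; a compilable sketch of the same documented `ParseRange` usage (the version strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	validRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(validRange(semver.MustParse("1.2.3"))) // true
	fmt.Println(validRange(semver.MustParse("2.1.0"))) // false
}
```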
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go
deleted file mode 100644
index a74bf7c4..00000000
--- a/vendor/github.com/blang/semver/json.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package semver
-
-import (
- "encoding/json"
-)
-
-// MarshalJSON implements the encoding/json.Marshaler interface.
-func (v Version) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
-func (v *Version) UnmarshalJSON(data []byte) (err error) {
- var versionString string
-
- if err = json.Unmarshal(data, &versionString); err != nil {
- return
- }
-
- *v, err = Parse(versionString)
-
- return
-}
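Because `Version` implements both JSON interfaces above, it round-trips through `encoding/json` as its canonical string; a sketch (the `release` struct is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/blang/semver"
)

// release is a hypothetical document embedding a semver.Version.
type release struct {
	Name    string         `json:"name"`
	Version semver.Version `json:"version"`
}

func main() {
	out, _ := json.Marshal(release{Name: "exporter", Version: semver.MustParse("1.1.0-rc.1")})
	fmt.Println(string(out)) // {"name":"exporter","version":"1.1.0-rc.1"}

	var in release
	_ = json.Unmarshal(out, &in)
	fmt.Println(in.Version.Equals(semver.MustParse("1.1.0-rc.1"))) // true
}
```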
diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json
deleted file mode 100644
index 1cf8ebdd..00000000
--- a/vendor/github.com/blang/semver/package.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "author": "blang",
- "bugs": {
- "URL": "https://github.com/blang/semver/issues",
- "url": "https://github.com/blang/semver/issues"
- },
- "gx": {
- "dvcsimport": "github.com/blang/semver"
- },
- "gxVersion": "0.10.0",
- "language": "go",
- "license": "MIT",
- "name": "semver",
- "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
- "version": "3.5.1"
-}
-
diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go
deleted file mode 100644
index fca406d4..00000000
--- a/vendor/github.com/blang/semver/range.go
+++ /dev/null
@@ -1,416 +0,0 @@
-package semver
-
-import (
- "fmt"
- "strconv"
- "strings"
- "unicode"
-)
-
-type wildcardType int
-
-const (
- noneWildcard wildcardType = iota
- majorWildcard wildcardType = 1
- minorWildcard wildcardType = 2
- patchWildcard wildcardType = 3
-)
-
-func wildcardTypefromInt(i int) wildcardType {
- switch i {
- case 1:
- return majorWildcard
- case 2:
- return minorWildcard
- case 3:
- return patchWildcard
- default:
- return noneWildcard
- }
-}
-
-type comparator func(Version, Version) bool
-
-var (
- compEQ comparator = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == 0
- }
- compNE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) != 0
- }
- compGT = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == 1
- }
- compGE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) >= 0
- }
- compLT = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) == -1
- }
- compLE = func(v1 Version, v2 Version) bool {
- return v1.Compare(v2) <= 0
- }
-)
-
-type versionRange struct {
- v Version
- c comparator
-}
-
-// rangeFunc creates a Range from the given versionRange.
-func (vr *versionRange) rangeFunc() Range {
- return Range(func(v Version) bool {
- return vr.c(v, vr.v)
- })
-}
-
-// Range represents a range of versions.
-// A Range can be used to check if a Version satisfies it:
-//
-// range, err := semver.ParseRange(">1.0.0 <2.0.0")
-// range(semver.MustParse("1.1.1") // returns true
-type Range func(Version) bool
-
-// OR combines the existing Range with another Range using logical OR.
-func (rf Range) OR(f Range) Range {
- return Range(func(v Version) bool {
- return rf(v) || f(v)
- })
-}
-
-// AND combines the existing Range with another Range using logical AND.
-func (rf Range) AND(f Range) Range {
- return Range(func(v Version) bool {
- return rf(v) && f(v)
- })
-}
-
-// ParseRange parses a range and returns a Range.
-// If the range could not be parsed an error is returned.
-//
-// Valid ranges are:
-// - "<1.0.0"
-// - "<=1.0.0"
-// - ">1.0.0"
-// - ">=1.0.0"
-// - "1.0.0", "=1.0.0", "==1.0.0"
-// - "!1.0.0", "!=1.0.0"
-//
-// A Range can consist of multiple ranges separated by space:
-// Ranges can be linked by logical AND:
-// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
-// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
-//
-// Ranges can also be linked by logical OR:
-// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
-//
-// AND has a higher precedence than OR. It's not possible to use brackets.
-//
-// Ranges can be combined by both AND and OR
-//
-// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
-func ParseRange(s string) (Range, error) {
- parts := splitAndTrim(s)
- orParts, err := splitORParts(parts)
- if err != nil {
- return nil, err
- }
- expandedParts, err := expandWildcardVersion(orParts)
- if err != nil {
- return nil, err
- }
- var orFn Range
- for _, p := range expandedParts {
- var andFn Range
- for _, ap := range p {
- opStr, vStr, err := splitComparatorVersion(ap)
- if err != nil {
- return nil, err
- }
- vr, err := buildVersionRange(opStr, vStr)
- if err != nil {
- return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
- }
- rf := vr.rangeFunc()
-
- // Set function
- if andFn == nil {
- andFn = rf
- } else { // Combine with existing function
- andFn = andFn.AND(rf)
- }
- }
- if orFn == nil {
- orFn = andFn
- } else {
- orFn = orFn.OR(andFn)
- }
-
- }
- return orFn, nil
-}
-
-// splitORParts splits the already cleaned parts by '||'.
-// Checks for invalid positions of the operator and returns an
-// error if found.
-func splitORParts(parts []string) ([][]string, error) {
- var ORparts [][]string
- last := 0
- for i, p := range parts {
- if p == "||" {
- if i == 0 {
- return nil, fmt.Errorf("First element in range is '||'")
- }
- ORparts = append(ORparts, parts[last:i])
- last = i + 1
- }
- }
- if last == len(parts) {
- return nil, fmt.Errorf("Last element in range is '||'")
- }
- ORparts = append(ORparts, parts[last:])
- return ORparts, nil
-}
-
-// buildVersionRange takes a slice of 2: operator and version
-// and builds a versionRange, otherwise an error.
-func buildVersionRange(opStr, vStr string) (*versionRange, error) {
- c := parseComparator(opStr)
- if c == nil {
- return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
- }
- v, err := Parse(vStr)
- if err != nil {
- return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
- }
-
- return &versionRange{
- v: v,
- c: c,
- }, nil
-
-}
-
-// inArray checks if a byte is contained in an array of bytes
-func inArray(s byte, list []byte) bool {
- for _, el := range list {
- if el == s {
- return true
- }
- }
- return false
-}
-
-// splitAndTrim splits a range string by spaces and cleans whitespaces
-func splitAndTrim(s string) (result []string) {
- last := 0
- var lastChar byte
- excludeFromSplit := []byte{'>', '<', '='}
- for i := 0; i < len(s); i++ {
- if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
- if last < i-1 {
- result = append(result, s[last:i])
- }
- last = i + 1
- } else if s[i] != ' ' {
- lastChar = s[i]
- }
- }
- if last < len(s)-1 {
- result = append(result, s[last:])
- }
-
- for i, v := range result {
- result[i] = strings.Replace(v, " ", "", -1)
- }
-
- // parts := strings.Split(s, " ")
- // for _, x := range parts {
- // if s := strings.TrimSpace(x); len(s) != 0 {
- // result = append(result, s)
- // }
- // }
- return
-}
-
-// splitComparatorVersion splits the comparator from the version.
-// Input must be free of leading or trailing spaces.
-func splitComparatorVersion(s string) (string, string, error) {
- i := strings.IndexFunc(s, unicode.IsDigit)
- if i == -1 {
- return "", "", fmt.Errorf("Could not get version from string: %q", s)
- }
- return strings.TrimSpace(s[0:i]), s[i:], nil
-}
-
-// getWildcardType will return the type of wildcard that the
-// passed version contains
-func getWildcardType(vStr string) wildcardType {
- parts := strings.Split(vStr, ".")
- nparts := len(parts)
- wildcard := parts[nparts-1]
-
- possibleWildcardType := wildcardTypefromInt(nparts)
- if wildcard == "x" {
- return possibleWildcardType
- }
-
- return noneWildcard
-}
-
-// createVersionFromWildcard will convert a wildcard version
-// into a regular version, replacing 'x's with '0's, handling
-// special cases like '1.x.x' and '1.x'
-func createVersionFromWildcard(vStr string) string {
- // handle 1.x.x
- vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
- vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
- parts := strings.Split(vStr2, ".")
-
- // handle 1.x
- if len(parts) == 2 {
- return vStr2 + ".0"
- }
-
- return vStr2
-}
-
-// incrementMajorVersion will increment the major version
-// of the passed version
-func incrementMajorVersion(vStr string) (string, error) {
- parts := strings.Split(vStr, ".")
- i, err := strconv.Atoi(parts[0])
- if err != nil {
- return "", err
- }
- parts[0] = strconv.Itoa(i + 1)
-
- return strings.Join(parts, "."), nil
-}
-
-// incrementMajorVersion will increment the minor version
-// of the passed version
-func incrementMinorVersion(vStr string) (string, error) {
- parts := strings.Split(vStr, ".")
- i, err := strconv.Atoi(parts[1])
- if err != nil {
- return "", err
- }
- parts[1] = strconv.Itoa(i + 1)
-
- return strings.Join(parts, "."), nil
-}
-
-// expandWildcardVersion will expand wildcards inside versions
-// following these rules:
-//
-// * when dealing with patch wildcards:
-// >= 1.2.x will become >= 1.2.0
-// <= 1.2.x will become < 1.3.0
-// > 1.2.x will become >= 1.3.0
-// < 1.2.x will become < 1.2.0
-// != 1.2.x will become < 1.2.0 >= 1.3.0
-//
-// * when dealing with minor wildcards:
-// >= 1.x will become >= 1.0.0
-// <= 1.x will become < 2.0.0
-// > 1.x will become >= 2.0.0
-// < 1.0 will become < 1.0.0
-// != 1.x will become < 1.0.0 >= 2.0.0
-//
-// * when dealing with wildcards without
-// version operator:
-// 1.2.x will become >= 1.2.0 < 1.3.0
-// 1.x will become >= 1.0.0 < 2.0.0
-func expandWildcardVersion(parts [][]string) ([][]string, error) {
- var expandedParts [][]string
- for _, p := range parts {
- var newParts []string
- for _, ap := range p {
- if strings.Index(ap, "x") != -1 {
- opStr, vStr, err := splitComparatorVersion(ap)
- if err != nil {
- return nil, err
- }
-
- versionWildcardType := getWildcardType(vStr)
- flatVersion := createVersionFromWildcard(vStr)
-
- var resultOperator string
- var shouldIncrementVersion bool
- switch opStr {
- case ">":
- resultOperator = ">="
- shouldIncrementVersion = true
- case ">=":
- resultOperator = ">="
- case "<":
- resultOperator = "<"
- case "<=":
- resultOperator = "<"
- shouldIncrementVersion = true
- case "", "=", "==":
- newParts = append(newParts, ">="+flatVersion)
- resultOperator = "<"
- shouldIncrementVersion = true
- case "!=", "!":
- newParts = append(newParts, "<"+flatVersion)
- resultOperator = ">="
- shouldIncrementVersion = true
- }
-
- var resultVersion string
- if shouldIncrementVersion {
- switch versionWildcardType {
- case patchWildcard:
- resultVersion, _ = incrementMinorVersion(flatVersion)
- case minorWildcard:
- resultVersion, _ = incrementMajorVersion(flatVersion)
- }
- } else {
- resultVersion = flatVersion
- }
-
- ap = resultOperator + resultVersion
- }
- newParts = append(newParts, ap)
- }
- expandedParts = append(expandedParts, newParts)
- }
-
- return expandedParts, nil
-}
-
-func parseComparator(s string) comparator {
- switch s {
- case "==":
- fallthrough
- case "":
- fallthrough
- case "=":
- return compEQ
- case ">":
- return compGT
- case ">=":
- return compGE
- case "<":
- return compLT
- case "<=":
- return compLE
- case "!":
- fallthrough
- case "!=":
- return compNE
- }
-
- return nil
-}
-
-// MustParseRange is like ParseRange but panics if the range cannot be parsed.
-func MustParseRange(s string) Range {
- r, err := ParseRange(s)
- if err != nil {
- panic(`semver: ParseRange(` + s + `): ` + err.Error())
- }
- return r
-}
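The wildcard expansion rules above mean a bare `1.2.x` behaves like `>=1.2.0 <1.3.0`; a short sketch (version strings illustrative):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// "1.2.x" expands to ">=1.2.0 <1.3.0" per expandWildcardVersion.
	patchSeries := semver.MustParseRange("1.2.x")

	fmt.Println(patchSeries(semver.MustParse("1.2.9"))) // true
	fmt.Println(patchSeries(semver.MustParse("1.3.0"))) // false
}
```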
diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go
deleted file mode 100644
index ec26aa03..00000000
--- a/vendor/github.com/blang/semver/semver.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package semver
-
-import (
- "errors"
- "fmt"
- "strconv"
- "strings"
-)
-
-const (
- numbers string = "0123456789"
- alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
- alphanum = alphas + numbers
-)
-
-// SpecVersion is the latest fully supported spec version of semver
-var SpecVersion = Version{
- Major: 2,
- Minor: 0,
- Patch: 0,
-}
-
-// Version represents a semver compatible version
-type Version struct {
- Major uint64
- Minor uint64
- Patch uint64
- Pre []PRVersion
- Build []string //No Precedence
-}
-
-// Version to string
-func (v Version) String() string {
- b := make([]byte, 0, 5)
- b = strconv.AppendUint(b, v.Major, 10)
- b = append(b, '.')
- b = strconv.AppendUint(b, v.Minor, 10)
- b = append(b, '.')
- b = strconv.AppendUint(b, v.Patch, 10)
-
- if len(v.Pre) > 0 {
- b = append(b, '-')
- b = append(b, v.Pre[0].String()...)
-
- for _, pre := range v.Pre[1:] {
- b = append(b, '.')
- b = append(b, pre.String()...)
- }
- }
-
- if len(v.Build) > 0 {
- b = append(b, '+')
- b = append(b, v.Build[0]...)
-
- for _, build := range v.Build[1:] {
- b = append(b, '.')
- b = append(b, build...)
- }
- }
-
- return string(b)
-}
-
-// Equals checks if v is equal to o.
-func (v Version) Equals(o Version) bool {
- return (v.Compare(o) == 0)
-}
-
-// EQ checks if v is equal to o.
-func (v Version) EQ(o Version) bool {
- return (v.Compare(o) == 0)
-}
-
-// NE checks if v is not equal to o.
-func (v Version) NE(o Version) bool {
- return (v.Compare(o) != 0)
-}
-
-// GT checks if v is greater than o.
-func (v Version) GT(o Version) bool {
- return (v.Compare(o) == 1)
-}
-
-// GTE checks if v is greater than or equal to o.
-func (v Version) GTE(o Version) bool {
- return (v.Compare(o) >= 0)
-}
-
-// GE checks if v is greater than or equal to o.
-func (v Version) GE(o Version) bool {
- return (v.Compare(o) >= 0)
-}
-
-// LT checks if v is less than o.
-func (v Version) LT(o Version) bool {
- return (v.Compare(o) == -1)
-}
-
-// LTE checks if v is less than or equal to o.
-func (v Version) LTE(o Version) bool {
- return (v.Compare(o) <= 0)
-}
-
-// LE checks if v is less than or equal to o.
-func (v Version) LE(o Version) bool {
- return (v.Compare(o) <= 0)
-}
-
-// Compare compares Versions v to o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v Version) Compare(o Version) int {
- if v.Major != o.Major {
- if v.Major > o.Major {
- return 1
- }
- return -1
- }
- if v.Minor != o.Minor {
- if v.Minor > o.Minor {
- return 1
- }
- return -1
- }
- if v.Patch != o.Patch {
- if v.Patch > o.Patch {
- return 1
- }
- return -1
- }
-
- // Quick comparison if a version has no prerelease versions
- if len(v.Pre) == 0 && len(o.Pre) == 0 {
- return 0
- } else if len(v.Pre) == 0 && len(o.Pre) > 0 {
- return 1
- } else if len(v.Pre) > 0 && len(o.Pre) == 0 {
- return -1
- }
-
- i := 0
- for ; i < len(v.Pre) && i < len(o.Pre); i++ {
- if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
- continue
- } else if comp == 1 {
- return 1
- } else {
- return -1
- }
- }
-
-	// If all compared prerelease identifiers are equal, the version with additional identifiers is greater.
- if i == len(v.Pre) && i == len(o.Pre) {
- return 0
- } else if i == len(v.Pre) && i < len(o.Pre) {
- return -1
- } else {
- return 1
- }
-
-}
-
-// Validate validates v and returns an error if it is invalid.
-func (v Version) Validate() error {
- // Major, Minor, Patch already validated using uint64
-
- for _, pre := range v.Pre {
- if !pre.IsNum { //Numeric prerelease versions already uint64
- if len(pre.VersionStr) == 0 {
- return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
- }
- if !containsOnly(pre.VersionStr, alphanum) {
- return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
- }
- }
- }
-
- for _, build := range v.Build {
- if len(build) == 0 {
- return fmt.Errorf("Build meta data can not be empty %q", build)
- }
- if !containsOnly(build, alphanum) {
- return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
- }
- }
-
- return nil
-}
-
-// New is an alias for Parse that returns a pointer: it parses the version string and returns a validated Version or an error.
-func New(s string) (vp *Version, err error) {
- v, err := Parse(s)
- vp = &v
- return
-}
-
-// Make is an alias for Parse, parses version string and returns a validated Version or error
-func Make(s string) (Version, error) {
- return Parse(s)
-}
-
-// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
-// specs to be parsed by this library. It does so by normalizing versions before passing them to
-// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
-// with only major and minor components specified
-func ParseTolerant(s string) (Version, error) {
- s = strings.TrimSpace(s)
- s = strings.TrimPrefix(s, "v")
-
- // Split into major.minor.(patch+pr+meta)
- parts := strings.SplitN(s, ".", 3)
- if len(parts) < 3 {
- if strings.ContainsAny(parts[len(parts)-1], "+-") {
- return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
- }
- for len(parts) < 3 {
- parts = append(parts, "0")
- }
- s = strings.Join(parts, ".")
- }
-
- return Parse(s)
-}
-
-// Parse parses version string and returns a validated Version or error
-func Parse(s string) (Version, error) {
- if len(s) == 0 {
- return Version{}, errors.New("Version string empty")
- }
-
- // Split into major.minor.(patch+pr+meta)
- parts := strings.SplitN(s, ".", 3)
- if len(parts) != 3 {
- return Version{}, errors.New("No Major.Minor.Patch elements found")
- }
-
- // Major
- if !containsOnly(parts[0], numbers) {
- return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
- }
- if hasLeadingZeroes(parts[0]) {
- return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
- }
- major, err := strconv.ParseUint(parts[0], 10, 64)
- if err != nil {
- return Version{}, err
- }
-
- // Minor
- if !containsOnly(parts[1], numbers) {
- return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
- }
- if hasLeadingZeroes(parts[1]) {
- return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
- }
- minor, err := strconv.ParseUint(parts[1], 10, 64)
- if err != nil {
- return Version{}, err
- }
-
- v := Version{}
- v.Major = major
- v.Minor = minor
-
- var build, prerelease []string
- patchStr := parts[2]
-
- if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
- build = strings.Split(patchStr[buildIndex+1:], ".")
- patchStr = patchStr[:buildIndex]
- }
-
- if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
- prerelease = strings.Split(patchStr[preIndex+1:], ".")
- patchStr = patchStr[:preIndex]
- }
-
- if !containsOnly(patchStr, numbers) {
- return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
- }
- if hasLeadingZeroes(patchStr) {
- return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
- }
- patch, err := strconv.ParseUint(patchStr, 10, 64)
- if err != nil {
- return Version{}, err
- }
-
- v.Patch = patch
-
- // Prerelease
- for _, prstr := range prerelease {
- parsedPR, err := NewPRVersion(prstr)
- if err != nil {
- return Version{}, err
- }
- v.Pre = append(v.Pre, parsedPR)
- }
-
- // Build meta data
- for _, str := range build {
- if len(str) == 0 {
- return Version{}, errors.New("Build meta data is empty")
- }
- if !containsOnly(str, alphanum) {
- return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
- }
- v.Build = append(v.Build, str)
- }
-
- return v, nil
-}
-
-// MustParse is like Parse but panics if the version cannot be parsed.
-func MustParse(s string) Version {
- v, err := Parse(s)
- if err != nil {
- panic(`semver: Parse(` + s + `): ` + err.Error())
- }
- return v
-}
-
-// PRVersion represents a PreRelease Version
-type PRVersion struct {
- VersionStr string
- VersionNum uint64
- IsNum bool
-}
-
-// NewPRVersion creates a new valid prerelease version
-func NewPRVersion(s string) (PRVersion, error) {
- if len(s) == 0 {
- return PRVersion{}, errors.New("Prerelease is empty")
- }
- v := PRVersion{}
- if containsOnly(s, numbers) {
- if hasLeadingZeroes(s) {
- return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
- }
- num, err := strconv.ParseUint(s, 10, 64)
-
- // Might never be hit, but just in case
- if err != nil {
- return PRVersion{}, err
- }
- v.VersionNum = num
- v.IsNum = true
- } else if containsOnly(s, alphanum) {
- v.VersionStr = s
- v.IsNum = false
- } else {
- return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
- }
- return v, nil
-}
-
-// IsNumeric checks if prerelease-version is numeric
-func (v PRVersion) IsNumeric() bool {
- return v.IsNum
-}
-
-// Compare compares two PreRelease Versions v and o:
-// -1 == v is less than o
-// 0 == v is equal to o
-// 1 == v is greater than o
-func (v PRVersion) Compare(o PRVersion) int {
- if v.IsNum && !o.IsNum {
- return -1
- } else if !v.IsNum && o.IsNum {
- return 1
- } else if v.IsNum && o.IsNum {
- if v.VersionNum == o.VersionNum {
- return 0
- } else if v.VersionNum > o.VersionNum {
- return 1
- } else {
- return -1
- }
- } else { // both are Alphas
- if v.VersionStr == o.VersionStr {
- return 0
- } else if v.VersionStr > o.VersionStr {
- return 1
- } else {
- return -1
- }
- }
-}
-
-// String returns the PreRelease version as a string.
-func (v PRVersion) String() string {
- if v.IsNum {
- return strconv.FormatUint(v.VersionNum, 10)
- }
- return v.VersionStr
-}
-
-func containsOnly(s string, set string) bool {
- return strings.IndexFunc(s, func(r rune) bool {
- return !strings.ContainsRune(set, r)
- }) == -1
-}
-
-func hasLeadingZeroes(s string) bool {
- return len(s) > 1 && s[0] == '0'
-}
-
-// NewBuildVersion creates a new valid build version
-func NewBuildVersion(s string) (string, error) {
- if len(s) == 0 {
- return "", errors.New("Buildversion is empty")
- }
- if !containsOnly(s, alphanum) {
- return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
- }
- return s, nil
-}
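
For reviewers, a minimal sketch of how the blang/semver API removed above is typically consumed (the dependency now comes from go.mod rather than vendor/; the example program is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Parse/MustParse require a full Major.Minor.Patch string.
	a := semver.MustParse("1.2.3-beta.1+build.5")

	// ParseTolerant also accepts a leading "v" and short forms such as "6.8".
	b, err := semver.ParseTolerant("v6.8")
	if err != nil {
		panic(err)
	}

	// Compare returns -1, 0, or 1; LT, GT, GTE, ... wrap it.
	fmt.Println(a.Compare(b)) // -1: 1.2.3-beta.1 is older than 6.8.0
	fmt.Println(a.LT(b))      // true
}
```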
diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go
deleted file mode 100644
index e18f8808..00000000
--- a/vendor/github.com/blang/semver/sort.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package semver
-
-import (
- "sort"
-)
-
-// Versions represents multiple versions.
-type Versions []Version
-
-// Len returns length of version collection
-func (s Versions) Len() int {
- return len(s)
-}
-
-// Swap swaps two versions inside the collection by its indices
-func (s Versions) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Less checks if version at index i is less than version at index j
-func (s Versions) Less(i, j int) bool {
- return s[i].LT(s[j])
-}
-
-// Sort sorts a slice of versions
-func Sort(versions []Version) {
- sort.Sort(Versions(versions))
-}
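
Likewise, a short sketch of the sort helper deleted here (illustrative only):

```go
versions := []semver.Version{
	semver.MustParse("1.10.0"),
	semver.MustParse("1.2.0"),
	semver.MustParse("1.2.0-rc.1"),
}
// Sort orders ascending via Versions' Less, i.e. Version.LT:
// 1.2.0-rc.1, 1.2.0, 1.10.0
semver.Sort(versions)
```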
diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go
deleted file mode 100644
index eb4d8026..00000000
--- a/vendor/github.com/blang/semver/sql.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package semver
-
-import (
- "database/sql/driver"
- "fmt"
-)
-
-// Scan implements the database/sql.Scanner interface.
-func (v *Version) Scan(src interface{}) (err error) {
- var str string
- switch src := src.(type) {
- case string:
- str = src
- case []byte:
- str = string(src)
- default:
- return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
- }
-
- if t, err := Parse(str); err == nil {
- *v = t
- }
-
- return
-}
-
-// Value implements the database/sql/driver.Valuer interface.
-func (v Version) Value() (driver.Value, error) {
- return v.String(), nil
-}
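
The Scanner/Valuer pair above lets a Version round-trip through a text column. A hypothetical helper (table and column names are invented for illustration) might look like the following; note that Scan above shadows its named err return, so a value that fails to parse is silently left unset rather than reported:

```go
import (
	"database/sql"

	"github.com/blang/semver"
)

// loadVersion reads a semver-formatted text column into a Version.
func loadVersion(db *sql.DB, id int) (semver.Version, error) {
	var v semver.Version
	// database/sql calls (*Version).Scan under the hood.
	err := db.QueryRow(`SELECT version FROM releases WHERE id = ?`, id).Scan(&v)
	return v, err
}
```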
diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE
deleted file mode 100644
index 9d83342a..00000000
--- a/vendor/github.com/go-kit/kit/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Peter Bourgon
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md
deleted file mode 100644
index 7222f800..00000000
--- a/vendor/github.com/go-kit/kit/log/README.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# package log
-
-`package log` provides a minimal interface for structured logging in services.
-It may be wrapped to encode conventions, enforce type-safety, provide leveled
-logging, and so on. It can be used for both typical application log events,
-and log-structured data streams.
-
-## Structured logging
-
-Structured logging is, basically, conceding to the reality that logs are
-_data_, and warrant some level of schematic rigor. Using a stricter,
-key/value-oriented message format for our logs, containing contextual and
-semantic information, makes it much easier to get insight into the
-operational activity of the systems we build. Consequently, `package log` is
-of the strong belief that "[the benefits of structured logging outweigh the
-minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)".
-
-Migrating from unstructured to structured logging is probably a lot easier
-than you'd expect.
-
-```go
-// Unstructured
-log.Printf("HTTP server listening on %s", addr)
-
-// Structured
-logger.Log("transport", "HTTP", "addr", addr, "msg", "listening")
-```
-
-## Usage
-
-### Typical application logging
-
-```go
-w := log.NewSyncWriter(os.Stderr)
-logger := log.NewLogfmtLogger(w)
-logger.Log("question", "what is the meaning of life?", "answer", 42)
-
-// Output:
-// question="what is the meaning of life?" answer=42
-```
-
-### Contextual Loggers
-
-```go
-func main() {
- var logger log.Logger
- logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
- logger = log.With(logger, "instance_id", 123)
-
- logger.Log("msg", "starting")
- NewWorker(log.With(logger, "component", "worker")).Run()
- NewSlacker(log.With(logger, "component", "slacker")).Run()
-}
-
-// Output:
-// instance_id=123 msg=starting
-// instance_id=123 component=worker msg=running
-// instance_id=123 component=slacker msg=running
-```
-
-### Interact with stdlib logger
-
-Redirect stdlib logger to Go kit logger.
-
-```go
-import (
- "os"
- stdlog "log"
- kitlog "github.com/go-kit/kit/log"
-)
-
-func main() {
- logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout))
- stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))
- stdlog.Print("I sure like pie")
-}
-
-// Output:
-// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"}
-```
-
-Or, if, for legacy reasons, you need to pipe all of your logging through the
-stdlib log package, you can redirect Go kit logger to the stdlib logger.
-
-```go
-logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{})
-logger.Log("legacy", true, "msg", "at least it's something")
-
-// Output:
-// 2016/01/01 12:34:56 legacy=true msg="at least it's something"
-```
-
-### Timestamps and callers
-
-```go
-var logger log.Logger
-logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
-logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-
-logger.Log("msg", "hello")
-
-// Output:
-// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello
-```
-
-## Supported output formats
-
-- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write))
-- JSON
-
-## Enhancements
-
-`package log` is centered on the one-method Logger interface.
-
-```go
-type Logger interface {
- Log(keyvals ...interface{}) error
-}
-```
-
-This interface, and its supporting code, is the product of much iteration
-and evaluation. For more details on the evolution of the Logger interface,
-see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1),
-a talk by [Chris Hines](https://github.com/ChrisHines).
-Also, please see
-[#63](https://github.com/go-kit/kit/issues/63),
-[#76](https://github.com/go-kit/kit/pull/76),
-[#131](https://github.com/go-kit/kit/issues/131),
-[#157](https://github.com/go-kit/kit/pull/157),
-[#164](https://github.com/go-kit/kit/issues/164), and
-[#252](https://github.com/go-kit/kit/pull/252)
-to review historical conversations about package log and the Logger interface.
-
-Value-add packages and suggestions,
-like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level),
-are of course welcome. Good proposals should
-
-- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With),
-- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and
-- Be friendly to packages that accept only an unadorned log.Logger.
-
-## Benchmarks & comparisons
-
-There are a few Go logging benchmarks and comparisons that include Go kit's package log.
-
-- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log
-- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log
diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go
deleted file mode 100644
index 918c0af4..00000000
--- a/vendor/github.com/go-kit/kit/log/doc.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Package log provides a structured logger.
-//
-// Structured logging produces logs easily consumed later by humans or
-// machines. Humans might be interested in debugging errors, or tracing
-// specific requests. Machines might be interested in counting interesting
-// events, or aggregating information for off-line processing. In both cases,
-// it is important that the log messages are structured and actionable.
-// Package log is designed to encourage both of these best practices.
-//
-// Basic Usage
-//
-// The fundamental interface is Logger. Loggers create log events from
-// key/value data. The Logger interface has a single method, Log, which
-// accepts a sequence of alternating key/value pairs, which this package names
-// keyvals.
-//
-// type Logger interface {
-// Log(keyvals ...interface{}) error
-// }
-//
-// Here is an example of a function using a Logger to create log events.
-//
-// func RunTask(task Task, logger log.Logger) string {
-// logger.Log("taskID", task.ID, "event", "starting task")
-// ...
-// logger.Log("taskID", task.ID, "event", "task complete")
-// }
-//
-// The keys in the above example are "taskID" and "event". The values are
-// task.ID, "starting task", and "task complete". Every key is followed
-// immediately by its value.
-//
-// Keys are usually plain strings. Values may be any type that has a sensible
-// encoding in the chosen log format. With structured logging it is a good
-// idea to log simple values without formatting them. This practice allows
-// the chosen logger to encode values in the most appropriate way.
-//
-// Contextual Loggers
-//
-// A contextual logger stores keyvals that it includes in all log events.
-// Building appropriate contextual loggers reduces repetition and aids
-// consistency in the resulting log output. With and WithPrefix add context to
-// a logger. We can use With to improve the RunTask example.
-//
-// func RunTask(task Task, logger log.Logger) string {
-// logger = log.With(logger, "taskID", task.ID)
-// logger.Log("event", "starting task")
-// ...
-// taskHelper(task.Cmd, logger)
-// ...
-// logger.Log("event", "task complete")
-// }
-//
-// The improved version emits the same log events as the original for the
-// first and last calls to Log. Passing the contextual logger to taskHelper
-// enables each log event created by taskHelper to include the task.ID even
-// though taskHelper does not have access to that value. Using contextual
-// loggers this way simplifies producing log output that enables tracing the
-// life cycle of individual tasks. (See the Contextual example for the full
-// code of the above snippet.)
-//
-// Dynamic Contextual Values
-//
-// A Valuer function stored in a contextual logger generates a new value each
-// time an event is logged. The Valuer example demonstrates how this feature
-// works.
-//
-// Valuers provide the basis for consistently logging timestamps and source
-// code location. The log package defines several valuers for that purpose.
-// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
-// DefaultCaller. A common logger initialization sequence that ensures all log
-// entries contain a timestamp and source location looks like this:
-//
-// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
-// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-//
-// Concurrent Safety
-//
-// Applications with multiple goroutines want each log event written to the
-// same logger to remain separate from other log events. Package log provides
-// two simple solutions for concurrent safe logging.
-//
-// NewSyncWriter wraps an io.Writer and serializes each call to its Write
-// method. Using a SyncWriter has the benefit that the smallest practical
-// portion of the logging logic is performed within a mutex, but it requires
-// the formatting Logger to make only one call to Write per log event.
-//
-// NewSyncLogger wraps any Logger and serializes each call to its Log method.
-// Using a SyncLogger has the benefit that it guarantees each log event is
-// handled atomically within the wrapped logger, but it typically serializes
-// both the formatting and output logic. Use a SyncLogger if the formatting
-// logger may perform multiple writes per log event.
-//
-// Error Handling
-//
-// This package relies on the practice of wrapping or decorating loggers with
-// other loggers to provide composable pieces of functionality. It also means
-// that Logger.Log must return an error because some
-// implementations—especially those that output log data to an io.Writer—may
-// encounter errors that cannot be handled locally. This in turn means that
-// Loggers that wrap other loggers should return errors from the wrapped
-// logger up the stack.
-//
-// Fortunately, the decorator pattern also provides a way to avoid the
-// necessity to check for errors every time an application calls Logger.Log.
-// An application required to panic whenever its Logger encounters
-// an error could initialize its logger as follows.
-//
-// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
-// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
-// if err := fmtlogger.Log(keyvals...); err != nil {
-// panic(err)
-// }
-// return nil
-// })
-package log
diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go
deleted file mode 100644
index 231e0995..00000000
--- a/vendor/github.com/go-kit/kit/log/json_logger.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package log
-
-import (
- "encoding"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
-)
-
-type jsonLogger struct {
- io.Writer
-}
-
-// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
-// single JSON object. Each log event produces no more than one call to
-// w.Write. The passed Writer must be safe for concurrent use by multiple
-// goroutines if the returned Logger will be used concurrently.
-func NewJSONLogger(w io.Writer) Logger {
- return &jsonLogger{w}
-}
-
-func (l *jsonLogger) Log(keyvals ...interface{}) error {
- n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
- m := make(map[string]interface{}, n)
- for i := 0; i < len(keyvals); i += 2 {
- k := keyvals[i]
- var v interface{} = ErrMissingValue
- if i+1 < len(keyvals) {
- v = keyvals[i+1]
- }
- merge(m, k, v)
- }
- return json.NewEncoder(l.Writer).Encode(m)
-}
-
-func merge(dst map[string]interface{}, k, v interface{}) {
- var key string
- switch x := k.(type) {
- case string:
- key = x
- case fmt.Stringer:
- key = safeString(x)
- default:
- key = fmt.Sprint(x)
- }
- if x, ok := v.(error); ok {
- v = safeError(x)
- }
-
-	// We want json.Marshaler and encoding.TextMarshaler to take priority over
-	// err.Error() and v.String(). But json.Marshal (called later) already does
-	// that by default, so we force a no-op if it's one of those two cases.
- switch x := v.(type) {
- case json.Marshaler:
- case encoding.TextMarshaler:
- case error:
- v = safeError(x)
- case fmt.Stringer:
- v = safeString(x)
- }
-
- dst[key] = v
-}
-
-func safeString(str fmt.Stringer) (s string) {
- defer func() {
- if panicVal := recover(); panicVal != nil {
- if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
- s = "NULL"
- } else {
- panic(panicVal)
- }
- }
- }()
- s = str.String()
- return
-}
-
-func safeError(err error) (s interface{}) {
- defer func() {
- if panicVal := recover(); panicVal != nil {
- if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
- s = nil
- } else {
- panic(panicVal)
- }
- }
- }()
- s = err.Error()
- return
-}
diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go
deleted file mode 100644
index 5e9df7fa..00000000
--- a/vendor/github.com/go-kit/kit/log/level/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package level implements leveled logging on top of package log. To use the
-// level package, create a logger as per normal in your func main, and wrap it
-// with level.NewFilter.
-//
-// var logger log.Logger
-// logger = log.NewLogfmtLogger(os.Stderr)
-// logger = level.NewFilter(logger, level.AllowInfoAndAbove()) // <--
-// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
-//
-// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
-// helper methods to emit leveled log events.
-//
-// logger.Log("foo", "bar") // as normal, no level
-// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get())
-// if value > 100 {
-// level.Error(logger).Log("value", value)
-// }
-//
-// NewFilter allows precise control over what happens when a log event is
-// emitted without a level key, or if a squelched level is used. Check the
-// Option functions for details.
-package level
diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go
deleted file mode 100644
index 6833b0dc..00000000
--- a/vendor/github.com/go-kit/kit/log/level/level.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package level
-
-import "github.com/go-kit/kit/log"
-
-// Error returns a logger that includes a Key/ErrorValue pair.
-func Error(logger log.Logger) log.Logger {
- return log.WithPrefix(logger, Key(), ErrorValue())
-}
-
-// Warn returns a logger that includes a Key/WarnValue pair.
-func Warn(logger log.Logger) log.Logger {
- return log.WithPrefix(logger, Key(), WarnValue())
-}
-
-// Info returns a logger that includes a Key/InfoValue pair.
-func Info(logger log.Logger) log.Logger {
- return log.WithPrefix(logger, Key(), InfoValue())
-}
-
-// Debug returns a logger that includes a Key/DebugValue pair.
-func Debug(logger log.Logger) log.Logger {
- return log.WithPrefix(logger, Key(), DebugValue())
-}
-
-// NewFilter wraps next and implements level filtering. See the commentary on
-// the Option functions for a detailed description of how to configure levels.
-// If no options are provided, all leveled log events created with Debug,
-// Info, Warn or Error helper methods are squelched and non-leveled log
-// events are passed to next unmodified.
-func NewFilter(next log.Logger, options ...Option) log.Logger {
- l := &logger{
- next: next,
- }
- for _, option := range options {
- option(l)
- }
- return l
-}
-
-type logger struct {
- next log.Logger
- allowed level
- squelchNoLevel bool
- errNotAllowed error
- errNoLevel error
-}
-
-func (l *logger) Log(keyvals ...interface{}) error {
- var hasLevel, levelAllowed bool
- for i := 1; i < len(keyvals); i += 2 {
- if v, ok := keyvals[i].(*levelValue); ok {
- hasLevel = true
- levelAllowed = l.allowed&v.level != 0
- break
- }
- }
- if !hasLevel && l.squelchNoLevel {
- return l.errNoLevel
- }
- if hasLevel && !levelAllowed {
- return l.errNotAllowed
- }
- return l.next.Log(keyvals...)
-}
-
-// Option sets a parameter for the leveled logger.
-type Option func(*logger)
-
-// AllowAll is an alias for AllowDebug.
-func AllowAll() Option {
- return AllowDebug()
-}
-
-// AllowDebug allows error, warn, info and debug level log events to pass.
-func AllowDebug() Option {
- return allowed(levelError | levelWarn | levelInfo | levelDebug)
-}
-
-// AllowInfo allows error, warn and info level log events to pass.
-func AllowInfo() Option {
- return allowed(levelError | levelWarn | levelInfo)
-}
-
-// AllowWarn allows error and warn level log events to pass.
-func AllowWarn() Option {
- return allowed(levelError | levelWarn)
-}
-
-// AllowError allows only error level log events to pass.
-func AllowError() Option {
- return allowed(levelError)
-}
-
-// AllowNone allows no leveled log events to pass.
-func AllowNone() Option {
- return allowed(0)
-}
-
-func allowed(allowed level) Option {
- return func(l *logger) { l.allowed = allowed }
-}
-
-// ErrNotAllowed sets the error to return from Log when it squelches a log
-// event disallowed by the configured Allow[Level] option. By default,
-// ErrNotAllowed is nil; in this case the log event is squelched with no
-// error.
-func ErrNotAllowed(err error) Option {
- return func(l *logger) { l.errNotAllowed = err }
-}
-
-// SquelchNoLevel instructs Log to squelch log events with no level, so that
-// they don't proceed through to the wrapped logger. If SquelchNoLevel is set
-// to true and a log event is squelched in this way, the error value
-// configured with ErrNoLevel is returned to the caller.
-func SquelchNoLevel(squelch bool) Option {
- return func(l *logger) { l.squelchNoLevel = squelch }
-}
-
-// ErrNoLevel sets the error to return from Log when it squelches a log event
-// with no level. By default, ErrNoLevel is nil; in this case the log event is
-// squelched with no error.
-func ErrNoLevel(err error) Option {
- return func(l *logger) { l.errNoLevel = err }
-}
-
-// NewInjector wraps next and returns a logger that adds a Key/level pair to
-// the beginning of log events that don't already contain a level. In effect,
-// this gives a default level to logs without a level.
-func NewInjector(next log.Logger, level Value) log.Logger {
- return &injector{
- next: next,
- level: level,
- }
-}
-
-type injector struct {
- next log.Logger
- level interface{}
-}
-
-func (l *injector) Log(keyvals ...interface{}) error {
- for i := 1; i < len(keyvals); i += 2 {
- if _, ok := keyvals[i].(*levelValue); ok {
- return l.next.Log(keyvals...)
- }
- }
- kvs := make([]interface{}, len(keyvals)+2)
- kvs[0], kvs[1] = key, l.level
- copy(kvs[2:], keyvals)
- return l.next.Log(kvs...)
-}
-
-// Value is the interface that each of the canonical level values implement.
-// It contains unexported methods that prevent types from other packages from
-// implementing it, guaranteeing that NewFilter can distinguish the levels
-// defined in this package from all other values.
-type Value interface {
- String() string
- levelVal()
-}
-
-// Key returns the unique key added to log events by the loggers in this
-// package.
-func Key() interface{} { return key }
-
-// ErrorValue returns the unique value added to log events by Error.
-func ErrorValue() Value { return errorValue }
-
-// WarnValue returns the unique value added to log events by Warn.
-func WarnValue() Value { return warnValue }
-
-// InfoValue returns the unique value added to log events by Info.
-func InfoValue() Value { return infoValue }
-
-// DebugValue returns the unique value added to log events by Debug.
-func DebugValue() Value { return debugValue }
-
-var (
-	// key is of type interface{} so that it allocates once during package
-	// initialization and avoids allocating every time the value is added to a
-	// []interface{} later.
- key interface{} = "level"
-
- errorValue = &levelValue{level: levelError, name: "error"}
- warnValue = &levelValue{level: levelWarn, name: "warn"}
- infoValue = &levelValue{level: levelInfo, name: "info"}
- debugValue = &levelValue{level: levelDebug, name: "debug"}
-)
-
-type level byte
-
-const (
- levelDebug level = 1 << iota
- levelInfo
- levelWarn
- levelError
-)
-
-type levelValue struct {
- name string
- level
-}
-
-func (v *levelValue) String() string { return v.name }
-func (v *levelValue) levelVal() {}
diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go
deleted file mode 100644
index 66a9e2fd..00000000
--- a/vendor/github.com/go-kit/kit/log/log.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package log
-
-import "errors"
-
-// Logger is the fundamental interface for all log operations. Log creates a
-// log event from keyvals, a variadic sequence of alternating keys and values.
-// Implementations must be safe for concurrent use by multiple goroutines. In
-// particular, any implementation of Logger that appends to keyvals or
-// modifies or retains any of its elements must make a copy first.
-type Logger interface {
- Log(keyvals ...interface{}) error
-}
-
-// ErrMissingValue is appended to keyvals slices with odd length to substitute
-// the missing value.
-var ErrMissingValue = errors.New("(MISSING)")
-
-// With returns a new contextual logger with keyvals prepended to those passed
-// to calls to Log. If logger is also a contextual logger created by With or
-// WithPrefix, keyvals is appended to the existing context.
-//
-// The returned Logger replaces all value elements (odd indexes) containing a
-// Valuer with their generated value for each call to its Log method.
-func With(logger Logger, keyvals ...interface{}) Logger {
- if len(keyvals) == 0 {
- return logger
- }
- l := newContext(logger)
- kvs := append(l.keyvals, keyvals...)
- if len(kvs)%2 != 0 {
- kvs = append(kvs, ErrMissingValue)
- }
- return &context{
- logger: l.logger,
- // Limiting the capacity of the stored keyvals ensures that a new
- // backing array is created if the slice must grow in Log or With.
- // Using the extra capacity without copying risks a data race that
- // would violate the Logger interface contract.
- keyvals: kvs[:len(kvs):len(kvs)],
- hasValuer: l.hasValuer || containsValuer(keyvals),
- }
-}
-
-// WithPrefix returns a new contextual logger with keyvals prepended to those
-// passed to calls to Log. If logger is also a contextual logger created by
-// With or WithPrefix, keyvals is prepended to the existing context.
-//
-// The returned Logger replaces all value elements (odd indexes) containing a
-// Valuer with their generated value for each call to its Log method.
-func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
- if len(keyvals) == 0 {
- return logger
- }
- l := newContext(logger)
- // Limiting the capacity of the stored keyvals ensures that a new
- // backing array is created if the slice must grow in Log or With.
- // Using the extra capacity without copying risks a data race that
- // would violate the Logger interface contract.
- n := len(l.keyvals) + len(keyvals)
- if len(keyvals)%2 != 0 {
- n++
- }
- kvs := make([]interface{}, 0, n)
- kvs = append(kvs, keyvals...)
- if len(kvs)%2 != 0 {
- kvs = append(kvs, ErrMissingValue)
- }
- kvs = append(kvs, l.keyvals...)
- return &context{
- logger: l.logger,
- keyvals: kvs,
- hasValuer: l.hasValuer || containsValuer(keyvals),
- }
-}
-
-// context is the Logger implementation returned by With and WithPrefix. It
-// wraps a Logger and holds keyvals that it includes in all log events. Its
-// Log method calls bindValues to generate values for each Valuer in the
-// context keyvals.
-//
-// A context must always have the same number of stack frames between calls to
-// its Log method and the eventual binding of Valuers to their value. This
-// requirement comes from the functional requirement to allow a context to
-// resolve application call site information for a Caller stored in the
-// context. To do this we must be able to predict the number of logging
-// functions on the stack when bindValues is called.
-//
-// Two implementation details provide the needed stack depth consistency.
-//
-// 1. newContext avoids introducing an additional layer when asked to
-// wrap another context.
-// 2. With and WithPrefix avoid introducing an additional layer by
-// returning a newly constructed context with a merged keyvals rather
-// than simply wrapping the existing context.
-type context struct {
- logger Logger
- keyvals []interface{}
- hasValuer bool
-}
-
-func newContext(logger Logger) *context {
- if c, ok := logger.(*context); ok {
- return c
- }
- return &context{logger: logger}
-}
-
-// Log replaces all value elements (odd indexes) containing a Valuer in the
-// stored context with their generated value, appends keyvals, and passes the
-// result to the wrapped Logger.
-func (l *context) Log(keyvals ...interface{}) error {
- kvs := append(l.keyvals, keyvals...)
- if len(kvs)%2 != 0 {
- kvs = append(kvs, ErrMissingValue)
- }
- if l.hasValuer {
- // If no keyvals were appended above then we must copy l.keyvals so
- // that future log events will reevaluate the stored Valuers.
- if len(keyvals) == 0 {
- kvs = append([]interface{}{}, l.keyvals...)
- }
- bindValues(kvs[:len(l.keyvals)])
- }
- return l.logger.Log(kvs...)
-}
-
-// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
-// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
-// object that calls f.
-type LoggerFunc func(...interface{}) error
-
-// Log implements Logger by calling f(keyvals...).
-func (f LoggerFunc) Log(keyvals ...interface{}) error {
- return f(keyvals...)
-}
diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go
deleted file mode 100644
index a0030529..00000000
--- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package log
-
-import (
- "bytes"
- "io"
- "sync"
-
- "github.com/go-logfmt/logfmt"
-)
-
-type logfmtEncoder struct {
- *logfmt.Encoder
- buf bytes.Buffer
-}
-
-func (l *logfmtEncoder) Reset() {
- l.Encoder.Reset()
- l.buf.Reset()
-}
-
-var logfmtEncoderPool = sync.Pool{
- New: func() interface{} {
- var enc logfmtEncoder
- enc.Encoder = logfmt.NewEncoder(&enc.buf)
- return &enc
- },
-}
-
-type logfmtLogger struct {
- w io.Writer
-}
-
-// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
-// logfmt format. Each log event produces no more than one call to w.Write.
-// The passed Writer must be safe for concurrent use by multiple goroutines if
-// the returned Logger will be used concurrently.
-func NewLogfmtLogger(w io.Writer) Logger {
- return &logfmtLogger{w}
-}
-
-func (l logfmtLogger) Log(keyvals ...interface{}) error {
- enc := logfmtEncoderPool.Get().(*logfmtEncoder)
- enc.Reset()
- defer logfmtEncoderPool.Put(enc)
-
- if err := enc.EncodeKeyvals(keyvals...); err != nil {
- return err
- }
-
- // Add newline to the end of the buffer
- if err := enc.EndRecord(); err != nil {
- return err
- }
-
- // The Logger interface requires implementations to be safe for concurrent
- // use by multiple goroutines. For this implementation that means making
- // only one call to l.w.Write() for each call to Log.
- if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go
deleted file mode 100644
index 1047d626..00000000
--- a/vendor/github.com/go-kit/kit/log/nop_logger.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package log
-
-type nopLogger struct{}
-
-// NewNopLogger returns a logger that doesn't do anything.
-func NewNopLogger() Logger { return nopLogger{} }
-
-func (nopLogger) Log(...interface{}) error { return nil }
diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go
deleted file mode 100644
index ff96b5de..00000000
--- a/vendor/github.com/go-kit/kit/log/stdlib.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package log
-
-import (
- "io"
- "log"
- "regexp"
- "strings"
-)
-
-// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
-// designed to be passed to a Go kit logger as the writer, for cases where
-// it's necessary to redirect all Go kit log output to the stdlib logger.
-//
-// If you have any choice in the matter, you shouldn't use this. Prefer to
-// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
-type StdlibWriter struct{}
-
-// Write implements io.Writer.
-func (w StdlibWriter) Write(p []byte) (int, error) {
- log.Print(strings.TrimSpace(string(p)))
- return len(p), nil
-}
-
-// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
-// logger's SetOutput. It will extract date/timestamps, filenames, and
-// messages, and place them under relevant keys.
-type StdlibAdapter struct {
- Logger
- timestampKey string
- fileKey string
- messageKey string
-}
-
-// StdlibAdapterOption sets a parameter for the StdlibAdapter.
-type StdlibAdapterOption func(*StdlibAdapter)
-
-// TimestampKey sets the key for the timestamp field. By default, it's "ts".
-func TimestampKey(key string) StdlibAdapterOption {
- return func(a *StdlibAdapter) { a.timestampKey = key }
-}
-
-// FileKey sets the key for the file and line field. By default, it's "caller".
-func FileKey(key string) StdlibAdapterOption {
- return func(a *StdlibAdapter) { a.fileKey = key }
-}
-
-// MessageKey sets the key for the actual log message. By default, it's "msg".
-func MessageKey(key string) StdlibAdapterOption {
- return func(a *StdlibAdapter) { a.messageKey = key }
-}
-
-// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
-// logger. It's designed to be passed to log.SetOutput.
-func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
- a := StdlibAdapter{
- Logger: logger,
- timestampKey: "ts",
- fileKey: "caller",
- messageKey: "msg",
- }
- for _, option := range options {
- option(&a)
- }
- return a
-}
-
-func (a StdlibAdapter) Write(p []byte) (int, error) {
- result := subexps(p)
- keyvals := []interface{}{}
- var timestamp string
- if date, ok := result["date"]; ok && date != "" {
- timestamp = date
- }
- if time, ok := result["time"]; ok && time != "" {
- if timestamp != "" {
- timestamp += " "
- }
- timestamp += time
- }
- if timestamp != "" {
- keyvals = append(keyvals, a.timestampKey, timestamp)
- }
- if file, ok := result["file"]; ok && file != "" {
- keyvals = append(keyvals, a.fileKey, file)
- }
- if msg, ok := result["msg"]; ok {
- keyvals = append(keyvals, a.messageKey, msg)
- }
- if err := a.Logger.Log(keyvals...); err != nil {
- return 0, err
- }
- return len(p), nil
-}
-
-const (
-	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
-	logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?`
-	logRegexpFile = `(?P<file>.+?:[0-9]+)?`
-	logRegexpMsg  = `(: )?(?P<msg>.*)`
-)
-
-var (
- logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg)
-)
-
-func subexps(line []byte) map[string]string {
- m := logRegexp.FindSubmatch(line)
- if len(m) < len(logRegexp.SubexpNames()) {
- return map[string]string{}
- }
- result := map[string]string{}
- for i, name := range logRegexp.SubexpNames() {
- result[name] = string(m[i])
- }
- return result
-}
diff --git a/vendor/github.com/go-kit/kit/log/sync.go b/vendor/github.com/go-kit/kit/log/sync.go
deleted file mode 100644
index 2e846908..00000000
--- a/vendor/github.com/go-kit/kit/log/sync.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package log
-
-import (
- "io"
- "sync"
- "sync/atomic"
-)
-
-// SwapLogger wraps another logger that may be safely replaced while other
-// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
-// will discard all log events without error.
-//
-// SwapLogger serves well as a package global logger that can be changed by
-// importers.
-type SwapLogger struct {
- logger atomic.Value
-}
-
-type loggerStruct struct {
- Logger
-}
-
-// Log implements the Logger interface by forwarding keyvals to the currently
-// wrapped logger. It does not log anything if the wrapped logger is nil.
-func (l *SwapLogger) Log(keyvals ...interface{}) error {
- s, ok := l.logger.Load().(loggerStruct)
- if !ok || s.Logger == nil {
- return nil
- }
- return s.Log(keyvals...)
-}
-
-// Swap replaces the currently wrapped logger with logger. Swap may be called
-// concurrently with calls to Log from other goroutines.
-func (l *SwapLogger) Swap(logger Logger) {
- l.logger.Store(loggerStruct{logger})
-}
-
-// NewSyncWriter returns a new writer that is safe for concurrent use by
-// multiple goroutines. Writes to the returned writer are passed on to w. If
-// another write is already in progress, the calling goroutine blocks until
-// the writer is available.
-//
-// If w implements the following interface, so does the returned writer.
-//
-// interface {
-// Fd() uintptr
-// }
-func NewSyncWriter(w io.Writer) io.Writer {
- switch w := w.(type) {
- case fdWriter:
- return &fdSyncWriter{fdWriter: w}
- default:
- return &syncWriter{Writer: w}
- }
-}
-
-// syncWriter synchronizes concurrent writes to an io.Writer.
-type syncWriter struct {
- sync.Mutex
- io.Writer
-}
-
-// Write writes p to the underlying io.Writer. If another write is already in
-// progress, the calling goroutine blocks until the syncWriter is available.
-func (w *syncWriter) Write(p []byte) (n int, err error) {
- w.Lock()
- n, err = w.Writer.Write(p)
- w.Unlock()
- return n, err
-}
-
-// fdWriter is an io.Writer that also has an Fd method. The most common
-// example of an fdWriter is an *os.File.
-type fdWriter interface {
- io.Writer
- Fd() uintptr
-}
-
-// fdSyncWriter synchronizes concurrent writes to an fdWriter.
-type fdSyncWriter struct {
- sync.Mutex
- fdWriter
-}
-
-// Write writes p to the underlying io.Writer. If another write is already in
-// progress, the calling goroutine blocks until the fdSyncWriter is available.
-func (w *fdSyncWriter) Write(p []byte) (n int, err error) {
- w.Lock()
- n, err = w.fdWriter.Write(p)
- w.Unlock()
- return n, err
-}
-
-// syncLogger provides concurrent safe logging for another Logger.
-type syncLogger struct {
- mu sync.Mutex
- logger Logger
-}
-
-// NewSyncLogger returns a logger that synchronizes concurrent use of the
-// wrapped logger. When multiple goroutines use the SyncLogger concurrently
-// only one goroutine will be allowed to log to the wrapped logger at a time.
-// The other goroutines will block until the logger is available.
-func NewSyncLogger(logger Logger) Logger {
- return &syncLogger{logger: logger}
-}
-
-// Log logs keyvals to the underlying Logger. If another log is already in
-// progress, the calling goroutine blocks until the syncLogger is available.
-func (l *syncLogger) Log(keyvals ...interface{}) error {
- l.mu.Lock()
- err := l.logger.Log(keyvals...)
- l.mu.Unlock()
- return err
-}
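
A brief sketch of the concurrency helpers deleted above (the exporter wires up its own logger elsewhere; this is illustrative only):

```go
import (
	"os"

	"github.com/go-kit/kit/log"
)

func newLogger() log.Logger {
	// NewSyncWriter serializes Writes, which is sufficient for loggers that
	// make a single Write per event (the logfmt and JSON loggers do).
	base := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	// SwapLogger allows the wrapped logger to be replaced later, e.g. once
	// flags are parsed; its zero value silently discards events.
	var swap log.SwapLogger
	swap.Swap(base)
	return &swap
}
```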
diff --git a/vendor/github.com/go-kit/kit/log/value.go b/vendor/github.com/go-kit/kit/log/value.go
deleted file mode 100644
index b56f154f..00000000
--- a/vendor/github.com/go-kit/kit/log/value.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package log
-
-import (
- "time"
-
- "github.com/go-stack/stack"
-)
-
-// A Valuer generates a log value. When passed to With or WithPrefix in a
-// value element (odd indexes), it represents a dynamic value which is re-
-// evaluated with each log event.
-type Valuer func() interface{}
-
-// bindValues replaces all value elements (odd indexes) containing a Valuer
-// with their generated value.
-func bindValues(keyvals []interface{}) {
- for i := 1; i < len(keyvals); i += 2 {
- if v, ok := keyvals[i].(Valuer); ok {
- keyvals[i] = v()
- }
- }
-}
-
-// containsValuer returns true if any of the value elements (odd indexes)
-// contain a Valuer.
-func containsValuer(keyvals []interface{}) bool {
- for i := 1; i < len(keyvals); i += 2 {
- if _, ok := keyvals[i].(Valuer); ok {
- return true
- }
- }
- return false
-}
-
-// Timestamp returns a timestamp Valuer. It invokes the t function to get the
-// time; unless you are doing something tricky, pass time.Now.
-//
-// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
-// are TimestampFormats that use the RFC3339Nano format.
-func Timestamp(t func() time.Time) Valuer {
- return func() interface{} { return t() }
-}
-
-// TimestampFormat returns a timestamp Valuer with a custom time format. It
-// invokes the t function to get the time to format; unless you are doing
-// something tricky, pass time.Now. The layout string is passed to
-// Time.Format.
-//
-// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
-// are TimestampFormats that use the RFC3339Nano format.
-func TimestampFormat(t func() time.Time, layout string) Valuer {
- return func() interface{} {
- return timeFormat{
- time: t(),
- layout: layout,
- }
- }
-}
-
-// A timeFormat represents an instant in time and a layout used when
-// marshaling to a text format.
-type timeFormat struct {
- time time.Time
- layout string
-}
-
-func (tf timeFormat) String() string {
- return tf.time.Format(tf.layout)
-}
-
-// MarshalText implements encoding.TextMarshaler.
-func (tf timeFormat) MarshalText() (text []byte, err error) {
-	// The following code is adapted from the standard library time.Time.Format
- // method. Using the same undocumented magic constant to extend the size
- // of the buffer as seen there.
- b := make([]byte, 0, len(tf.layout)+10)
- b = tf.time.AppendFormat(b, tf.layout)
- return b, nil
-}
-
-// Caller returns a Valuer that returns a file and line from a specified depth
-// in the callstack. Users will probably want to use DefaultCaller.
-func Caller(depth int) Valuer {
- return func() interface{} { return stack.Caller(depth) }
-}
-
-var (
- // DefaultTimestamp is a Valuer that returns the current wallclock time,
- // respecting time zones, when bound.
- DefaultTimestamp = TimestampFormat(time.Now, time.RFC3339Nano)
-
- // DefaultTimestampUTC is a Valuer that returns the current time in UTC
- // when bound.
- DefaultTimestampUTC = TimestampFormat(
- func() time.Time { return time.Now().UTC() },
- time.RFC3339Nano,
- )
-
- // DefaultCaller is a Valuer that returns the file and line where the Log
- // method was invoked. It can only be used with log.With.
- DefaultCaller = Caller(3)
-)
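
And a short illustration of the Valuer helpers above with a non-default timestamp layout (sketch only):

```go
import (
	"os"
	"time"

	"github.com/go-kit/kit/log"
)

func example() {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
	// TimestampFormat and DefaultCaller are Valuers: placed in value position,
	// they are re-evaluated on every Log call.
	logger = log.With(logger,
		"ts", log.TimestampFormat(time.Now, time.RFC822),
		"caller", log.DefaultCaller,
	)
	logger.Log("msg", "hello")
}
```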
diff --git a/vendor/github.com/go-logfmt/logfmt/LICENSE b/vendor/github.com/go-logfmt/logfmt/LICENSE
deleted file mode 100644
index c0265089..00000000
--- a/vendor/github.com/go-logfmt/logfmt/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 go-logfmt
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/go-logfmt/logfmt/README.md b/vendor/github.com/go-logfmt/logfmt/README.md
deleted file mode 100644
index 3a8f10b7..00000000
--- a/vendor/github.com/go-logfmt/logfmt/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-[GoDoc](https://godoc.org/github.com/go-logfmt/logfmt)
-[Go Report Card](https://goreportcard.com/report/go-logfmt/logfmt)
-[Travis CI](https://travis-ci.org/go-logfmt/logfmt)
-[Coveralls](https://coveralls.io/github/go-logfmt/logfmt?branch=master)
-
-# logfmt
-
-Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
-format](https://brandur.org/logfmt). It provides an API similar to
-[encoding/json](http://golang.org/pkg/encoding/json/) and
-[encoding/xml](http://golang.org/pkg/encoding/xml/).
-
-The logfmt format was first documented by Brandur Leach in [this
-article](https://brandur.org/logfmt). The format has not been formally
-standardized. The most authoritative public specification to date has been the
-documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt)
-written by Blake Mizerany and Keith Rarick.
-
-## Goals
-
-This project attempts to conform as closely as possible to the prior art, while
-also removing ambiguity where necessary to provide well behaved encoder and
-decoder implementations.
-
-## Non-goals
-
-This project does not attempt to formally standardize the logfmt format. In the
-event that logfmt is standardized this project would take conforming to the
-standard as a goal.
-
-## Versioning
-
-Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'.
diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go
deleted file mode 100644
index 04e0efff..00000000
--- a/vendor/github.com/go-logfmt/logfmt/decode.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package logfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "unicode/utf8"
-)
-
-// A Decoder reads and decodes logfmt records from an input stream.
-type Decoder struct {
- pos int
- key []byte
- value []byte
- lineNum int
- s *bufio.Scanner
- err error
-}
-
-// NewDecoder returns a new decoder that reads from r.
-//
-// The decoder introduces its own buffering and may read data from r beyond
-// the logfmt records requested.
-func NewDecoder(r io.Reader) *Decoder {
- dec := &Decoder{
- s: bufio.NewScanner(r),
- }
- return dec
-}
-
-// ScanRecord advances the Decoder to the next record, which can then be
-// parsed with the ScanKeyval method. It returns false when decoding stops,
-// either by reaching the end of the input or an error. After ScanRecord
-// returns false, the Err method will return any error that occurred during
-// decoding, except that if it was io.EOF, Err will return nil.
-func (dec *Decoder) ScanRecord() bool {
- if dec.err != nil {
- return false
- }
- if !dec.s.Scan() {
- dec.err = dec.s.Err()
- return false
- }
- dec.lineNum++
- dec.pos = 0
- return true
-}
-
-// ScanKeyval advances the Decoder to the next key/value pair of the current
-// record, which can then be retrieved with the Key and Value methods. It
-// returns false when decoding stops, either by reaching the end of the
-// current record or an error.
-func (dec *Decoder) ScanKeyval() bool {
- dec.key, dec.value = nil, nil
- if dec.err != nil {
- return false
- }
-
- line := dec.s.Bytes()
-
- // garbage
- for p, c := range line[dec.pos:] {
- if c > ' ' {
- dec.pos += p
- goto key
- }
- }
- dec.pos = len(line)
- return false
-
-key:
- const invalidKeyError = "invalid key"
-
- start, multibyte := dec.pos, false
- for p, c := range line[dec.pos:] {
- switch {
- case c == '=':
- dec.pos += p
- if dec.pos > start {
- dec.key = line[start:dec.pos]
- if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 {
- dec.syntaxError(invalidKeyError)
- return false
- }
- }
- if dec.key == nil {
- dec.unexpectedByte(c)
- return false
- }
- goto equal
- case c == '"':
- dec.pos += p
- dec.unexpectedByte(c)
- return false
- case c <= ' ':
- dec.pos += p
- if dec.pos > start {
- dec.key = line[start:dec.pos]
- if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 {
- dec.syntaxError(invalidKeyError)
- return false
- }
- }
- return true
- case c >= utf8.RuneSelf:
- multibyte = true
- }
- }
- dec.pos = len(line)
- if dec.pos > start {
- dec.key = line[start:dec.pos]
- if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 {
- dec.syntaxError(invalidKeyError)
- return false
- }
- }
- return true
-
-equal:
- dec.pos++
- if dec.pos >= len(line) {
- return true
- }
- switch c := line[dec.pos]; {
- case c <= ' ':
- return true
- case c == '"':
- goto qvalue
- }
-
- // value
- start = dec.pos
- for p, c := range line[dec.pos:] {
- switch {
- case c == '=' || c == '"':
- dec.pos += p
- dec.unexpectedByte(c)
- return false
- case c <= ' ':
- dec.pos += p
- if dec.pos > start {
- dec.value = line[start:dec.pos]
- }
- return true
- }
- }
- dec.pos = len(line)
- if dec.pos > start {
- dec.value = line[start:dec.pos]
- }
- return true
-
-qvalue:
- const (
- untermQuote = "unterminated quoted value"
- invalidQuote = "invalid quoted value"
- )
-
- hasEsc, esc := false, false
- start = dec.pos
- for p, c := range line[dec.pos+1:] {
- switch {
- case esc:
- esc = false
- case c == '\\':
- hasEsc, esc = true, true
- case c == '"':
- dec.pos += p + 2
- if hasEsc {
- v, ok := unquoteBytes(line[start:dec.pos])
- if !ok {
- dec.syntaxError(invalidQuote)
- return false
- }
- dec.value = v
- } else {
- start++
- end := dec.pos - 1
- if end > start {
- dec.value = line[start:end]
- }
- }
- return true
- }
- }
- dec.pos = len(line)
- dec.syntaxError(untermQuote)
- return false
-}
-
-// Key returns the most recent key found by a call to ScanKeyval. The returned
-// slice may point to internal buffers and is only valid until the next call
-// to ScanRecord. It does no allocation.
-func (dec *Decoder) Key() []byte {
- return dec.key
-}
-
-// Value returns the most recent value found by a call to ScanKeyval. The
-// returned slice may point to internal buffers and is only valid until the
-// next call to ScanRecord. It does no allocation when the value has no
-// escape sequences.
-func (dec *Decoder) Value() []byte {
- return dec.value
-}
-
-// Err returns the first non-EOF error that was encountered by the Scanner.
-func (dec *Decoder) Err() error {
- return dec.err
-}
-
-func (dec *Decoder) syntaxError(msg string) {
- dec.err = &SyntaxError{
- Msg: msg,
- Line: dec.lineNum,
- Pos: dec.pos + 1,
- }
-}
-
-func (dec *Decoder) unexpectedByte(c byte) {
- dec.err = &SyntaxError{
- Msg: fmt.Sprintf("unexpected %q", c),
- Line: dec.lineNum,
- Pos: dec.pos + 1,
- }
-}
-
-// A SyntaxError represents a syntax error in the logfmt input stream.
-type SyntaxError struct {
- Msg string
- Line int
- Pos int
-}
-
-func (e *SyntaxError) Error() string {
- return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg)
-}
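
The ScanRecord/ScanKeyval documentation above implies a two-level scanning loop, roughly as follows (sketch only):

```go
import (
	"fmt"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func printPairs(input string) error {
	dec := logfmt.NewDecoder(strings.NewReader(input))
	for dec.ScanRecord() { // one record (line) at a time
		for dec.ScanKeyval() { // one key/value pair at a time
			fmt.Printf("%s=%s ", dec.Key(), dec.Value())
		}
		fmt.Println()
	}
	return dec.Err() // nil on clean EOF
}
```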
diff --git a/vendor/github.com/go-logfmt/logfmt/doc.go b/vendor/github.com/go-logfmt/logfmt/doc.go
deleted file mode 100644
index 378e9ad1..00000000
--- a/vendor/github.com/go-logfmt/logfmt/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Package logfmt implements utilities to marshal and unmarshal data in the
-// logfmt format. The logfmt format records key/value pairs in a way that
-// balances readability for humans and simplicity of computer parsing. It is
-// most commonly used as a more human friendly alternative to JSON for
-// structured logging.
-package logfmt
diff --git a/vendor/github.com/go-logfmt/logfmt/encode.go b/vendor/github.com/go-logfmt/logfmt/encode.go
deleted file mode 100644
index 55f1603c..00000000
--- a/vendor/github.com/go-logfmt/logfmt/encode.go
+++ /dev/null
@@ -1,321 +0,0 @@
-package logfmt
-
-import (
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "reflect"
- "strings"
- "unicode/utf8"
-)
-
-// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence
-// of alternating keys and values.
-func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) {
- buf := &bytes.Buffer{}
- if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-// An Encoder writes logfmt data to an output stream.
-type Encoder struct {
- w io.Writer
- scratch bytes.Buffer
- needSep bool
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: w,
- }
-}
-
-var (
- space = []byte(" ")
- equals = []byte("=")
- newline = []byte("\n")
- null = []byte("null")
-)
-
-// EncodeKeyval writes the logfmt encoding of key and value to the stream. A
-// single space is written before the second and subsequent keys in a record.
-// Nothing is written if a non-nil error is returned.
-func (enc *Encoder) EncodeKeyval(key, value interface{}) error {
- enc.scratch.Reset()
- if enc.needSep {
- if _, err := enc.scratch.Write(space); err != nil {
- return err
- }
- }
- if err := writeKey(&enc.scratch, key); err != nil {
- return err
- }
- if _, err := enc.scratch.Write(equals); err != nil {
- return err
- }
- if err := writeValue(&enc.scratch, value); err != nil {
- return err
- }
- _, err := enc.w.Write(enc.scratch.Bytes())
- enc.needSep = true
- return err
-}
-
-// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals
-// is a variadic sequence of alternating keys and values. Keys of unsupported
-// type are skipped along with their corresponding value. Values of
-// unsupported type or that cause a MarshalerError are replaced by their error
-// but do not cause EncodeKeyvals to return an error. If a non-nil error is
-// returned, some key/value pairs may not have been written.
-func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error {
- if len(keyvals) == 0 {
- return nil
- }
- if len(keyvals)%2 == 1 {
- keyvals = append(keyvals, nil)
- }
- for i := 0; i < len(keyvals); i += 2 {
- k, v := keyvals[i], keyvals[i+1]
- err := enc.EncodeKeyval(k, v)
- if err == ErrUnsupportedKeyType {
- continue
- }
- if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType {
- v = err
- err = enc.EncodeKeyval(k, v)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// MarshalerError represents an error encountered while marshaling a value.
-type MarshalerError struct {
- Type reflect.Type
- Err error
-}
-
-func (e *MarshalerError) Error() string {
- return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error()
-}
-
-// ErrNilKey is returned by Marshal functions and Encoder methods if a key is
-// a nil interface or pointer value.
-var ErrNilKey = errors.New("nil key")
-
-// ErrInvalidKey is returned by Marshal functions and Encoder methods if a key
-// contains an invalid character.
-var ErrInvalidKey = errors.New("invalid key")
-
-// ErrUnsupportedKeyType is returned by Encoder methods if a key has an
-// unsupported type.
-var ErrUnsupportedKeyType = errors.New("unsupported key type")
-
-// ErrUnsupportedValueType is returned by Encoder methods if a value has an
-// unsupported type.
-var ErrUnsupportedValueType = errors.New("unsupported value type")
-
-func writeKey(w io.Writer, key interface{}) error {
- if key == nil {
- return ErrNilKey
- }
-
- switch k := key.(type) {
- case string:
- return writeStringKey(w, k)
- case []byte:
- if k == nil {
- return ErrNilKey
- }
- return writeBytesKey(w, k)
- case encoding.TextMarshaler:
- kb, err := safeMarshal(k)
- if err != nil {
- return err
- }
- if kb == nil {
- return ErrNilKey
- }
- return writeBytesKey(w, kb)
- case fmt.Stringer:
- ks, ok := safeString(k)
- if !ok {
- return ErrNilKey
- }
- return writeStringKey(w, ks)
- default:
- rkey := reflect.ValueOf(key)
- switch rkey.Kind() {
- case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
- return ErrUnsupportedKeyType
- case reflect.Ptr:
- if rkey.IsNil() {
- return ErrNilKey
- }
- return writeKey(w, rkey.Elem().Interface())
- }
- return writeStringKey(w, fmt.Sprint(k))
- }
-}
-
-func invalidKeyRune(r rune) bool {
- return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
-}
-
-func invalidKeyString(key string) bool {
- return len(key) == 0 || strings.IndexFunc(key, invalidKeyRune) != -1
-}
-
-func invalidKey(key []byte) bool {
- return len(key) == 0 || bytes.IndexFunc(key, invalidKeyRune) != -1
-}
-
-func writeStringKey(w io.Writer, key string) error {
- if invalidKeyString(key) {
- return ErrInvalidKey
- }
- _, err := io.WriteString(w, key)
- return err
-}
-
-func writeBytesKey(w io.Writer, key []byte) error {
- if invalidKey(key) {
- return ErrInvalidKey
- }
- _, err := w.Write(key)
- return err
-}
-
-func writeValue(w io.Writer, value interface{}) error {
- switch v := value.(type) {
- case nil:
- return writeBytesValue(w, null)
- case string:
- return writeStringValue(w, v, true)
- case []byte:
- return writeBytesValue(w, v)
- case encoding.TextMarshaler:
- vb, err := safeMarshal(v)
- if err != nil {
- return err
- }
- if vb == nil {
- vb = null
- }
- return writeBytesValue(w, vb)
- case error:
- se, ok := safeError(v)
- return writeStringValue(w, se, ok)
- case fmt.Stringer:
- ss, ok := safeString(v)
- return writeStringValue(w, ss, ok)
- default:
- rvalue := reflect.ValueOf(value)
- switch rvalue.Kind() {
- case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
- return ErrUnsupportedValueType
- case reflect.Ptr:
- if rvalue.IsNil() {
- return writeBytesValue(w, null)
- }
- return writeValue(w, rvalue.Elem().Interface())
- }
- return writeStringValue(w, fmt.Sprint(v), true)
- }
-}
-
-func needsQuotedValueRune(r rune) bool {
- return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
-}
-
-func writeStringValue(w io.Writer, value string, ok bool) error {
- var err error
- if ok && value == "null" {
- _, err = io.WriteString(w, `"null"`)
- } else if strings.IndexFunc(value, needsQuotedValueRune) != -1 {
- _, err = writeQuotedString(w, value)
- } else {
- _, err = io.WriteString(w, value)
- }
- return err
-}
-
-func writeBytesValue(w io.Writer, value []byte) error {
- var err error
- if bytes.IndexFunc(value, needsQuotedValueRune) != -1 {
- _, err = writeQuotedBytes(w, value)
- } else {
- _, err = w.Write(value)
- }
- return err
-}
-
-// EndRecord writes a newline character to the stream and resets the encoder
-// to the beginning of a new record.
-func (enc *Encoder) EndRecord() error {
- _, err := enc.w.Write(newline)
- if err == nil {
- enc.needSep = false
- }
- return err
-}
-
-// Reset resets the encoder to the beginning of a new record.
-func (enc *Encoder) Reset() {
- enc.needSep = false
-}
-
-func safeError(err error) (s string, ok bool) {
- defer func() {
- if panicVal := recover(); panicVal != nil {
- if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
- s, ok = "null", false
- } else {
- panic(panicVal)
- }
- }
- }()
- s, ok = err.Error(), true
- return
-}
-
-func safeString(str fmt.Stringer) (s string, ok bool) {
- defer func() {
- if panicVal := recover(); panicVal != nil {
- if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
- s, ok = "null", false
- } else {
- panic(panicVal)
- }
- }
- }()
- s, ok = str.String(), true
- return
-}
-
-func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) {
- defer func() {
- if panicVal := recover(); panicVal != nil {
- if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() {
- b, err = nil, nil
- } else {
- panic(panicVal)
- }
- }
- }()
- b, err = tm.MarshalText()
- if err != nil {
- return nil, &MarshalerError{
- Type: reflect.TypeOf(tm),
- Err: err,
- }
- }
- return
-}
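A short usage sketch for the encoder removed above, restricted to the exported API visible in this file (NewEncoder, EncodeKeyval, EndRecord, MarshalKeyvals); the output noted in comments follows the quoting rules defined here.

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-logfmt/logfmt"
)

func main() {
	enc := logfmt.NewEncoder(os.Stdout)
	enc.EncodeKeyval("level", "info")
	enc.EncodeKeyval("msg", "hello world") // quoted on output because of the space
	enc.EndRecord()                        // prints: level=info msg="hello world"

	// Or build a single record in memory:
	b, err := logfmt.MarshalKeyvals("level", "debug", "count", 3)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b) // level=debug count=3
}
```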
diff --git a/vendor/github.com/go-logfmt/logfmt/fuzz.go b/vendor/github.com/go-logfmt/logfmt/fuzz.go
deleted file mode 100644
index 6553b35e..00000000
--- a/vendor/github.com/go-logfmt/logfmt/fuzz.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// +build gofuzz
-
-package logfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "reflect"
-
- kr "github.com/kr/logfmt"
-)
-
-// Fuzz checks reserialized data matches
-func Fuzz(data []byte) int {
- parsed, err := parse(data)
- if err != nil {
- return 0
- }
- var w1 bytes.Buffer
- if err = write(parsed, &w1); err != nil {
- panic(err)
- }
- parsed, err = parse(w1.Bytes())
- if err != nil {
- panic(err)
- }
- var w2 bytes.Buffer
- if err = write(parsed, &w2); err != nil {
- panic(err)
- }
- if !bytes.Equal(w1.Bytes(), w2.Bytes()) {
- panic(fmt.Sprintf("reserialized data does not match:\n%q\n%q\n", w1.Bytes(), w2.Bytes()))
- }
- return 1
-}
-
-// FuzzVsKR checks go-logfmt/logfmt against kr/logfmt
-func FuzzVsKR(data []byte) int {
- parsed, err := parse(data)
- parsedKR, errKR := parseKR(data)
-
- // github.com/go-logfmt/logfmt is a stricter parser. It returns errors for
- // more inputs than github.com/kr/logfmt. Ignore any inputs that have a
-// strict error.
- if err != nil {
- return 0
- }
-
- // Fail if the more forgiving parser finds an error not found by the
- // stricter parser.
- if errKR != nil {
- panic(fmt.Sprintf("unmatched error: %v", errKR))
- }
-
- if !reflect.DeepEqual(parsed, parsedKR) {
- panic(fmt.Sprintf("parsers disagree:\n%+v\n%+v\n", parsed, parsedKR))
- }
- return 1
-}
-
-type kv struct {
- k, v []byte
-}
-
-func parse(data []byte) ([][]kv, error) {
- var got [][]kv
- dec := NewDecoder(bytes.NewReader(data))
- for dec.ScanRecord() {
- var kvs []kv
- for dec.ScanKeyval() {
- kvs = append(kvs, kv{dec.Key(), dec.Value()})
- }
- got = append(got, kvs)
- }
- return got, dec.Err()
-}
-
-func parseKR(data []byte) ([][]kv, error) {
- var (
- s = bufio.NewScanner(bytes.NewReader(data))
- err error
- h saveHandler
- got [][]kv
- )
- for err == nil && s.Scan() {
- h.kvs = nil
- err = kr.Unmarshal(s.Bytes(), &h)
- got = append(got, h.kvs)
- }
- if err == nil {
- err = s.Err()
- }
- return got, err
-}
-
-type saveHandler struct {
- kvs []kv
-}
-
-func (h *saveHandler) HandleLogfmt(key, val []byte) error {
- if len(key) == 0 {
- key = nil
- }
- if len(val) == 0 {
- val = nil
- }
- h.kvs = append(h.kvs, kv{key, val})
- return nil
-}
-
-func write(recs [][]kv, w io.Writer) error {
- enc := NewEncoder(w)
- for _, rec := range recs {
- for _, f := range rec {
- if err := enc.EncodeKeyval(f.k, f.v); err != nil {
- return err
- }
- }
- if err := enc.EndRecord(); err != nil {
- return err
- }
- }
- return nil
-}
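The fuzz targets above assert that parsing and re-encoding a record reaches a fixed point. A standalone sketch of that round-trip using only the public logfmt API (not part of the fuzz harness; the sample input is illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-logfmt/logfmt"
)

// reserialize parses logfmt records and re-encodes them, mirroring the
// parse/write helpers used by Fuzz above.
func reserialize(data []byte) ([]byte, error) {
	var out bytes.Buffer
	dec := logfmt.NewDecoder(bytes.NewReader(data))
	enc := logfmt.NewEncoder(&out)
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			if err := enc.EncodeKeyval(dec.Key(), dec.Value()); err != nil {
				return nil, err
			}
		}
		if err := enc.EndRecord(); err != nil {
			return nil, err
		}
	}
	return out.Bytes(), dec.Err()
}

func main() {
	in := []byte("a=1 b=\"two words\"\n")
	once, err := reserialize(in)
	if err != nil {
		panic(err)
	}
	twice, err := reserialize(once)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(once, twice)) // true: re-encoded output is a fixed point
}
```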
diff --git a/vendor/github.com/go-logfmt/logfmt/jsonstring.go b/vendor/github.com/go-logfmt/logfmt/jsonstring.go
deleted file mode 100644
index 030ac85f..00000000
--- a/vendor/github.com/go-logfmt/logfmt/jsonstring.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package logfmt
-
-import (
- "bytes"
- "io"
- "strconv"
- "sync"
- "unicode"
- "unicode/utf16"
- "unicode/utf8"
-)
-
-// Taken from Go's encoding/json and modified for use here.
-
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-var hex = "0123456789abcdef"
-
-var bufferPool = sync.Pool{
- New: func() interface{} {
- return &bytes.Buffer{}
- },
-}
-
-func getBuffer() *bytes.Buffer {
- return bufferPool.Get().(*bytes.Buffer)
-}
-
-func poolBuffer(buf *bytes.Buffer) {
- buf.Reset()
- bufferPool.Put(buf)
-}
-
-// NOTE: keep in sync with writeQuotedBytes below.
-func writeQuotedString(w io.Writer, s string) (int, error) {
- buf := getBuffer()
- buf.WriteByte('"')
- start := 0
- for i := 0; i < len(s); {
- if b := s[i]; b < utf8.RuneSelf {
- if 0x20 <= b && b != '\\' && b != '"' {
- i++
- continue
- }
- if start < i {
- buf.WriteString(s[start:i])
- }
- switch b {
- case '\\', '"':
- buf.WriteByte('\\')
- buf.WriteByte(b)
- case '\n':
- buf.WriteByte('\\')
- buf.WriteByte('n')
- case '\r':
- buf.WriteByte('\\')
- buf.WriteByte('r')
- case '\t':
- buf.WriteByte('\\')
- buf.WriteByte('t')
- default:
- // This encodes bytes < 0x20 except for \n, \r, and \t.
- buf.WriteString(`\u00`)
- buf.WriteByte(hex[b>>4])
- buf.WriteByte(hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRuneInString(s[i:])
- if c == utf8.RuneError {
- if start < i {
- buf.WriteString(s[start:i])
- }
- buf.WriteString(`\ufffd`)
- i += size
- start = i
- continue
- }
- i += size
- }
- if start < len(s) {
- buf.WriteString(s[start:])
- }
- buf.WriteByte('"')
- n, err := w.Write(buf.Bytes())
- poolBuffer(buf)
- return n, err
-}
-
-// NOTE: keep in sync with writeQuotedString above.
-func writeQuotedBytes(w io.Writer, s []byte) (int, error) {
- buf := getBuffer()
- buf.WriteByte('"')
- start := 0
- for i := 0; i < len(s); {
- if b := s[i]; b < utf8.RuneSelf {
- if 0x20 <= b && b != '\\' && b != '"' {
- i++
- continue
- }
- if start < i {
- buf.Write(s[start:i])
- }
- switch b {
- case '\\', '"':
- buf.WriteByte('\\')
- buf.WriteByte(b)
- case '\n':
- buf.WriteByte('\\')
- buf.WriteByte('n')
- case '\r':
- buf.WriteByte('\\')
- buf.WriteByte('r')
- case '\t':
- buf.WriteByte('\\')
- buf.WriteByte('t')
- default:
- // This encodes bytes < 0x20 except for \n, \r, and \t.
- buf.WriteString(`\u00`)
- buf.WriteByte(hex[b>>4])
- buf.WriteByte(hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRune(s[i:])
- if c == utf8.RuneError {
- if start < i {
- buf.Write(s[start:i])
- }
- buf.WriteString(`\ufffd`)
- i += size
- start = i
- continue
- }
- i += size
- }
- if start < len(s) {
- buf.Write(s[start:])
- }
- buf.WriteByte('"')
- n, err := w.Write(buf.Bytes())
- poolBuffer(buf)
- return n, err
-}
-
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
-// or it returns -1.
-func getu4(s []byte) rune {
- if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
- return -1
- }
- r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
- if err != nil {
- return -1
- }
- return rune(r)
-}
-
-func unquoteBytes(s []byte) (t []byte, ok bool) {
- if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
- return
- }
- s = s[1 : len(s)-1]
-
- // Check for unusual characters. If there are none,
- // then no unquoting is needed, so return a slice of the
- // original bytes.
- r := 0
- for r < len(s) {
- c := s[r]
- if c == '\\' || c == '"' || c < ' ' {
- break
- }
- if c < utf8.RuneSelf {
- r++
- continue
- }
- rr, size := utf8.DecodeRune(s[r:])
- if rr == utf8.RuneError {
- break
- }
- r += size
- }
- if r == len(s) {
- return s, true
- }
-
- b := make([]byte, len(s)+2*utf8.UTFMax)
- w := copy(b, s[0:r])
- for r < len(s) {
- // Out of room? Can only happen if s is full of
- // malformed UTF-8 and we're replacing each
- // byte with RuneError.
- if w >= len(b)-2*utf8.UTFMax {
- nb := make([]byte, (len(b)+utf8.UTFMax)*2)
- copy(nb, b[0:w])
- b = nb
- }
- switch c := s[r]; {
- case c == '\\':
- r++
- if r >= len(s) {
- return
- }
- switch s[r] {
- default:
- return
- case '"', '\\', '/', '\'':
- b[w] = s[r]
- r++
- w++
- case 'b':
- b[w] = '\b'
- r++
- w++
- case 'f':
- b[w] = '\f'
- r++
- w++
- case 'n':
- b[w] = '\n'
- r++
- w++
- case 'r':
- b[w] = '\r'
- r++
- w++
- case 't':
- b[w] = '\t'
- r++
- w++
- case 'u':
- r--
- rr := getu4(s[r:])
- if rr < 0 {
- return
- }
- r += 6
- if utf16.IsSurrogate(rr) {
- rr1 := getu4(s[r:])
- if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
- // A valid pair; consume.
- r += 6
- w += utf8.EncodeRune(b[w:], dec)
- break
- }
- // Invalid surrogate; fall back to replacement rune.
- rr = unicode.ReplacementChar
- }
- w += utf8.EncodeRune(b[w:], rr)
- }
-
- // Quote, control characters are invalid.
- case c == '"', c < ' ':
- return
-
- // ASCII
- case c < utf8.RuneSelf:
- b[w] = c
- r++
- w++
-
- // Coerce to well-formed UTF-8.
- default:
- rr, size := utf8.DecodeRune(s[r:])
- r += size
- w += utf8.EncodeRune(b[w:], rr)
- }
- }
- return b[0:w], true
-}
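The quoting helpers above apply JSON-style string escaping. Seen through the public encoder, the rules look roughly like this (the output in the comment is what the logic above produces for these values):

```go
package main

import (
	"fmt"

	"github.com/go-logfmt/logfmt"
)

func main() {
	b, err := logfmt.MarshalKeyvals(
		"plain", "ok",         // no special characters: written as-is
		"spaced", "two words", // contains a space: quoted
		"multiline", "a\nb",   // control character: quoted, newline escaped as \n
		"unicode", "héllo",    // valid UTF-8 above space: left unquoted
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b)
	// plain=ok spaced="two words" multiline="a\nb" unicode=héllo
}
```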
diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md
deleted file mode 100644
index 2abf98ea..00000000
--- a/vendor/github.com/go-stack/stack/LICENSE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Chris Hines
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md
deleted file mode 100644
index f11cccca..00000000
--- a/vendor/github.com/go-stack/stack/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-[GoDoc](https://godoc.org/github.com/go-stack/stack)
-[Go Report Card](https://goreportcard.com/report/go-stack/stack)
-[Build Status](https://travis-ci.org/go-stack/stack)
-[Coverage Status](https://coveralls.io/github/go-stack/stack?branch=master)
-
-# stack
-
-Package stack implements utilities to capture, manipulate, and format call
-stacks. It provides a simpler API than package runtime.
-
-The implementation takes care of the minutia and special cases of interpreting
-the program counter (pc) values returned by runtime.Callers.
-
-## Versioning
-
-Package stack publishes releases via [semver](http://semver.org/) compatible Git
-tags prefixed with a single 'v'. The master branch always contains the latest
-release. The develop branch contains unreleased commits.
-
-## Formatting
-
-Package stack's types implement fmt.Formatter, which provides a simple and
-flexible way to declaratively configure formatting when used with logging or
-error tracking packages.
-
-```go
-func DoTheThing() {
- c := stack.Caller(0)
- log.Print(c) // "source.go:10"
- log.Printf("%+v", c) // "pkg/path/source.go:10"
- log.Printf("%n", c) // "DoTheThing"
-
- s := stack.Trace().TrimRuntime()
- log.Print(s) // "[source.go:15 caller.go:42 main.go:14]"
-}
-```
-
-See the docs for all of the supported formatting options.
diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go
deleted file mode 100644
index a614eeeb..00000000
--- a/vendor/github.com/go-stack/stack/stack.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Package stack implements utilities to capture, manipulate, and format call
-// stacks. It provides a simpler API than package runtime.
-//
-// The implementation takes care of the minutia and special cases of
-// interpreting the program counter (pc) values returned by runtime.Callers.
-//
-// Package stack's types implement fmt.Formatter, which provides a simple and
-// flexible way to declaratively configure formatting when used with logging
-// or error tracking packages.
-package stack
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Call records a single function invocation from a goroutine stack.
-type Call struct {
- fn *runtime.Func
- pc uintptr
-}
-
-// Caller returns a Call from the stack of the current goroutine. The argument
-// skip is the number of stack frames to ascend, with 0 identifying the
-// calling function.
-func Caller(skip int) Call {
- var pcs [2]uintptr
- n := runtime.Callers(skip+1, pcs[:])
-
- var c Call
-
- if n < 2 {
- return c
- }
-
- c.pc = pcs[1]
- if runtime.FuncForPC(pcs[0]) != sigpanic {
- c.pc--
- }
- c.fn = runtime.FuncForPC(c.pc)
- return c
-}
-
-// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
-func (c Call) String() string {
- return fmt.Sprint(c)
-}
-
-// MarshalText implements encoding.TextMarshaler. It formats the Call the same
-// as fmt.Sprintf("%v", c).
-func (c Call) MarshalText() ([]byte, error) {
- if c.fn == nil {
- return nil, ErrNoFunc
- }
- buf := bytes.Buffer{}
- fmt.Fprint(&buf, c)
- return buf.Bytes(), nil
-}
-
-// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
-// cause is a Call with the zero value.
-var ErrNoFunc = errors.New("no call stack information")
-
-// Format implements fmt.Formatter with support for the following verbs.
-//
-// %s source file
-// %d line number
-// %n function name
-// %v equivalent to %s:%d
-//
-// It accepts the '+' and '#' flags for most of the verbs as follows.
-//
-// %+s path of source file relative to the compile time GOPATH
-// %#s full path of source file
-// %+n import path qualified function name
-// %+v equivalent to %+s:%d
-// %#v equivalent to %#s:%d
-func (c Call) Format(s fmt.State, verb rune) {
- if c.fn == nil {
- fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
- return
- }
-
- switch verb {
- case 's', 'v':
- file, line := c.fn.FileLine(c.pc)
- switch {
- case s.Flag('#'):
- // done
- case s.Flag('+'):
- file = file[pkgIndex(file, c.fn.Name()):]
- default:
- const sep = "/"
- if i := strings.LastIndex(file, sep); i != -1 {
- file = file[i+len(sep):]
- }
- }
- io.WriteString(s, file)
- if verb == 'v' {
- buf := [7]byte{':'}
- s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
- }
-
- case 'd':
- _, line := c.fn.FileLine(c.pc)
- buf := [6]byte{}
- s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
-
- case 'n':
- name := c.fn.Name()
- if !s.Flag('+') {
- const pathSep = "/"
- if i := strings.LastIndex(name, pathSep); i != -1 {
- name = name[i+len(pathSep):]
- }
- const pkgSep = "."
- if i := strings.Index(name, pkgSep); i != -1 {
- name = name[i+len(pkgSep):]
- }
- }
- io.WriteString(s, name)
- }
-}
-
-// PC returns the program counter for this call frame; multiple frames may
-// have the same PC value.
-func (c Call) PC() uintptr {
- return c.pc
-}
-
-// name returns the import path qualified name of the function containing the
-// call.
-func (c Call) name() string {
- if c.fn == nil {
- return "???"
- }
- return c.fn.Name()
-}
-
-func (c Call) file() string {
- if c.fn == nil {
- return "???"
- }
- file, _ := c.fn.FileLine(c.pc)
- return file
-}
-
-func (c Call) line() int {
- if c.fn == nil {
- return 0
- }
- _, line := c.fn.FileLine(c.pc)
- return line
-}
-
-// CallStack records a sequence of function invocations from a goroutine
-// stack.
-type CallStack []Call
-
-// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
-func (cs CallStack) String() string {
- return fmt.Sprint(cs)
-}
-
-var (
- openBracketBytes = []byte("[")
- closeBracketBytes = []byte("]")
- spaceBytes = []byte(" ")
-)
-
-// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
-// same as fmt.Sprintf("%v", cs).
-func (cs CallStack) MarshalText() ([]byte, error) {
- buf := bytes.Buffer{}
- buf.Write(openBracketBytes)
- for i, pc := range cs {
- if pc.fn == nil {
- return nil, ErrNoFunc
- }
- if i > 0 {
- buf.Write(spaceBytes)
- }
- fmt.Fprint(&buf, pc)
- }
- buf.Write(closeBracketBytes)
- return buf.Bytes(), nil
-}
-
-// Format implements fmt.Formatter by printing the CallStack as square brackets
-// ([, ]) surrounding a space separated list of Calls each formatted with the
-// supplied verb and options.
-func (cs CallStack) Format(s fmt.State, verb rune) {
- s.Write(openBracketBytes)
- for i, pc := range cs {
- if i > 0 {
- s.Write(spaceBytes)
- }
- pc.Format(s, verb)
- }
- s.Write(closeBracketBytes)
-}
-
-// findSigpanic intentionally executes faulting code to generate a stack trace
-// containing an entry for runtime.sigpanic.
-func findSigpanic() *runtime.Func {
- var fn *runtime.Func
- var p *int
- func() int {
- defer func() {
- if p := recover(); p != nil {
- var pcs [512]uintptr
- n := runtime.Callers(2, pcs[:])
- for _, pc := range pcs[:n] {
- f := runtime.FuncForPC(pc)
- if f.Name() == "runtime.sigpanic" {
- fn = f
- break
- }
- }
- }
- }()
- // intentional nil pointer dereference to trigger sigpanic
- return *p
- }()
- return fn
-}
-
-var sigpanic = findSigpanic()
-
-// Trace returns a CallStack for the current goroutine with element 0
-// identifying the calling function.
-func Trace() CallStack {
- var pcs [512]uintptr
- n := runtime.Callers(2, pcs[:])
- cs := make([]Call, n)
-
- for i, pc := range pcs[:n] {
- pcFix := pc
- if i > 0 && cs[i-1].fn != sigpanic {
- pcFix--
- }
- cs[i] = Call{
- fn: runtime.FuncForPC(pcFix),
- pc: pcFix,
- }
- }
-
- return cs
-}
-
-// TrimBelow returns a slice of the CallStack with all entries below c
-// removed.
-func (cs CallStack) TrimBelow(c Call) CallStack {
- for len(cs) > 0 && cs[0].pc != c.pc {
- cs = cs[1:]
- }
- return cs
-}
-
-// TrimAbove returns a slice of the CallStack with all entries above c
-// removed.
-func (cs CallStack) TrimAbove(c Call) CallStack {
- for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
- cs = cs[:len(cs)-1]
- }
- return cs
-}
-
-// pkgIndex returns the index that results in file[index:] being the path of
-// file relative to the compile time GOPATH, and file[:index] being the
-// $GOPATH/src/ portion of file. funcName must be the name of a function in
-// file as returned by runtime.Func.Name.
-func pkgIndex(file, funcName string) int {
- // As of Go 1.6.2 there is no direct way to know the compile time GOPATH
- // at runtime, but we can infer the number of path segments in the GOPATH.
- // We note that runtime.Func.Name() returns the function name qualified by
- // the import path, which does not include the GOPATH. Thus we can trim
- // segments from the beginning of the file path until the number of path
- // separators remaining is one more than the number of path separators in
- // the function name. For example, given:
- //
- // GOPATH /home/user
- // file /home/user/src/pkg/sub/file.go
- // fn.Name() pkg/sub.Type.Method
- //
- // We want to produce:
- //
- // file[:idx] == /home/user/src/
- // file[idx:] == pkg/sub/file.go
- //
- // From this we can easily see that fn.Name() has one less path separator
- // than our desired result for file[idx:]. We count separators from the
- // end of the file path until it finds two more than in the function name
- // and then move one character forward to preserve the initial path
- // segment without a leading separator.
- const sep = "/"
- i := len(file)
- for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
- i = strings.LastIndex(file[:i], sep)
- if i == -1 {
- i = -len(sep)
- break
- }
- }
- // get back to 0 or trim the leading separator
- return i + len(sep)
-}
-
-var runtimePath string
-
-func init() {
- var pcs [1]uintptr
- runtime.Callers(0, pcs[:])
- fn := runtime.FuncForPC(pcs[0])
- file, _ := fn.FileLine(pcs[0])
-
- idx := pkgIndex(file, fn.Name())
-
- runtimePath = file[:idx]
- if runtime.GOOS == "windows" {
- runtimePath = strings.ToLower(runtimePath)
- }
-}
-
-func inGoroot(c Call) bool {
- file := c.file()
- if len(file) == 0 || file[0] == '?' {
- return true
- }
- if runtime.GOOS == "windows" {
- file = strings.ToLower(file)
- }
- return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
-}
-
-// TrimRuntime returns a slice of the CallStack with the topmost entries from
-// the go runtime removed. It considers any calls originating from unknown
-// files, files under GOROOT, or _testmain.go as part of the runtime.
-func (cs CallStack) TrimRuntime() CallStack {
- for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
- cs = cs[:len(cs)-1]
- }
- return cs
-}
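For orientation, typical use of the package removed above, limited to the exported API shown here (Caller, Trace, TrimRuntime and the documented format verbs); the file names and line numbers in the comments are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/go-stack/stack"
)

func main() {
	c := stack.Caller(0)
	fmt.Printf("%v\n", c)  // e.g. "main.go:11" (file:line)
	fmt.Printf("%+v\n", c) // e.g. "example/main.go:11" (relative to the compile-time GOPATH)
	fmt.Printf("%n\n", c)  // "main" (function name)

	// Call stack for the current goroutine with runtime frames trimmed.
	fmt.Printf("%v\n", stack.Trace().TrimRuntime())
}
```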
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
deleted file mode 100644
index 1b1b1921..00000000
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-Go support for Protocol Buffers - Google's data interchange format
-
-Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index e2e0651a..00000000
--- a/vendor/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C testdata
- protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
- make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index e98ddec9..00000000
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
- in := reflect.ValueOf(pb)
- if in.IsNil() {
- return pb
- }
-
- out := reflect.New(in.Type().Elem())
- // out is empty so a merge is a deep copy.
- mergeStruct(out.Elem(), in.Elem())
- return out.Interface().(Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- // Explicit test prior to mergeStruct so that mistyped nils will fail
- panic("proto: type mismatch")
- }
- if in.IsNil() {
- // Merging nil into non-nil is a quiet no-op
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, ok := in.Addr().Interface().(extendableProto); ok {
- emOut := out.Addr().Interface().(extendableProto)
- mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index f94b9f41..00000000
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,868 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "reflect"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- // x, n already 0
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
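The varint format described above is the standard protobuf base-128 encoding; for example, the two bytes 0xAC 0x02 decode to 300. A small, self-contained check using the package-level helper shown above:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 0xAC = 0b1010_1100: low 7 bits are 0x2C (44), continuation bit set.
	// 0x02 contributes 2<<7 = 256, so the decoded value is 256+44 = 300.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}
```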
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- // x, err already 0
-
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
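The zigzag mapping used by DecodeZigzag64 and DecodeZigzag32 interleaves negative and positive values (0→0, -1→1, 1→2, -2→3, …). A standalone sketch of the 64-bit decode formula used above:

```go
package main

import "fmt"

// zigzagDecode64 mirrors the expression in DecodeZigzag64 above: the low bit
// selects the sign and the remaining bits carry the magnitude.
func zigzagDecode64(u uint64) int64 {
	return int64((u >> 1) ^ uint64((int64(u&1)<<63)>>63))
}

func main() {
	for _, u := range []uint64{0, 1, 2, 3, 4} {
		fmt.Printf("%d -> %d\n", u, zigzagDecode64(u))
	}
	// 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2
}
```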
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
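DecodeRawBytes and DecodeStringBytes read a varint length prefix followed by that many bytes; for instance, the buffer {0x03, 'f', 'o', 'o'} decodes to the string "foo". A minimal check using the Buffer API shown above:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Length prefix 3 followed by the payload "foo".
	p := proto.NewBuffer([]byte{0x03, 'f', 'o', 'o'})
	s, err := p.DecodeStringBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // foo
}
```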
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
- oi := o.index
-
- err := o.skip(t, tag, wire)
- if err != nil {
- return err
- }
-
- if !unrecField.IsValid() {
- return nil
- }
-
- ptr := structPointer_Bytes(base, unrecField)
-
- // Add the skipped field to struct field
- obuf := o.buf
-
- o.buf = *ptr
- o.EncodeVarint(uint64(tag<<3 | wire))
- *ptr = append(o.buf, obuf[oi:o.index]...)
-
- o.buf = obuf
-
- return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
- var u uint64
- var err error
-
- switch wire {
- case WireVarint:
- _, err = o.DecodeVarint()
- case WireFixed64:
- _, err = o.DecodeFixed64()
- case WireBytes:
- _, err = o.DecodeRawBytes(false)
- case WireFixed32:
- _, err = o.DecodeFixed32()
- case WireStartGroup:
- for {
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- fwire := int(u & 0x7)
- if fwire == WireEndGroup {
- break
- }
- ftag := int(u >> 3)
- err = o.skip(t, ftag, fwire)
- if err != nil {
- break
- }
- }
- default:
- err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
- }
- return err
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- return UnmarshalMerge(buf, pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-func (p *Buffer) DecodeGroup(pb Message) error {
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
- return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(Unmarshaler); ok {
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
-
- err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
- if collectStats {
- stats.Decode++
- }
-
- return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
- var state errorState
- required, reqFields := prop.reqCount, uint64(0)
-
- var err error
- for err == nil && o.index < len(o.buf) {
- oi := o.index
- var u uint64
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- wire := int(u & 0x7)
- if wire == WireEndGroup {
- if is_group {
- return nil // input is satisfied
- }
- return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
- }
- tag := int(u >> 3)
- if tag <= 0 {
- return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
- }
- fieldnum, ok := prop.decoderTags.get(tag)
- if !ok {
- // Maybe it's an extension?
- if prop.extendable {
- if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
- if err = o.skip(st, tag, wire); err == nil {
- ext := e.ExtensionMap()[int32(tag)] // may be missing
- ext.enc = append(ext.enc, o.buf[oi:o.index]...)
- e.ExtensionMap()[int32(tag)] = ext
- }
- continue
- }
- }
- // Maybe it's a oneof?
- if prop.oneofUnmarshaler != nil {
- m := structPointer_Interface(base, st).(Message)
- // First return value indicates whether tag is a oneof field.
- ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
- if err == ErrInternalBadWireType {
- // Map the error to something more descriptive.
- // Do the formatting here to save generated code space.
- err = fmt.Errorf("bad wiretype for oneof field in %T", m)
- }
- if ok {
- continue
- }
- }
- err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
- continue
- }
- p := prop.Prop[fieldnum]
-
- if p.dec == nil {
- fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
- continue
- }
- dec := p.dec
- if wire != WireStartGroup && wire != p.WireType {
- if wire == WireBytes && p.packedDec != nil {
- // a packable field
- dec = p.packedDec
- } else {
- err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
- continue
- }
- }
- decErr := dec(o, p, base)
- if decErr != nil && !state.shouldContinue(decErr, p) {
- err = decErr
- }
- if err == nil && p.Required {
- // Successfully decoded a required field.
- if tag <= 64 {
- // use bitmap for fields 1-64 to catch field reuse.
- var mask uint64 = 1 << uint64(tag-1)
- if reqFields&mask == 0 {
- // new required field
- reqFields |= mask
- required--
- }
- } else {
- // This is imprecise. It can be fooled by a required field
- // with a tag > 64 that is encoded twice; that's very rare.
- // A fully correct implementation would require allocating
- // a data structure, which we would like to avoid.
- required--
- }
- }
- }
- if err == nil {
- if is_group {
- return io.ErrUnexpectedEOF
- }
- if state.err != nil {
- return state.err
- }
- if required > 0 {
- // Not enough information to determine the exact field. If we use extra
- // CPU, we could determine the field only if the missing required field
- // has a tag <= 64 and we check reqFields.
- return &RequiredNotSetError{"{Unknown}"}
- }
- }
- return err
-}
-
-// Individual type decoders
-// For each,
-// u is the decoded value,
-// v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
- boolPoolSize = 16
- uint32PoolSize = 8
- uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- if len(o.bools) == 0 {
- o.bools = make([]bool, boolPoolSize)
- }
- o.bools[0] = u != 0
- *structPointer_Bool(base, p.field) = &o.bools[0]
- o.bools = o.bools[1:]
- return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- *structPointer_BoolVal(base, p.field) = u != 0
- return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
- return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
- return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64_Set(structPointer_Word64(base, p.field), o, u)
- return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
- return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_String(base, p.field) = &s
- return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_StringVal(base, p.field) = s
- return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- *structPointer_Bytes(base, p.field) = b
- return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v := structPointer_BoolSlice(base, p.field)
- *v = append(*v, u != 0)
- return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
- v := structPointer_BoolSlice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded bools
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
-
- y := *v
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- y = append(y, u != 0)
- }
-
- *v = y
- return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- structPointer_Word32Slice(base, p.field).Append(uint32(u))
- return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int32s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(uint32(u))
- }
- return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
-
- structPointer_Word64Slice(base, p.field).Append(u)
- return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int64s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(u)
- }
- return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- v := structPointer_StringSlice(base, p.field)
- *v = append(*v, s)
- return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- v := structPointer_BytesSlice(base, p.field)
- *v = append(*v, b)
- return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- oi := o.index // index at the end of this map entry
- o.index -= len(raw) // move buffer back to start of map entry
-
- mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
- if mptr.Elem().IsNil() {
- mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
- }
- v := mptr.Elem() // map[K]V
-
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // See enc_new_map for why.
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
- keybase := toStructPointer(keyptr.Addr()) // **K
-
- var valbase structPointer
- var valptr reflect.Value
- switch p.mtype.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valptr = reflect.ValueOf(&dummy) // *[]byte
- valbase = toStructPointer(valptr) // *[]byte
- case reflect.Ptr:
- // message; valptr is **Msg; need to allocate the intermediate pointer
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valptr.Set(reflect.New(valptr.Type().Elem()))
- valbase = toStructPointer(valptr)
- default:
- // everything else
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valbase = toStructPointer(valptr.Addr()) // **V
- }
-
- // Decode.
- // This parses a restricted wire format, namely the encoding of a message
- // with two fields. See enc_new_map for the format.
- for o.index < oi {
- // tagcode for key and value properties are always a single byte
- // because they have tags 1 and 2.
- tagcode := o.buf[o.index]
- o.index++
- switch tagcode {
- case p.mkeyprop.tagcode[0]:
- if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- case p.mvalprop.tagcode[0]:
- if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
- return err
- }
- default:
- // TODO: Should we silently skip this instead?
- return fmt.Errorf("proto: bad map data tag %d", raw[0])
- }
- }
- keyelem, valelem := keyptr.Elem(), valptr.Elem()
- if !keyelem.IsValid() {
- keyelem = reflect.Zero(p.mtype.Key())
- }
- if !valelem.IsValid() {
- valelem = reflect.Zero(p.mtype.Elem())
- }
-
- v.SetMapIndex(keyelem, valelem)
- return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
- return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
- raw, e := o.DecodeRawBytes(false)
- if e != nil {
- return e
- }
-
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := structPointer_Interface(bas, p.stype)
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, false, bas)
- o.buf = obuf
- o.index = oi
-
- return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
- v := reflect.New(p.stype)
- bas := toStructPointer(v)
- structPointer_StructPointerSlice(base, p.field).Append(bas)
-
- if is_group {
- err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
- return err
- }
-
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := v.Interface()
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
- o.buf = obuf
- o.index = oi
-
- return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
deleted file mode 100644
index eb7e0474..00000000
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ /dev/null
@@ -1,1331 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "errors"
- "fmt"
- "reflect"
- "sort"
-)
-
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
- field string
-}
-
-func (e *RequiredNotSetError) Error() string {
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
-var (
- // errRepeatedHasNil is the error returned if Marshal is called with
- // a struct with a repeated field containing a nil element.
- errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
- // errOneofHasNil is the error returned if Marshal is called with
- // a struct with a oneof field containing a nil element.
- errOneofHasNil = errors.New("proto: oneof field has nil value")
-
- // ErrNil is the error returned if Marshal is called with nil.
- ErrNil = errors.New("proto: Marshal called with nil")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
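The varint format implemented here packs seven value bits per byte, least-significant group first, and sets the high bit on every byte except the last. A minimal standalone sketch of the same idea, outside the proto package:

package main

import "fmt"

// encodeVarint emits 7 value bits per byte, low-order groups first, with the
// continuation bit set on all but the final byte.
func encodeVarint(x uint64) []byte {
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

func main() {
	fmt.Printf("% x\n", encodeVarint(1))   // 01
	fmt.Printf("% x\n", encodeVarint(300)) // ac 02
}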
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-func sizeFixed64(x uint64) int {
- return 8
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-func sizeFixed32(x uint64) int {
- return 4
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-func sizeZigzag64(x uint64) int {
- return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-func sizeZigzag32(x uint64) int {
- return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
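The zigzag transform used by EncodeZigzag64 and EncodeZigzag32 maps signed values to unsigned ones so that numbers near zero stay short once varint-encoded: 0 becomes 0, -1 becomes 1, 1 becomes 2, -2 becomes 3, and so on. A small illustrative sketch of the 64-bit mapping and its inverse:

package main

import "fmt"

// zigzag64 interleaves negative and positive values: 0, -1, 1, -2, 2, ...
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63) // arithmetic right shift smears the sign bit
}

// unzigzag64 undoes the interleaving.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		z := zigzag64(v)
		fmt.Printf("%d -> %d -> %d\n", v, z, unzigzag64(z))
	}
}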
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-func sizeRawBytes(b []byte) int {
- return sizeVarint(uint64(len(b))) +
- len(b)
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-func sizeStringBytes(s string) int {
- return sizeVarint(uint64(len(s))) +
- len(s)
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- return m.Marshal()
- }
- p := NewBuffer(nil)
- err := p.Marshal(pb)
- var state errorState
- if err != nil && !state.shouldContinue(err, nil) {
- return nil, err
- }
- if p.buf == nil && err == nil {
- // Return a non-nil slice on success.
- return []byte{}, nil
- }
- return p.buf, err
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- var state errorState
- err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
- }
- return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- data, err := m.Marshal()
- if err != nil {
- return err
- }
- p.buf = append(p.buf, data...)
- return nil
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- err = p.enc_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- stats.Encode++
- }
-
- return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
- // Can the object marshal itself? If so, Size is slow.
- // TODO: add Size to Marshaler, or add a Sizer interface.
- if m, ok := pb.(Marshaler); ok {
- b, _ := m.Marshal()
- return len(b)
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return 0
- }
- if err == nil {
- n = size_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- stats.Size++
- }
-
- return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := 0
- if *v {
- x = 1
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
- v := *structPointer_BoolVal(base, p.field)
- if !v {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, 1)
- return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
- v := *structPointer_BoolVal(base, p.field)
- if !v && !p.oneof {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := word32_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := word32_Get(v)
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return ErrNil
- }
- x := word64_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return 0
- }
- x := word64_Get(v)
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := *v
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
- v := *structPointer_StringVal(base, p.field)
- if v == "" {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(v)
- return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return 0
- }
- x := *v
- n += len(p.tagcode)
- n += sizeStringBytes(x)
- return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_StringVal(base, p.field)
- if v == "" && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeStringBytes(v)
- return
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
- var state errorState
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return ErrNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- return state.err
- }
-
- o.buf = append(o.buf, p.tagcode...)
- return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return 0
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n0 := len(p.tagcode)
- n1 := sizeRawBytes(data)
- return n0 + n1
- }
-
- n0 := len(p.tagcode)
- n1 := size_struct(p.sprop, structp)
- n2 := sizeVarint(uint64(n1)) // size of encoded length
- return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
- var state errorState
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return ErrNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
- err := o.enc_struct(p.sprop, b)
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return 0
- }
-
- n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
- n += size_struct(p.sprop, b)
- n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- for _, x := range s {
- o.buf = append(o.buf, p.tagcode...)
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
- for _, x := range s {
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- n += len(p.tagcode)
- n += sizeVarint(uint64(l))
- n += l // each bool takes exactly one byte
- return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if s == nil {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if s == nil && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(buf, uint64(x))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- bufSize += p.valSize(uint64(x))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := s.Index(i)
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := s.Index(i)
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, uint64(s.Index(i)))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(uint64(s.Index(i)))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, s.Index(i))
- }
- return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- n += p.valSize(s.Index(i))
- }
- return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, s.Index(i))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(s.Index(i))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return 0
- }
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeRawBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeStringBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return errRepeatedHasNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- continue
- }
-
- o.buf = append(o.buf, p.tagcode...)
- err := o.enc_len_struct(p.sprop, structp, &state)
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
- }
- return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return // return the size up to this point
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n += len(p.tagcode)
- n += sizeRawBytes(data)
- continue
- }
-
- n0 := size_struct(p.sprop, structp)
- n1 := sizeVarint(uint64(n0)) // size of encoded length
- n += n0 + n1
- }
- return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return errRepeatedHasNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
- err := o.enc_struct(p.sprop, b)
-
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- }
- return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
- n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return // return size up to this point
- }
-
- n += size_struct(p.sprop, b)
- }
- return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
- v := *structPointer_ExtMap(base, p.field)
- if err := encodeExtensionMap(v); err != nil {
- return err
- }
- // Fast-path for common cases: zero or one extensions.
- if len(v) <= 1 {
- for _, e := range v {
- o.buf = append(o.buf, e.enc...)
- }
- return nil
- }
-
- // Sort keys to provide a deterministic encoding.
- keys := make([]int, 0, len(v))
- for k := range v {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- o.buf = append(o.buf, v[int32(k)].enc...)
- }
- return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
- v := *structPointer_ExtMap(base, p.field)
- return sizeExtensionMap(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
- var state errorState // XXX: or do we need to plumb this through?
-
- /*
- A map defined as
- map<key_type, value_type> map_field = N;
- is encoded in the same way as
- message MapFieldEntry {
- key_type key = 1;
- value_type value = 2;
- }
- repeated MapFieldEntry map_field = N;
- */
-
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
- if v.Len() == 0 {
- return nil
- }
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- enc := func() error {
- if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
- return err
- }
- return nil
- }
-
- // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
-
- // The only illegal map entry values are nil message pointers.
- if val.Kind() == reflect.Ptr && val.IsNil() {
- return errors.New("proto: map has nil element")
- }
-
- keycopy.Set(key)
- valcopy.Set(val)
-
- o.buf = append(o.buf, p.tagcode...)
- if err := o.enc_len_thing(enc, &state); err != nil {
- return err
- }
- }
- return nil
-}
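As the comment block in enc_new_map explains, every map entry goes on the wire as a nested length-delimited message whose key sits on field 1 and whose value sits on field 2. A hand-assembled illustration, assuming a hypothetical map<string, int32> field with number 3 and the single entry "a" -> 1:

package main

import "fmt"

func main() {
	// Inner MapFieldEntry message.
	entry := []byte{
		0x0a, 0x01, 'a', // field 1 (key), wire type 2 (length-delimited), length 1, "a"
		0x10, 0x01, // field 2 (value), wire type 0 (varint), value 1
	}
	// Outer field: tag (3<<3)|2, then the entry length, then the entry bytes.
	msg := append([]byte{0x1a, byte(len(entry))}, entry...)
	fmt.Printf("% x\n", msg) // 1a 05 0a 01 61 10 01
}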
-
-func size_new_map(p *Properties, base structPointer) int {
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- n := 0
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
- keycopy.Set(key)
- valcopy.Set(val)
-
- // Tag codes for key and val are the responsibility of the sub-sizer.
- keysize := p.mkeyprop.size(p.mkeyprop, keybase)
- valsize := p.mvalprop.size(p.mvalprop, valbase)
- entry := keysize + valsize
- // Add on tag code and length of map entry itself.
- n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
- }
- return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
- keycopy = reflect.New(mapType.Key()).Elem() // addressable K
- keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
- keyptr.Set(keycopy.Addr()) //
- keybase = toStructPointer(keyptr.Addr()) // **K
-
- // Value types are more varied and require special handling.
- switch mapType.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
- valbase = toStructPointer(valcopy.Addr())
- case reflect.Ptr:
- // message; the generated field type is map[K]*Msg (so V is *Msg),
- // so we only need one level of indirection.
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valbase = toStructPointer(valcopy.Addr())
- default:
- // everything else
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
- valptr.Set(valcopy.Addr()) //
- valbase = toStructPointer(valptr.Addr()) // **V
- }
- return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
- var state errorState
- // Encode fields in tag order so that decoders may use optimizations
- // that depend on the ordering.
- // https://developers.google.com/protocol-buffers/docs/encoding#order
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.enc != nil {
- err := p.enc(o, p, base)
- if err != nil {
- if err == ErrNil {
- if p.Required && state.err == nil {
- state.err = &RequiredNotSetError{p.Name}
- }
- } else if err == errRepeatedHasNil {
- // Give more context to nil values in repeated fields.
- return errors.New("repeated field " + p.OrigName + " has nil element")
- } else if !state.shouldContinue(err, p) {
- return err
- }
- }
- }
- }
-
- // Do oneof fields.
- if prop.oneofMarshaler != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- if err := prop.oneofMarshaler(m, o); err == ErrNil {
- return errOneofHasNil
- } else if err != nil {
- return err
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- if len(v) > 0 {
- o.buf = append(o.buf, v...)
- }
- }
-
- return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.size != nil {
- n += p.size(p, base)
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- n += len(v)
- }
-
- // Factor in any oneof fields.
- if prop.oneofSizer != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- n += prop.oneofSizer(m)
- }
-
- return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
- return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
- iLen := len(o.buf)
- o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
- iMsg := len(o.buf)
- err := enc()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- lMsg := len(o.buf) - iMsg
- lLen := sizeVarint(uint64(lMsg))
- switch x := lLen - (iMsg - iLen); {
- case x > 0: // actual length is x bytes larger than the space we reserved
- // Move msg x bytes right.
- o.buf = append(o.buf, zeroes[:x]...)
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- case x < 0: // actual length is x bytes smaller than the space we reserved
- // Move msg x bytes left.
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- o.buf = o.buf[:len(o.buf)+x] // x is negative
- }
- // Encode the length in the reserved space.
- o.buf = o.buf[:iLen]
- o.EncodeVarint(uint64(lMsg))
- o.buf = o.buf[:len(o.buf)+lMsg]
- return state.err
-}
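enc_len_thing avoids building the nested message in a scratch buffer: it reserves four bytes for the length, encodes in place, and then shifts the payload if the real varint length turns out shorter or longer. The simpler alternative it is trading against, sketched under the assumption that copying the payload once is acceptable:

package main

import "fmt"

func appendVarint(dst []byte, x uint64) []byte {
	for x >= 0x80 {
		dst = append(dst, byte(x)|0x80)
		x >>= 7
	}
	return append(dst, byte(x))
}

// appendLenPrefixed frames payload as varint(len(payload)) followed by payload,
// at the cost of materialising payload in its own buffer first.
func appendLenPrefixed(dst, payload []byte) []byte {
	dst = appendVarint(dst, uint64(len(payload)))
	return append(dst, payload...)
}

func main() {
	fmt.Printf("% x\n", appendLenPrefixed(nil, []byte("hello"))) // 05 68 65 6c 6c 6f
}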
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
- err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
- // Ignore unset required fields.
- reqNotSet, ok := err.(*RequiredNotSetError)
- if !ok {
- return false
- }
- if s.err == nil {
- if prop != nil {
- err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
- }
- s.err = err
- }
- return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
deleted file mode 100644
index f5db1def..00000000
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
- "bytes"
- "log"
- "reflect"
- "strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
- - Two messages are equal iff they are the same type,
- corresponding fields are equal, unknown field sets
- are equal, and extensions sets are equal.
- - Two set scalar fields are equal iff their values are equal.
- If the fields are of a floating-point type, remember that
- NaN != x for all x, including NaN. If the message is defined
- in a proto3 .proto file, fields are not "set"; specifically,
- zero length proto3 "bytes" fields are equal (nil == {}).
- - Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal (a "bytes" field,
- although represented by []byte, is not a repeated field)
- - Two unset fields are equal.
- - Two unknown field sets are equal if their current
- encoded state is equal.
- - Two extension sets are equal iff they have corresponding
- elements that are pairwise equal.
- - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
- if a == nil || b == nil {
- return a == b
- }
- v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
- if v1.Type() != v2.Type() {
- return false
- }
- if v1.Kind() == reflect.Ptr {
- if v1.IsNil() {
- return v2.IsNil()
- }
- if v2.IsNil() {
- return false
- }
- v1, v2 = v1.Elem(), v2.Elem()
- }
- if v1.Kind() != reflect.Struct {
- return false
- }
- return equalStruct(v1, v2)
-}
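One consequence of the floating-point rule in the Equal doc comment: NaN compares unequal to every value, itself included, so two otherwise identical messages carrying NaN in a float field are not Equal. The underlying Go comparison, for reference:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false; NaN is never equal to anything, even itself
}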
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
- sprop := GetProperties(v1.Type())
- for i := 0; i < v1.NumField(); i++ {
- f := v1.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- f1, f2 := v1.Field(i), v2.Field(i)
- if f.Type.Kind() == reflect.Ptr {
- if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
- // both unset
- continue
- } else if n1 != n2 {
- // set/unset mismatch
- return false
- }
- b1, ok := f1.Interface().(raw)
- if ok {
- b2 := f2.Interface().(raw)
- // RawMessage
- if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
- return false
- }
- continue
- }
- f1, f2 = f1.Elem(), f2.Elem()
- }
- if !equalAny(f1, f2, sprop.Prop[i]) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_extensions")
- if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
- return false
- }
- }
-
- uf := v1.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return true
- }
-
- u1 := uf.Bytes()
- u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- if !bytes.Equal(u1, u2) {
- return false
- }
-
- return true
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
- if v1.Type() == protoMessageType {
- m1, _ := v1.Interface().(Message)
- m2, _ := v2.Interface().(Message)
- return Equal(m1, m2)
- }
- switch v1.Kind() {
- case reflect.Bool:
- return v1.Bool() == v2.Bool()
- case reflect.Float32, reflect.Float64:
- return v1.Float() == v2.Float()
- case reflect.Int32, reflect.Int64:
- return v1.Int() == v2.Int()
- case reflect.Interface:
- // Probably a oneof field; compare the inner values.
- n1, n2 := v1.IsNil(), v2.IsNil()
- if n1 || n2 {
- return n1 == n2
- }
- e1, e2 := v1.Elem(), v2.Elem()
- if e1.Type() != e2.Type() {
- return false
- }
- return equalAny(e1, e2, nil)
- case reflect.Map:
- if v1.Len() != v2.Len() {
- return false
- }
- for _, key := range v1.MapKeys() {
- val2 := v2.MapIndex(key)
- if !val2.IsValid() {
- // This key was not found in the second map.
- return false
- }
- if !equalAny(v1.MapIndex(key), val2, nil) {
- return false
- }
- }
- return true
- case reflect.Ptr:
- return equalAny(v1.Elem(), v2.Elem(), prop)
- case reflect.Slice:
- if v1.Type().Elem().Kind() == reflect.Uint8 {
- // short circuit: []byte
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value.
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
- }
-
- if v1.Len() != v2.Len() {
- return false
- }
- for i := 0; i < v1.Len(); i++ {
- if !equalAny(v1.Index(i), v2.Index(i), prop) {
- return false
- }
- }
- return true
- case reflect.String:
- return v1.Interface().(string) == v2.Interface().(string)
- case reflect.Struct:
- return equalStruct(v1, v2)
- case reflect.Uint32, reflect.Uint64:
- return v1.Uint() == v2.Uint()
- }
-
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to compare %v", v1)
- return false
-}
-
-// base is the struct type that the extensions are based on.
-// em1 and em2 are extension maps.
-func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
- if len(em1) != len(em2) {
- return false
- }
-
- for extNum, e1 := range em1 {
- e2, ok := em2[extNum]
- if !ok {
- return false
- }
-
- m1, m2 := e1.value, e2.value
-
- if m1 != nil && m2 != nil {
- // Both are unencoded.
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- continue
- }
-
- // At least one is encoded. To do a semantically correct comparison
- // we need to unmarshal them first.
- var desc *ExtensionDesc
- if m := extensionMaps[base]; m != nil {
- desc = m[extNum]
- }
- if desc == nil {
- log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- continue
- }
- var err error
- if m1 == nil {
- m1, err = decodeExtension(e1.enc, desc)
- }
- if m2 == nil && err == nil {
- m2, err = decodeExtension(e2.enc, desc)
- }
- if err != nil {
- // The encoded form is invalid.
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
- return false
- }
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index 054f4f1d..00000000
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
- Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer that may be extended.
-type extendableProto interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- ExtensionMap() map[int32]Extension
-}
-
-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
- ExtendedType Message // nil pointer to the type that is being extended
- ExtensionType interface{} // nil pointer to the extension type
- Field int32 // field number
- Name string // fully-qualified name of extension, for text formatting
- Tag string // protobuf tag style
-}
-
-func (ed *ExtensionDesc) repeated() bool {
- t := reflect.TypeOf(ed.ExtensionType)
- return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
- // When an extension is stored in a message using SetExtension
- // only desc and value are set. When the message is marshaled
- // enc will be set to the encoded form of the message.
- //
- // When a message is unmarshaled and contains extensions, each
- // extension will have only enc set. When such an extension is
- // accessed using GetExtension (or GetExtensions) desc and value
- // will be set.
- desc *ExtensionDesc
- value interface{}
- enc []byte
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base extendableProto, id int32, b []byte) {
- base.ExtensionMap()[id] = Extension{enc: b}
-}
-
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
- for _, er := range pb.ExtensionRangeArray() {
- if er.Start <= field && field <= er.End {
- return true
- }
- }
- return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
- // Check the extended type.
- if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
- return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
- }
- // Check the range.
- if !isExtensionField(pb, extension.Field) {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
- base reflect.Type
- field int32
-}
-
-var extProp = struct {
- sync.RWMutex
- m map[extPropKey]*Properties
-}{
- m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
- key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
- extProp.RLock()
- if prop, ok := extProp.m[key]; ok {
- extProp.RUnlock()
- return prop
- }
- extProp.RUnlock()
-
- extProp.Lock()
- defer extProp.Unlock()
- // Check again.
- if prop, ok := extProp.m[key]; ok {
- return prop
- }
-
- prop := new(Properties)
- prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
- extProp.m[key] = prop
- return prop
-}
-
-// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
-func encodeExtensionMap(m map[int32]Extension) error {
- for k, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- p := NewBuffer(nil)
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- if err := props.enc(p, props, toStructPointer(x)); err != nil {
- return err
- }
- e.enc = p.buf
- m[k] = e
- }
- return nil
-}
-
-func sizeExtensionMap(m map[int32]Extension) (n int) {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- n += props.size(props, toStructPointer(x))
- }
- return
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
- // TODO: Check types, field numbers, etc.?
- _, ok := pb.ExtensionMap()[extension.Field]
- return ok
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
- // TODO: Check types, field numbers, etc.?
- delete(pb.ExtensionMap(), extension.Field)
-}
-
-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
-func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
- if err := checkExtensionTypes(pb, extension); err != nil {
- return nil, err
- }
-
- emap := pb.ExtensionMap()
- e, ok := emap[extension.Field]
- if !ok {
- // defaultExtensionValue returns the default value or
- // ErrMissingExtension if there is no default.
- return defaultExtensionValue(extension)
- }
-
- if e.value != nil {
- // Already decoded. Check the descriptor, though.
- if e.desc != extension {
- // This shouldn't happen. If it does, it means that
- // GetExtension was called twice with two different
- // descriptors with the same field number.
- return nil, errors.New("proto: descriptor conflict")
- }
- return e.value, nil
- }
-
- v, err := decodeExtension(e.enc, extension)
- if err != nil {
- return nil, err
- }
-
- // Remember the decoded version and drop the encoded version.
- // That way it is safe to mutate what we return.
- e.value = v
- e.desc = extension
- e.enc = nil
- emap[extension.Field] = e
- return e.value, nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
- t := reflect.TypeOf(extension.ExtensionType)
- props := extensionProperties(extension)
-
- sf, _, err := fieldDefault(t, props)
- if err != nil {
- return nil, err
- }
-
- if sf == nil || sf.value == nil {
- // There is no default value.
- return nil, ErrMissingExtension
- }
-
- if t.Kind() != reflect.Ptr {
- // We do not need to return a Ptr, we can directly return sf.value.
- return sf.value, nil
- }
-
- // We need to return an interface{} that is a pointer to sf.value.
- value := reflect.New(t).Elem()
- value.Set(reflect.New(value.Type().Elem()))
- if sf.kind == reflect.Int32 {
- // We may have an int32 or an enum, but the underlying data is int32.
- // Since we can't set an int32 into a non-int32 reflect.Value directly,
- // set it as an int32.
- value.Elem().SetInt(int64(sf.value.(int32)))
- } else {
- value.Elem().Set(reflect.ValueOf(sf.value))
- }
- return value.Interface(), nil
-}
-
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- o := NewBuffer(b)
-
- t := reflect.TypeOf(extension.ExtensionType)
-
- props := extensionProperties(extension)
-
- // t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate a "field" to store the pointer/slice itself; the
- // pointer/slice will be stored here. We pass
- // the address of this field to props.dec.
- // This passes a zero field and a *t and lets props.dec
- // interpret it as a *struct{ x t }.
- value := reflect.New(t).Elem()
-
- for {
- // Discard wire type and field number varint. It isn't needed.
- if _, err := o.DecodeVarint(); err != nil {
- return nil, err
- }
-
- if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
- return nil, err
- }
-
- if o.index >= len(o.buf) {
- break
- }
- }
- return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, ok := pb.(extendableProto)
- if !ok {
- err = errors.New("proto: not an extendable proto")
- return
- }
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
- if err != nil {
- return
- }
- }
- return
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
- if err := checkExtensionTypes(pb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
- }
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
- return nil
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
- }
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
- }
- m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
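
The extensionProperties helper removed above protects its descriptor cache with a read-locked fast path followed by a write-locked recheck before inserting. A minimal standalone sketch of that check/recheck pattern, using only the standard library; the cache type, key, and compute callback are invented for illustration:

    package main

    import (
        "fmt"
        "sync"
    )

    // cache mirrors the lock/recheck pattern of the removed extensionProperties:
    // a cheap read-locked lookup first, then a write-locked recheck so that two
    // goroutines racing on the same key do not both insert an entry.
    type cache struct {
        sync.RWMutex
        m map[string]int
    }

    func (c *cache) get(key string, compute func() int) int {
        c.RLock()
        if v, ok := c.m[key]; ok {
            c.RUnlock()
            return v
        }
        c.RUnlock()

        c.Lock()
        defer c.Unlock()
        // Check again: another goroutine may have filled the entry between
        // releasing the read lock and acquiring the write lock.
        if v, ok := c.m[key]; ok {
            return v
        }
        v := compute()
        c.m[key] = v
        return v
    }

    func main() {
        c := &cache{m: make(map[string]int)}
        fmt.Println(c.get("a", func() int { return 1 })) // computed
        fmt.Println(c.get("a", func() int { return 2 })) // cached: still 1
    }
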
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index 0de8f8df..00000000
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,894 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and an Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Getters are only generated for message and oneof fields.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/golang/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- // pools of basic types to amortize allocation.
- bools []bool
- uint32s []uint32
- uint64s []uint64
-
- // extra pools, only used with pointer_reflect.go
- int32s []int32
- int64s []int64
- float32s []float32
- float64s []float64
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- index := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- v = v.Elem()
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or []*T or map[T]*T
- switch f.Kind() {
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to a slice of the fields,
- // with its scalar fields set to their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{
- vs: vs,
- // default Less function: textual comparison
- less: func(a, b reflect.Value) bool {
- return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
- },
- }
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
- // numeric keys are sorted numerically.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
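
The removed lib.go also supplies the small pointer helpers (Bool, Int32, String, and friends) and the enum name utilities that generated code and callers rely on; with the vendored copy gone they are expected to come from the module-managed github.com/golang/protobuf/proto package instead. A minimal sketch of how those helpers are used, assuming a compatible version of that package stays on the module path; the values here are illustrative only:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // proto2 optional scalars are pointer fields; the helpers allocate a
        // value and hand back its address, e.g. msg.Label = proto.String("x").
        label := proto.String("hello")
        count := proto.Int32(42)
        fmt.Println(*label, *count)

        // EnumName resolves a numeric enum value to its symbolic name and
        // falls back to the decimal string for values missing from the map.
        names := map[int32]string{17: "X"}
        fmt.Println(proto.EnumName(names, 17), proto.EnumName(names, 3))
    }
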
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index e25e01e6..00000000
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "sort"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- if ms.find(pb) != nil {
- return true
- }
- return false
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
- if err := encodeExtensionMap(m); err != nil {
- return nil, err
- }
-
- // Sort extension IDs to provide a deterministic encoding.
- // See also enc_map in encode.go.
- ids := make([]int, 0, len(m))
- for id := range m {
- ids = append(ids, int(id))
- }
- sort.Ints(ids)
-
- ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
- for _, id := range ids {
- e := m[int32(id)]
- // Remove the wire type and field number varint, as well as the length varint.
- msg := skipVarint(skipVarint(e.enc))
-
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: Int32(int32(id)),
- Message: msg,
- })
- }
- return Marshal(ms)
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- if i > 0 {
- b.WriteByte(',')
- }
-
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
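
The message-set code removed above frames every item with a tag varint (field number shifted left by three, OR'd with the wire type) followed by a length varint, and skipVarint strips those prefixes again when re-packing extensions. A small self-contained sketch of that framing, mirroring skipVarint; the helper names and the sample field number are made up for illustration:

    package main

    import "fmt"

    // encodeVarint emits the base-128 varint framing the removed code relies on:
    // seven payload bits per byte, high bit set on every byte but the last.
    func encodeVarint(x uint64) []byte {
        var b []byte
        for x >= 0x80 {
            b = append(b, byte(x)|0x80)
            x >>= 7
        }
        return append(b, byte(x))
    }

    // skipVarint drops one varint from the front of buf, as the removed helper
    // does when stripping the tag and length prefixes from an encoded extension.
    func skipVarint(buf []byte) []byte {
        i := 0
        for buf[i]&0x80 != 0 {
            i++
        }
        return buf[i+1:]
    }

    func main() {
        const wireBytes = 2 // wire type for length-delimited fields
        fieldNum := uint64(3)
        tag := encodeVarint(fieldNum<<3 | wireBytes)
        framed := append(append(tag, encodeVarint(5)...), []byte("hello")...)
        fmt.Printf("framed: % x\n", framed)
        fmt.Printf("after skipping tag and length: %q\n", skipVarint(skipVarint(framed)))
    }
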
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index 98991417..00000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "math"
- "reflect"
-)
-
-// A structPointer is a pointer to a struct.
-type structPointer struct {
- v reflect.Value
-}
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-// The reflect value must itself be a pointer to a struct.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer{v}
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p.v.IsNil()
-}
-
-// Interface returns the struct pointer as an interface value.
-func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
- return p.v.Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// field returns the given field in the struct as a reflect value.
-func structPointer_field(p structPointer, f field) reflect.Value {
- // Special case: an extension map entry with a value of type T
- // passes a *T to the struct-handling code with a zero field,
- // expecting that it will be treated as equivalent to *struct{ X T },
- // which has the same memory layout. We have to handle that case
- // specially, because reflect will panic if we call FieldByIndex on a
- // non-struct.
- if f == nil {
- return p.v.Elem()
- }
-
- return p.v.Elem().FieldByIndex(f)
-}
-
-// ifield returns the given field in the struct as an interface value.
-func structPointer_ifield(p structPointer, f field) interface{} {
- return structPointer_field(p, f).Addr().Interface()
-}
-
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return structPointer_ifield(p, f).(*[]byte)
-}
-
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return structPointer_ifield(p, f).(*[][]byte)
-}
-
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return structPointer_ifield(p, f).(**bool)
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return structPointer_ifield(p, f).(*bool)
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return structPointer_ifield(p, f).(*[]bool)
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return structPointer_ifield(p, f).(**string)
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return structPointer_ifield(p, f).(*string)
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return structPointer_ifield(p, f).(*[]string)
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return structPointer_ifield(p, f).(*map[int32]Extension)
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return structPointer_field(p, f).Addr()
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- structPointer_field(p, f).Set(q.v)
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return structPointer{structPointer_field(p, f)}
-}
-
-// StructPointerSlice returns the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
- return structPointerSlice{structPointer_field(p, f)}
-}
-
-// A structPointerSlice represents the address of a slice of pointers to structs
-// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
-type structPointerSlice struct {
- v reflect.Value
-}
-
-func (p structPointerSlice) Len() int { return p.v.Len() }
-func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
-func (p structPointerSlice) Append(q structPointer) {
- p.v.Set(reflect.Append(p.v, q.v))
-}
-
-var (
- int32Type = reflect.TypeOf(int32(0))
- uint32Type = reflect.TypeOf(uint32(0))
- float32Type = reflect.TypeOf(float32(0))
- int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
- float64Type = reflect.TypeOf(float64(0))
-)
-
-// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
-type word32 struct {
- v reflect.Value
-}
-
-// IsNil reports whether p is nil.
-func word32_IsNil(p word32) bool {
- return p.v.IsNil()
-}
-
-// Set sets p to point at a newly allocated word with bits set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- t := p.v.Type().Elem()
- switch t {
- case int32Type:
- if len(o.int32s) == 0 {
- o.int32s = make([]int32, uint32PoolSize)
- }
- o.int32s[0] = int32(x)
- p.v.Set(reflect.ValueOf(&o.int32s[0]))
- o.int32s = o.int32s[1:]
- return
- case uint32Type:
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint32s[0]))
- o.uint32s = o.uint32s[1:]
- return
- case float32Type:
- if len(o.float32s) == 0 {
- o.float32s = make([]float32, uint32PoolSize)
- }
- o.float32s[0] = math.Float32frombits(x)
- p.v.Set(reflect.ValueOf(&o.float32s[0]))
- o.float32s = o.float32s[1:]
- return
- }
-
- // must be enum
- p.v.Set(reflect.New(t))
- p.v.Elem().SetInt(int64(int32(x)))
-}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32_Get(p word32) uint32 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32{structPointer_field(p, f)}
-}
-
-// A word32Val represents a field of type int32, uint32, float32, or enum.
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
-type word32Val struct {
- v reflect.Value
-}
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- switch p.v.Type() {
- case int32Type:
- p.v.SetInt(int64(x))
- return
- case uint32Type:
- p.v.SetUint(uint64(x))
- return
- case float32Type:
- p.v.SetFloat(float64(math.Float32frombits(x)))
- return
- }
-
- // must be enum
- p.v.SetInt(int64(int32(x)))
-}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32Val_Get(p word32Val) uint32 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val{structPointer_field(p, f)}
-}
-
-// A word32Slice is a slice of 32-bit values.
-// That is, v.Type() is []int32, []uint32, []float32, or []enum.
-type word32Slice struct {
- v reflect.Value
-}
-
-func (p word32Slice) Append(x uint32) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int32:
- elem.SetInt(int64(int32(x)))
- case reflect.Uint32:
- elem.SetUint(uint64(x))
- case reflect.Float32:
- elem.SetFloat(float64(math.Float32frombits(x)))
- }
-}
-
-func (p word32Slice) Len() int {
- return p.v.Len()
-}
-
-func (p word32Slice) Index(i int) uint32 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) word32Slice {
- return word32Slice{structPointer_field(p, f)}
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 struct {
- v reflect.Value
-}
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- t := p.v.Type().Elem()
- switch t {
- case int64Type:
- if len(o.int64s) == 0 {
- o.int64s = make([]int64, uint64PoolSize)
- }
- o.int64s[0] = int64(x)
- p.v.Set(reflect.ValueOf(&o.int64s[0]))
- o.int64s = o.int64s[1:]
- return
- case uint64Type:
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint64s[0]))
- o.uint64s = o.uint64s[1:]
- return
- case float64Type:
- if len(o.float64s) == 0 {
- o.float64s = make([]float64, uint64PoolSize)
- }
- o.float64s[0] = math.Float64frombits(x)
- p.v.Set(reflect.ValueOf(&o.float64s[0]))
- o.float64s = o.float64s[1:]
- return
- }
- panic("unreachable")
-}
-
-func word64_IsNil(p word64) bool {
- return p.v.IsNil()
-}
-
-func word64_Get(p word64) uint64 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
- }
- panic("unreachable")
-}
-
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64{structPointer_field(p, f)}
-}
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val struct {
- v reflect.Value
-}
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- switch p.v.Type() {
- case int64Type:
- p.v.SetInt(int64(x))
- return
- case uint64Type:
- p.v.SetUint(x)
- return
- case float64Type:
- p.v.SetFloat(math.Float64frombits(x))
- return
- }
- panic("unreachable")
-}
-
-func word64Val_Get(p word64Val) uint64 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
- }
- panic("unreachable")
-}
-
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val{structPointer_field(p, f)}
-}
-
-type word64Slice struct {
- v reflect.Value
-}
-
-func (p word64Slice) Append(x uint64) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int64:
- elem.SetInt(int64(int64(x)))
- case reflect.Uint64:
- elem.SetUint(uint64(x))
- case reflect.Float64:
- elem.SetFloat(float64(math.Float64frombits(x)))
- }
-}
-
-func (p word64Slice) Len() int {
- return p.v.Len()
-}
-
-func (p word64Slice) Index(i int) uint64 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return uint64(elem.Uint())
- case reflect.Float64:
- return math.Float64bits(float64(elem.Float()))
- }
- panic("unreachable")
-}
-
-func structPointer_Word64Slice(p structPointer, f field) word64Slice {
- return word64Slice{structPointer_field(p, f)}
-}
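
pointer_reflect.go is the reflect-only twin of the unsafe implementation deleted below, selected by the appengine/js build tags; its accessors resolve a field index path against a struct pointer and hand the encoder the field's address. A rough standalone sketch of that reflect-based access, using an invented example struct in place of a generated message:

    package main

    import (
        "fmt"
        "reflect"
    )

    // fieldAddr resolves a field index path against a struct pointer and returns
    // the address of that field, roughly what the removed structPointer_field and
    // structPointer_ifield helpers do.
    func fieldAddr(p interface{}, index []int) interface{} {
        v := reflect.ValueOf(p).Elem()
        return v.FieldByIndex(index).Addr().Interface()
    }

    // example stands in for a generated message struct; its layout is illustrative.
    type example struct {
        Label *string
        Reps  []int64
    }

    func main() {
        msg := &example{}

        // Write the *string field through its address, as the encoder does via the
        // **string returned by the String accessor.
        sp := fieldAddr(msg, []int{0}).(**string)
        s := "hello"
        *sp = &s
        fmt.Println(*msg.Label)

        // Grow the repeated field through its address in the same way.
        rp := fieldAddr(msg, []int{1}).(*[]int64)
        *rp = append(*rp, 1, 2, 3)
        fmt.Println(msg.Reps)
    }
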
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index ceece772..00000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "unsafe"
-)
-
-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-// type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
-
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
- return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != ^field(0)
-}
-
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StructPointerSlice returns the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
- return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-// IsNil reports whether *p is nil.
-func word32_IsNil(p word32) bool {
- return *p == nil
-}
-
-// Set sets *p to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- *p = &o.uint32s[0]
- o.uint32s = o.uint32s[1:]
-}
-
-// Get gets the value pointed at by *p.
-func word32_Get(p word32) uint32 {
- return **p
-}
-
-// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- *p = x
-}
-
-// Get gets the value pointed at by p.
-func word32Val_Get(p word32Val) uint32 {
- return *p
-}
-
-// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
-func (v *word32Slice) Len() int { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
- return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- *p = &o.uint64s[0]
- o.uint64s = o.uint64s[1:]
-}
-
-func word64_IsNil(p word64) bool {
- return *p == nil
-}
-
-func word64_Get(p word64) uint64 {
- return **p
-}
-
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- *p = x
-}
-
-func word64Val_Get(p word64Val) uint64 {
- return *p
-}
-
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
-
-func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
-func (v *word64Slice) Len() int { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
-
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
- return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
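
The pointer_unsafe.go file removed above resolves struct fields by raw byte offset rather than through reflect.Value. As a rough, self-contained sketch of that technique (the example struct and variable names below are illustrative only, not part of the vendored package): take the struct's base address, add reflect.StructField.Offset — which is exactly what toField records — and reinterpret the result as a typed pointer, which is what the structPointer_* accessors do for each supported field type.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// example is a stand-in for a generated message struct.
type example struct {
	A int32
	B string
}

func main() {
	v := example{A: 7, B: "hi"}
	base := unsafe.Pointer(&v) // what toStructPointer produces

	// Field offset, as toField would record it.
	f, _ := reflect.TypeOf(v).FieldByName("B")

	// Equivalent of structPointer_StringVal: base pointer plus byte offset,
	// reinterpreted as *string. The uintptr arithmetic is done in a single
	// expression so the intermediate value never outlives the conversion.
	bp := (*string)(unsafe.Pointer(uintptr(base) + f.Offset))
	fmt.Println(*bp) // hi
}
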
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index 880eb22d..00000000
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,850 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "fmt"
- "log"
- "os"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
-)
-
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
-)
-
-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
-type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
- unrecField field // field id of the XXX_unrecognized []byte field
- extendable bool // is this an extendable proto
-
- oneofMarshaler oneofMarshaler
- oneofUnmarshaler oneofUnmarshaler
- oneofSizer oneofSizer
- stype reflect.Type
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
- OneofTypes map[string]*OneofProperties
-}
-
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
-type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
- WireType int
- Tag int
- Required bool
- Optional bool
- Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field; set for []byte only
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
- def_uint64 uint64
-
- enc encoder
- valEnc valueEncoder // set for bool and numeric types only
- field field
- tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
- tagbuf [8]byte
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
- isMarshaler bool
- isUnmarshaler bool
-
- mtype reflect.Type // set for map types only
- mkeyprop *Properties // set for map types only
- mvalprop *Properties // set for map types only
-
- size sizer
- valSize valueSizer // set for bool and numeric types only
-
- dec decoder
- valDec valueDecoder // set for bool and numeric types only
-
- // If this is a packable field, this will be the decoder for the packed version of the field.
- packedDec decoder
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += ","
- s += strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
- s += ",json=" + p.JSONName
- }
- if p.proto3 {
- s += ",proto3"
- }
- if p.oneof {
- s += ",oneof"
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeVarint
- p.valDec = (*Buffer).DecodeVarint
- p.valSize = sizeVarint
- case "fixed32":
- p.WireType = WireFixed32
- p.valEnc = (*Buffer).EncodeFixed32
- p.valDec = (*Buffer).DecodeFixed32
- p.valSize = sizeFixed32
- case "fixed64":
- p.WireType = WireFixed64
- p.valEnc = (*Buffer).EncodeFixed64
- p.valDec = (*Buffer).DecodeFixed64
- p.valSize = sizeFixed64
- case "zigzag32":
- p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag32
- p.valDec = (*Buffer).DecodeZigzag32
- p.valSize = sizeZigzag32
- case "zigzag64":
- p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag64
- p.valDec = (*Buffer).DecodeZigzag64
- p.valSize = sizeZigzag64
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
- p.Optional = true
- case f == "rep":
- p.Repeated = true
- case f == "packed":
- p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
- p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break
- }
- }
- }
-}
-
-func logNoSliceEnc(t1, t2 reflect.Type) {
- fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- p.enc = nil
- p.dec = nil
- p.size = nil
-
- switch t1 := typ; t1.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
- // proto3 scalar types
-
- case reflect.Bool:
- p.enc = (*Buffer).enc_proto3_bool
- p.dec = (*Buffer).dec_proto3_bool
- p.size = size_proto3_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_proto3_int32
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_proto3_uint32
- p.dec = (*Buffer).dec_proto3_int32 // can reuse
- p.size = size_proto3_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_proto3_int64
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.String:
- p.enc = (*Buffer).enc_proto3_string
- p.dec = (*Buffer).dec_proto3_string
- p.size = size_proto3_string
-
- case reflect.Ptr:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
- break
- case reflect.Bool:
- p.enc = (*Buffer).enc_bool
- p.dec = (*Buffer).dec_bool
- p.size = size_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_int32
- p.dec = (*Buffer).dec_int32
- p.size = size_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_uint32
- p.dec = (*Buffer).dec_int32 // can reuse
- p.size = size_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_int64
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_int32
- p.size = size_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.String:
- p.enc = (*Buffer).enc_string
- p.dec = (*Buffer).dec_string
- p.size = size_string
- case reflect.Struct:
- p.stype = t1.Elem()
- p.isMarshaler = isMarshaler(t1)
- p.isUnmarshaler = isUnmarshaler(t1)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_struct_message
- p.dec = (*Buffer).dec_struct_message
- p.size = size_struct_message
- } else {
- p.enc = (*Buffer).enc_struct_group
- p.dec = (*Buffer).dec_struct_group
- p.size = size_struct_group
- }
- }
-
- case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- logNoSliceEnc(t1, t2)
- break
- case reflect.Bool:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_bool
- p.size = size_slice_packed_bool
- } else {
- p.enc = (*Buffer).enc_slice_bool
- p.size = size_slice_bool
- }
- p.dec = (*Buffer).dec_slice_bool
- p.packedDec = (*Buffer).dec_slice_packed_bool
- case reflect.Int32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int32
- p.size = size_slice_packed_int32
- } else {
- p.enc = (*Buffer).enc_slice_int32
- p.size = size_slice_int32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Uint32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Int64, reflect.Uint64:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- case reflect.Uint8:
- p.enc = (*Buffer).enc_slice_byte
- p.dec = (*Buffer).dec_slice_byte
- p.size = size_slice_byte
- // This is a []byte, which is either a bytes field,
- // or the value of a map field. In the latter case,
- // we always encode an empty []byte, so we should not
- // use the proto3 enc/size funcs.
- // f == nil iff this is the key/value of a map field.
- if p.proto3 && f != nil {
- p.enc = (*Buffer).enc_proto3_slice_byte
- p.size = size_proto3_slice_byte
- }
- case reflect.Float32, reflect.Float64:
- switch t2.Bits() {
- case 32:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case 64:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- default:
- logNoSliceEnc(t1, t2)
- break
- }
- case reflect.String:
- p.enc = (*Buffer).enc_slice_string
- p.dec = (*Buffer).dec_slice_string
- p.size = size_slice_string
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
- break
- case reflect.Struct:
- p.stype = t2.Elem()
- p.isMarshaler = isMarshaler(t2)
- p.isUnmarshaler = isUnmarshaler(t2)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_slice_struct_message
- p.dec = (*Buffer).dec_slice_struct_message
- p.size = size_slice_struct_message
- } else {
- p.enc = (*Buffer).enc_slice_struct_group
- p.dec = (*Buffer).dec_slice_struct_group
- p.size = size_slice_struct_group
- }
- }
- case reflect.Slice:
- switch t2.Elem().Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
- break
- case reflect.Uint8:
- p.enc = (*Buffer).enc_slice_slice_byte
- p.dec = (*Buffer).dec_slice_slice_byte
- p.size = size_slice_slice_byte
- }
- }
-
- case reflect.Map:
- p.enc = (*Buffer).enc_new_map
- p.dec = (*Buffer).dec_new_map
- p.size = size_new_map
-
- p.mtype = t1
- p.mkeyprop = &Properties{}
- p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.mvalprop = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
- p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
-
- // precalculate tag code
- wire := p.WireType
- if p.Packed {
- wire = WireBytes
- }
- x := uint32(p.Tag)<<3 | uint32(wire)
- i := 0
- for i = 0; x > 127; i++ {
- p.tagbuf[i] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- p.tagbuf[i] = uint8(x)
- p.tagcode = p.tagbuf[0 : i+1]
-
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
- }
- }
-}
-
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
- unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
-)
-
-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isMarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isMarshaler")
- }
- return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isUnmarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isUnmarshaler")
- }
- return t.Implements(unmarshalerType)
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
- p.Name = name
- p.OrigName = name
- if f != nil {
- p.field = toField(f)
- }
- if tag == "" {
- return
- }
- p.Parse(tag)
- p.setEncAndDec(typ, f, lockGetProp)
-}
-
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
-func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- if collectStats {
- stats.Chit++
- }
- return sprop
- }
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
-}
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
- return prop
- }
- if collectStats {
- stats.Cmiss++
- }
-
- prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
- prop.unrecField = invalidField
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
-
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
-
- if f.Name == "XXX_extensions" { // special case
- p.enc = (*Buffer).enc_map
- p.dec = nil // not needed
- p.size = size_map
- }
- if f.Name == "XXX_unrecognized" { // special case
- prop.unrecField = toField(&f)
- }
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
- }
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
- }
- if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
- fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
- }
- }
-
- // Re-order prop.order.
- sort.Sort(prop)
-
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
- var oots []interface{}
- prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
- prop.stype = t
-
- // Interpret oneof metadata.
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
- Prop: new(Properties),
- }
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
- }
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
- }
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
- }
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
- }
- prop.reqCount = reqCount
-
- return prop
-}
-
-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
- if len(x) != 1 {
- fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
- return nil
- }
- prop := GetProperties(t)
- return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
- if pb == nil {
- err = ErrNil
- return
- }
- // get the reflect type of the pointer to the struct.
- t = reflect.TypeOf(pb)
- // get the address of the struct.
- value := reflect.ValueOf(pb)
- b = toStructPointer(value)
- return
-}
-
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypes = make(map[string]reflect.Type)
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypes[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
-
-// MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
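
properties.go, removed above, is driven entirely by the protobuf struct tags emitted by the protobuf code generator. The sketch below is illustrative only (the tag value is the file's own example, "bytes,49,opt,name=foo,def=hello!"); it shows the two steps Parse and setEncAndDec perform: splitting the tag into wire type, field number, and options, then pre-computing the varint-encoded key ((field number << 3) | wire type) that the encoders write before each value.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// The tag layout Properties.Parse expects: "wire,tag,opt,...".
	tag := "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(tag, ",")

	wire := fields[0]                 // varint, fixed32, fixed64, zigzag32/64, bytes, group
	num, _ := strconv.Atoi(fields[1]) // protobuf field number
	opts := fields[2:]                // req/opt/rep/packed, name=, json=, enum=, def=
	// (The real parser re-joins any commas after "def=", which is always last.)

	fmt.Println(wire, num, opts) // bytes 49 [opt name=foo def=hello!]

	// Pre-computed tag bytes, as in setEncAndDec: key = (field number << 3) | wire
	// type, written as a varint (WireBytes == 2).
	key := uint32(num)<<3 | 2
	var tagcode []byte
	for key > 127 {
		tagcode = append(tagcode, byte(key&0x7F)|0x80)
		key >>= 7
	}
	tagcode = append(tagcode, byte(key))
	fmt.Printf("%#v\n", tagcode) // []byte{0x8a, 0x3}
}
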
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 37c95357..00000000
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,849 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- gtNewline = []byte(">\n")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Printf("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-// raw is the interface satisfied by RawMessage.
-type raw interface {
- Bytes() []byte
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z._/], it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("\n")); err != nil {
- return err
- }
- continue
- }
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.mvalprop); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if b, ok := fv.Interface().(raw); ok {
- if err := writeRaw(w, b.Bytes()); err != nil {
- return err
- }
- continue
- }
-
- // Enums have a String method, so writeAny will work fine.
- if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv.Addr()
- if pv.Type().Implements(extendableProtoType) {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// writeRaw writes an uninterpreted raw message.
-func writeRaw(w *textWriter, b []byte) error {
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if err := writeUnknownStruct(w, b); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Interface().([]byte))); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, err := fmt.Fprintf(w, "/* %v */\n", err)
- return err
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, err := w.Write(endBraceNewline); err != nil {
- return err
- }
- continue
- }
- if _, err := fmt.Fprint(w, tag); err != nil {
- return err
- }
- if wire != WireStartGroup {
- if err := w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err = w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- ep := pv.Interface().(extendableProto)
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
- m := ep.ExtensionMap()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(ep, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte("<nil>"))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
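
Most of text.go above is plumbing around textWriter; the subtle piece is writeString, which escapes strings byte-by-byte (not rune-by-rune) and falls back to three-digit octal escapes so the output stays compatible with other language implementations of the text format. Below is a minimal, standalone re-implementation of just that escape loop, for illustration only (escapeTextBytes is a made-up name; the deleted code keeps this logic inside writeString).

package main

import "fmt"

// escapeTextBytes applies the same rules as writeString: a few named
// backslash escapes, printable ASCII passed through, and everything else
// emitted as a three-digit octal escape of the raw byte.
func escapeTextBytes(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			out = append(out, '\\', 'n')
		case '\r':
			out = append(out, '\\', 'r')
		case '\t':
			out = append(out, '\\', 't')
		case '"':
			out = append(out, '\\', '"')
		case '\\':
			out = append(out, '\\', '\\')
		default:
			if c >= 0x20 && c < 0x7f { // isprint
				out = append(out, c)
			} else {
				out = append(out, []byte(fmt.Sprintf("\\%03o", c))...)
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	// The é is two UTF-8 bytes, each escaped individually as octal.
	fmt.Println(escapeTextBytes("héllo\n")) // "h\303\251llo\n"
}
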
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index b5fba5b2..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,872 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
- errBadHex = errors.New("proto: bad hexadecimal")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- base := 8
- ss := s[:2]
- s = s[2:]
- if r == 'x' || r == 'X' {
- base = 16
- } else {
- ss = string(r) + ss
- }
- i, err := strconv.ParseUint(ss, base, 8)
- if err != nil {
- return "", "", err
- }
- return string([]byte{byte(i)}), s, nil
- case 'u', 'U':
- n := 4
- if r == 'U' {
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
- }
-
- bs := make([]byte, n/2)
- for i := 0; i < n; i += 2 {
- a, ok1 := unhex(s[i])
- b, ok2 := unhex(s[i+1])
- if !ok1 || !ok2 {
- return "", "", errBadHex
- }
- bs[i/2] = a<<4 | b
- }
- s = s[n:]
- return string(bs), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Adapted from src/pkg/strconv/quote.go.
-func unhex(b byte) (v byte, ok bool) {
- switch {
- case '0' <= b && b <= '9':
- return b - '0', true
- case 'a' <= b && b <= 'f':
- return b - 'a' + 10, true
- case 'A' <= b && b <= 'F':
- return b - 'A' + 10, true
- }
- return 0, false
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(extendableProto)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- sv.Field(oop.Field).Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // Technically the "key" and "value" could come in any order,
- // but in practice they won't.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- if err := p.consumeToken("key"); err != nil {
- return err
- }
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.mkeyprop); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- if err := p.consumeToken("value"); err != nil {
- return err
- }
- if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.mvalprop); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- if err := p.consumeToken(terminator); err != nil {
- return err
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
-
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return p.errorf("expected ']' or ',', found %q", tok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // Either "true", "false", 1 or 0.
- switch tok.value {
- case "true", "1":
- fv.SetBool(true)
- return nil
- case "false", "0":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- err := um.UnmarshalText([]byte(s))
- return err
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
- return pe
- }
- return nil
-}
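
To make the parser above concrete, here is a minimal, hedged sketch of feeding text-format input to `UnmarshalText`. The `Greeting` type, its fields, and the input string are illustrative assumptions; real code would use protoc-generated message types, but any struct with `protobuf` tags that satisfies the pre-module `proto.Message` interface (Reset/String/ProtoMessage) is handled the same way.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Greeting is a hand-rolled stand-in for a protoc-generated message.
type Greeting struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,opt,name=count"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	var g Greeting
	// Adjacent quoted strings are concatenated by the tokenizer,
	// so the name below parses as "hello world".
	if err := proto.UnmarshalText(`name: "hello " "world" count: 3`, &g); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*g.Name, *g.Count) // hello world 3
}
```

Comments (`# ...`), optional `;`/`,` separators, and `{}`/`<>` message delimiters are all handled by the same tokenizer shown above.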
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
deleted file mode 100644
index 469b4490..00000000
--- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
deleted file mode 100644
index 68668029..00000000
--- a/vendor/github.com/imdario/mergo/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2013 Dario Castañé. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
deleted file mode 100644
index 4f142083..00000000
--- a/vendor/github.com/imdario/mergo/README.md
+++ /dev/null
@@ -1,238 +0,0 @@
-# Mergo
-
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
-
-Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
-
-## Status
-
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
-
-[![GoDoc][3]][4]
-[![GoCard][5]][6]
-[![Build Status][1]][2]
-[![Coverage Status][7]][8]
-[![Sourcegraph][9]][10]
-
-[1]: https://travis-ci.org/imdario/mergo.png
-[2]: https://travis-ci.org/imdario/mergo
-[3]: https://godoc.org/github.com/imdario/mergo?status.svg
-[4]: https://godoc.org/github.com/imdario/mergo
-[5]: https://goreportcard.com/badge/imdario/mergo
-[6]: https://goreportcard.com/report/github.com/imdario/mergo
-[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
-[8]: https://coveralls.io/github/imdario/mergo?branch=master
-[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
-[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
-
-### Latest release
-
-[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6).
-
-### Important note
-
-Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed the `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
-
-If you were using Mergo **before** April 6th 2015, please check that your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issues caused by its previous behavior, and for any future bugs that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
-
-### Donations
-
-If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
-
-
-
-
-### Mergo in the wild
-
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](https://github.com/supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-
-## Installation
-
- go get github.com/imdario/mergo
-
- // use in your .go code
- import (
- "github.com/imdario/mergo"
- )
-
-## Usage
-
-You can only merge structs of the same type (and maps of the same types). Mergo fills in `dst`'s exported fields that are set to the zero value of their type; it won't merge unexported (private) fields, but it merges any exported field recursively. It won't fill fields holding an empty struct either, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value). Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
-
-```go
-if err := mergo.Merge(&dst, src); err != nil {
- // ...
-}
-```
-
-You can also merge with overwriting, replacing non-empty `dst` values with `src` values, by passing the `WithOverride` option.
-
-```go
-if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
- // ...
-}
-```
-
-Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as `Merge()`. Keys are capitalized to find each corresponding exported field.
-
-```go
-if err := mergo.Map(&dst, srcMap); err != nil {
- // ...
-}
-```
-
-Warning: if you map a struct to a map, the mapping is not recursive. Don't expect Mergo to turn struct members of your struct into `map[string]interface{}`; they will simply be assigned as values, as the sketch below illustrates.
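
As a rough illustration of that caveat (the `Outer`/`Inner` types, their fields, and the values are made up for this sketch), mapping a struct that contains another struct leaves the inner struct as a plain value in the resulting map:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Inner struct{ Host string }

type Outer struct {
	Name  string
	Inner Inner
}

func main() {
	src := Outer{Name: "api", Inner: Inner{Host: "localhost"}}
	dst := map[string]interface{}{}
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	// dst["inner"] holds an Inner value, not a map[string]interface{}.
	fmt.Printf("%#v\n", dst)
}
```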
-
-More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
-
-### Nice example
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/imdario/mergo"
-)
-
-type Foo struct {
- A string
- B int64
-}
-
-func main() {
- src := Foo{
- A: "one",
- B: 2,
- }
- dest := Foo{
- A: "two",
- }
- mergo.Merge(&dest, src)
- fmt.Println(dest)
- // Will print
- // {two 2}
-}
-```
-
-Note: if tests are failing due to a missing package, please execute:
-
- go get gopkg.in/yaml.v2
-
-### Transformers
-
-Transformers let you merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; Mergo doesn't treat a struct as an empty value, but `time.Time`'s `IsZero` method can tell us when it is unset because all of its fields hold zero values. How can we merge a non-zero `time.Time`?
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/imdario/mergo"
- "reflect"
- "time"
-)
-
-type timeTransformer struct {
-}
-
-func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
- if typ == reflect.TypeOf(time.Time{}) {
- return func(dst, src reflect.Value) error {
- if dst.CanSet() {
- isZero := dst.MethodByName("IsZero")
- result := isZero.Call([]reflect.Value{})
- if result[0].Bool() {
- dst.Set(src)
- }
- }
- return nil
- }
- }
- return nil
-}
-
-type Snapshot struct {
- Time time.Time
- // ...
-}
-
-func main() {
- src := Snapshot{time.Now()}
- dest := Snapshot{}
- mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
- fmt.Println(dest)
- // Will print
- // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
-}
-```
-
-
-## Contact me
-
-If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
-
-## About
-
-Written by [Dario Castañé](http://dario.im).
-
-
-## License
-
-[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, the same as the [Go language](http://golang.org/LICENSE).
-
-
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
deleted file mode 100644
index 6e9aa7ba..00000000
--- a/vendor/github.com/imdario/mergo/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package mergo merges same-type structs and maps by setting default values in zero-value fields.
-
-Mergo won't merge unexported (private) fields, but it will merge any exported field recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
-
-Usage
-
-From my own work-in-progress project:
-
- type networkConfig struct {
- Protocol string
- Address string
- ServerType string `json: "server_type"`
- Port uint16
- }
-
- type FssnConfig struct {
- Network networkConfig
- }
-
- var fssnDefault = FssnConfig {
- networkConfig {
- "tcp",
- "127.0.0.1",
- "http",
- 31560,
- },
- }
-
- // Inside a function [...]
-
- if err := mergo.Merge(&config, fssnDefault); err != nil {
- log.Fatal(err)
- }
-
- // More code [...]
-
-*/
-package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
deleted file mode 100644
index 6ea38e63..00000000
--- a/vendor/github.com/imdario/mergo/map.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "fmt"
- "reflect"
- "unicode"
- "unicode/utf8"
-)
-
-func changeInitialCase(s string, mapper func(rune) rune) string {
- if s == "" {
- return s
- }
- r, n := utf8.DecodeRuneInString(s)
- return string(mapper(r)) + s[n:]
-}
-
-func isExported(field reflect.StructField) bool {
- r, _ := utf8.DecodeRuneInString(field.Name)
- return r >= 'A' && r <= 'Z'
-}
-
-// Recursively traverses both values, assigning src's field values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
- overwrite := config.Overwrite
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
- zeroValue := reflect.Value{}
- switch dst.Kind() {
- case reflect.Map:
- dstMap := dst.Interface().(map[string]interface{})
- for i, n := 0, src.NumField(); i < n; i++ {
- srcType := src.Type()
- field := srcType.Field(i)
- if !isExported(field) {
- continue
- }
- fieldName := field.Name
- fieldName = changeInitialCase(fieldName, unicode.ToLower)
- if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
- dstMap[fieldName] = src.Field(i).Interface()
- }
- }
- case reflect.Ptr:
- if dst.IsNil() {
- v := reflect.New(dst.Type().Elem())
- dst.Set(v)
- }
- dst = dst.Elem()
- fallthrough
- case reflect.Struct:
- srcMap := src.Interface().(map[string]interface{})
- for key := range srcMap {
- srcValue := srcMap[key]
- fieldName := changeInitialCase(key, unicode.ToUpper)
- dstElement := dst.FieldByName(fieldName)
- if dstElement == zeroValue {
- // We discard it because the field doesn't exist.
- continue
- }
- srcElement := reflect.ValueOf(srcValue)
- dstKind := dstElement.Kind()
- srcKind := srcElement.Kind()
- if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
- srcElement = srcElement.Elem()
- srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
- } else if dstKind == reflect.Ptr {
- // Can this work? I guess it can't.
- if srcKind != reflect.Ptr && srcElement.CanAddr() {
- srcPtr := srcElement.Addr()
- srcElement = reflect.ValueOf(srcPtr)
- srcKind = reflect.Ptr
- }
- }
-
- if !srcElement.IsValid() {
- continue
- }
- if srcKind == dstKind {
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
- if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else if srcKind == reflect.Map {
- if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
- return
- }
- } else {
- return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
- }
- }
- }
- return
-}
-
-// Map sets fields' values in dst from src.
-// src can be a map with string keys or a struct. dst must be the opposite:
-// if src is a map, dst must be a valid pointer to struct. If src is a struct,
-// dst must be map[string]interface{}.
-// It won't merge unexported (private) fields, and it will merge any
-// exported field recursively.
-// If dst is a map, keys will be src fields' names in lower camel case.
-// A key in src that doesn't match a field in dst will be skipped. This
-// doesn't apply if dst is a map.
-// This is a separate method from Merge because it is cleaner and it keeps sane
-// semantics: merging equal types, mapping different (restricted) types.
-func Map(dst, src interface{}, opts ...func(*Config)) error {
- return _map(dst, src, opts...)
-}
-
-// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: Use Map(…) with WithOverride
-func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
- return _map(dst, src, append(opts, WithOverride)...)
-}
-
-func _map(dst, src interface{}, opts ...func(*Config)) error {
- var (
- vDst, vSrc reflect.Value
- err error
- )
- config := &Config{}
-
- for _, opt := range opts {
- opt(config)
- }
-
- if vDst, vSrc, err = resolveValues(dst, src); err != nil {
- return err
- }
- // To be frictionless, we redirect equal-type arguments
- // to deepMerge, since the arguments can be anything.
- if vSrc.Kind() == vDst.Kind() {
- return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
- }
- switch vSrc.Kind() {
- case reflect.Struct:
- if vDst.Kind() != reflect.Map {
- return ErrExpectedMapAsDestination
- }
- case reflect.Map:
- if vDst.Kind() != reflect.Struct {
- return ErrExpectedStructAsDestination
- }
- default:
- return ErrNotSupported
- }
- return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
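
A short usage sketch of the `Map` function documented above, filling a struct from a `map[string]interface{}` of defaults. The `Config` type, its fields, and the default values are assumptions made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Address string
	Port    int
}

func main() {
	defaults := map[string]interface{}{
		"address": "127.0.0.1", // keys are capitalized to match exported fields
		"port":    9108,
	}

	var cfg Config
	if err := mergo.Map(&cfg, defaults); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", cfg) // {Address:127.0.0.1 Port:9108}
}
```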
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
deleted file mode 100644
index 44f70a89..00000000
--- a/vendor/github.com/imdario/mergo/merge.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "fmt"
- "reflect"
-)
-
-func hasExportedField(dst reflect.Value) (exported bool) {
- for i, n := 0, dst.NumField(); i < n; i++ {
- field := dst.Type().Field(i)
- if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
- exported = exported || hasExportedField(dst.Field(i))
- } else {
- exported = exported || len(field.PkgPath) == 0
- }
- }
- return
-}
-
-type Config struct {
- Overwrite bool
- AppendSlice bool
- Transformers Transformers
-}
-
-type Transformers interface {
- Transformer(reflect.Type) func(dst, src reflect.Value) error
-}
-
-// Recursively traverses both values, assigning src's field values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
- overwrite := config.Overwrite
-
- if !src.IsValid() {
- return
- }
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
-
- if config.Transformers != nil && !isEmptyValue(dst) {
- if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
- err = fn(dst, src)
- return
- }
- }
-
- switch dst.Kind() {
- case reflect.Struct:
- if hasExportedField(dst) {
- for i, n := 0, dst.NumField(); i < n; i++ {
- if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
- return
- }
- }
- } else {
- if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- }
- case reflect.Map:
- if dst.IsNil() && !src.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- for _, key := range src.MapKeys() {
- srcElement := src.MapIndex(key)
- if !srcElement.IsValid() {
- continue
- }
- dstElement := dst.MapIndex(key)
- switch srcElement.Kind() {
- case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
- if srcElement.IsNil() {
- continue
- }
- fallthrough
- default:
- if !srcElement.CanInterface() {
- continue
- }
- switch reflect.TypeOf(srcElement.Interface()).Kind() {
- case reflect.Struct:
- fallthrough
- case reflect.Ptr:
- fallthrough
- case reflect.Map:
- srcMapElm := srcElement
- dstMapElm := dstElement
- if srcMapElm.CanInterface() {
- srcMapElm = reflect.ValueOf(srcMapElm.Interface())
- if dstMapElm.IsValid() {
- dstMapElm = reflect.ValueOf(dstMapElm.Interface())
- }
- }
- if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
- return
- }
- case reflect.Slice:
- srcSlice := reflect.ValueOf(srcElement.Interface())
-
- var dstSlice reflect.Value
- if !dstElement.IsValid() || dstElement.IsNil() {
- dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
- } else {
- dstSlice = reflect.ValueOf(dstElement.Interface())
- }
-
- if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
- dstSlice = srcSlice
- } else if config.AppendSlice {
- if srcSlice.Type() != dstSlice.Type() {
- return fmt.Errorf("cannot append two slices with different types (%s, %s)", srcSlice.Type(), dstSlice.Type())
- }
- dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
- }
- dst.SetMapIndex(key, dstSlice)
- }
- }
- if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
- continue
- }
-
- if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- dst.SetMapIndex(key, srcElement)
- }
- }
- case reflect.Slice:
- if !dst.CanSet() {
- break
- }
- if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
- dst.Set(src)
- } else if config.AppendSlice {
- if src.Type() != dst.Type() {
- return fmt.Errorf("cannot append two slices with different types (%s, %s)", src.Type(), dst.Type())
- }
- dst.Set(reflect.AppendSlice(dst, src))
- }
- case reflect.Ptr:
- fallthrough
- case reflect.Interface:
- if src.IsNil() {
- break
- }
- if src.Kind() != reflect.Interface {
- if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- } else if src.Kind() == reflect.Ptr {
- if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
- }
- } else if dst.Elem().Type() == src.Type() {
- if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
- return
- }
- } else {
- return ErrDifferentArgumentsTypes
- }
- break
- }
- if dst.IsNil() || overwrite {
- if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
- return
- }
- default:
- if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
- dst.Set(src)
- }
- }
- return
-}
-
-// Merge will fill any empty value-type attributes on the dst struct using the corresponding
-// src attributes if they themselves are not empty. dst and src must be valid same-type structs
-// and dst must be a pointer to struct.
-// It won't merge unexported (private) fields, and it will merge any exported field recursively.
-func Merge(dst, src interface{}, opts ...func(*Config)) error {
- return merge(dst, src, opts...)
-}
-
-// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
-// non-empty src attribute values.
-// Deprecated: use Merge(…) with WithOverride
-func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
- return merge(dst, src, append(opts, WithOverride)...)
-}
-
-// WithTransformers adds transformers to merge, allowing you to customize the merging of some types.
-func WithTransformers(transformers Transformers) func(*Config) {
- return func(config *Config) {
- config.Transformers = transformers
- }
-}
-
-// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
-func WithOverride(config *Config) {
- config.Overwrite = true
-}
-
-// WithAppendSlice will make merge append slices instead of overwriting them.
-func WithAppendSlice(config *Config) {
- config.AppendSlice = true
-}
-
-func merge(dst, src interface{}, opts ...func(*Config)) error {
- var (
- vDst, vSrc reflect.Value
- err error
- )
-
- config := &Config{}
-
- for _, opt := range opts {
- opt(config)
- }
-
- if vDst, vSrc, err = resolveValues(dst, src); err != nil {
- return err
- }
- if vDst.Type() != vSrc.Type() {
- return ErrDifferentArgumentsTypes
- }
- return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
-}
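
To illustrate how the `WithOverride` and `WithAppendSlice` options above combine, here is a hedged sketch; the `Settings` type, its fields, and the values are invented for the example.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Settings struct {
	Host string
	Tags []string
}

func main() {
	dst := Settings{Host: "old.example.org", Tags: []string{"a"}}
	src := Settings{Host: "new.example.org", Tags: []string{"b", "c"}}

	// WithOverride replaces non-empty dst values with src values;
	// WithAppendSlice appends src slices onto dst slices instead of replacing them.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", dst) // {Host:new.example.org Tags:[a b c]}
}
```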
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
deleted file mode 100644
index a82fea2f..00000000
--- a/vendor/github.com/imdario/mergo/mergo.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2013 Dario Castañé. All rights reserved.
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on src/pkg/reflect/deepequal.go from official
-// golang's stdlib.
-
-package mergo
-
-import (
- "errors"
- "reflect"
-)
-
-// Errors reported by Mergo when it finds invalid arguments.
-var (
- ErrNilArguments = errors.New("src and dst must not be nil")
- ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
- ErrNotSupported = errors.New("only structs and maps are supported")
- ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
- ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
-)
-
-// During deepMerge, we must keep track of checks that are
-// in progress. The comparison algorithm assumes that all
-// checks in progress are true when it re-encounters them.
-// Visited entries are stored in a map indexed by 17 * addr.
-type visit struct {
- ptr uintptr
- typ reflect.Type
- next *visit
-}
-
-// From src/pkg/encoding/json/encode.go.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- if v.IsNil() {
- return true
- }
- return isEmptyValue(v.Elem())
- case reflect.Func:
- return v.IsNil()
- case reflect.Invalid:
- return true
- }
- return false
-}
-
-func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
- if dst == nil || src == nil {
- err = ErrNilArguments
- return
- }
- vDst = reflect.ValueOf(dst).Elem()
- if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
- err = ErrNotSupported
- return
- }
- vSrc = reflect.ValueOf(src)
- // We check if vSrc is a pointer to dereference it.
- if vSrc.Kind() == reflect.Ptr {
- vSrc = vSrc.Elem()
- }
- return
-}
-
-// Recursively traverses both values, assigning src's field values to dst.
-// The map argument tracks comparisons that have already been seen, which allows
-// short circuiting on recursive types.
-func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
- if dst.CanAddr() {
- addr := dst.UnsafeAddr()
- h := 17 * addr
- seen := visited[h]
- typ := dst.Type()
- for p := seen; p != nil; p = p.next {
- if p.ptr == addr && p.typ == typ {
- return nil
- }
- }
- // Remember, remember...
- visited[h] = &visit{addr, typ, seen}
- }
- return // TODO refactor
-}
diff --git a/vendor/github.com/kr/logfmt/Readme b/vendor/github.com/kr/logfmt/Readme
deleted file mode 100644
index 1865a11f..00000000
--- a/vendor/github.com/kr/logfmt/Readme
+++ /dev/null
@@ -1,12 +0,0 @@
-Go package for parsing (and, eventually, generating)
-log lines in the logfmt style.
-
-See http://godoc.org/github.com/kr/logfmt for format, and other documentation and examples.
-
-Copyright (C) 2013 Keith Rarick, Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/kr/logfmt/decode.go b/vendor/github.com/kr/logfmt/decode.go
deleted file mode 100644
index 1397fb74..00000000
--- a/vendor/github.com/kr/logfmt/decode.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Package logfmt implements the decoding of logfmt key-value pairs.
-//
-// Example logfmt message:
-//
-// foo=bar a=14 baz="hello kitty" cool%story=bro f %^asdf
-//
-// Example result in JSON:
-//
-// { "foo": "bar", "a": 14, "baz": "hello kitty", "cool%story": "bro", "f": true, "%^asdf": true }
-//
-// EBNFish:
-//
-// ident_byte = any byte greater than ' ', excluding '=' and '"'
-// string_byte = any byte excluding '"' and '\'
-// garbage = !ident_byte
-// ident = ident_byte, { ident byte }
-// key = ident
-// value = ident | '"', { string_byte | '\', '"' }, '"'
-// pair = key, '=', value | key, '=' | key
-// message = { garbage, pair }, garbage
-package logfmt
-
-import (
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// Handler is the interface implemented by objects that accept logfmt
-// key-value pairs. HandleLogfmt must copy the logfmt data if it
-// wishes to retain the data after returning.
-type Handler interface {
- HandleLogfmt(key, val []byte) error
-}
-
-// The HandlerFunc type is an adapter to allow the use of ordinary functions as
-// logfmt handlers. If f is a function with the appropriate signature,
-// HandlerFunc(f) is a Handler object that calls f.
-type HandlerFunc func(key, val []byte) error
-
-func (f HandlerFunc) HandleLogfmt(key, val []byte) error {
- return f(key, val)
-}
-
-// Unmarshal parses the logfmt-encoded data and stores the result in the value
-// pointed to by v. If v is a Handler, HandleLogfmt will be called for each
-// key-value pair.
-//
-// If v is not a Handler, it will pass v to NewStructHandler and use the
-// returned StructHandler for decoding.
-func Unmarshal(data []byte, v interface{}) (err error) {
- h, ok := v.(Handler)
- if !ok {
- h, err = NewStructHandler(v)
- if err != nil {
- return err
- }
- }
- return gotoScanner(data, h)
-}
-
-// StructHandler unmarshals logfmt into a struct. It matches incoming keys to
-// the struct's fields (either the struct field name or its tag), preferring
-// an exact match but also accepting a case-insensitive match.
-//
-// Field types supported by StructHandler are:
-//
-// all numeric types (e.g. float32, int, etc.)
-// []byte
-// string
-// bool - true if key is present, false otherwise (the value is ignored).
-// time.Duration - uses time.ParseDuration
-//
-// If a field is a pointer to an above type, and a matching key is not present
-// in the logfmt data, the pointer will be untouched.
-//
-// If v is not a pointer to a Handler or struct, Unmarshal will return an
-// error.
-type StructHandler struct {
- rv reflect.Value
-}
-
-func NewStructHandler(v interface{}) (Handler, error) {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr || rv.IsNil() {
- return nil, &InvalidUnmarshalError{reflect.TypeOf(v)}
- }
- return &StructHandler{rv: rv}, nil
-}
-
-func (h *StructHandler) HandleLogfmt(key, val []byte) error {
- el := h.rv.Elem()
- skey := string(key)
- for i := 0; i < el.NumField(); i++ {
- fv := el.Field(i)
- ft := el.Type().Field(i)
- switch {
- case ft.Name == skey:
- case ft.Tag.Get("logfmt") == skey:
- case strings.EqualFold(ft.Name, skey):
- default:
- continue
- }
- if fv.Kind() == reflect.Ptr {
- if fv.IsNil() {
- t := fv.Type().Elem()
- v := reflect.New(t)
- fv.Set(v)
- fv = v
- }
- fv = fv.Elem()
- }
- switch fv.Interface().(type) {
- case time.Duration:
- d, err := time.ParseDuration(string(val))
- if err != nil {
- return &UnmarshalTypeError{string(val), fv.Type()}
- }
- fv.Set(reflect.ValueOf(d))
- case string:
- fv.SetString(string(val))
- case []byte:
- b := make([]byte, len(val))
- copy(b, val)
- fv.SetBytes(b)
- case bool:
- fv.SetBool(true)
- default:
- switch {
- case reflect.Int <= fv.Kind() && fv.Kind() <= reflect.Int64:
- v, err := strconv.ParseInt(string(val), 10, 64)
- if err != nil {
- return err
- }
- fv.SetInt(v)
- case reflect.Uint32 <= fv.Kind() && fv.Kind() <= reflect.Uint64:
- v, err := strconv.ParseUint(string(val), 10, 64)
- if err != nil {
- return err
- }
- fv.SetUint(v)
- case reflect.Float32 <= fv.Kind() && fv.Kind() <= reflect.Float64:
- v, err := strconv.ParseFloat(string(val), 10)
- if err != nil {
- return err
- }
- fv.SetFloat(v)
- default:
- return &UnmarshalTypeError{string(val), fv.Type()}
- }
- }
-
- }
- return nil
-}
-
-// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
-// (The argument to Unmarshal must be a non-nil pointer.)
-type InvalidUnmarshalError struct {
- Type reflect.Type
-}
-
-func (e *InvalidUnmarshalError) Error() string {
- if e.Type == nil {
- return "logfmt: Unmarshal(nil)"
- }
-
- if e.Type.Kind() != reflect.Ptr {
- return "logfmt: Unmarshal(non-pointer " + e.Type.String() + ")"
- }
- return "logfmt: Unmarshal(nil " + e.Type.String() + ")"
-}
-
-// An UnmarshalTypeError describes a logfmt value that was
-// not appropriate for a value of a specific Go type.
-type UnmarshalTypeError struct {
- Value string // the logfmt value
- Type reflect.Type // type of Go value it could not be assigned to
-}
-
-func (e *UnmarshalTypeError) Error() string {
- return "logfmt: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
-}
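
A small, hedged sketch of the `Unmarshal`/`StructHandler` API described above; the `record` type, its fields, and the sample log line are assumptions chosen to exercise the supported field kinds (string, int, bool, time.Duration, and the `logfmt` tag).

```go
package main

import (
	"fmt"
	"time"

	"github.com/kr/logfmt"
)

type record struct {
	Foo     string
	A       int
	Baz     string
	F       bool
	Elapsed time.Duration `logfmt:"elapsed"`
}

func main() {
	line := []byte(`foo=bar a=14 baz="hello kitty" elapsed=300ms f`)

	var r record
	if err := logfmt.Unmarshal(line, &r); err != nil {
		fmt.Println(err)
		return
	}
	// Bare keys like "f" set bool fields to true; quoted values are unquoted.
	fmt.Printf("%+v\n", r)
}
```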
diff --git a/vendor/github.com/kr/logfmt/scanner.go b/vendor/github.com/kr/logfmt/scanner.go
deleted file mode 100644
index 095221ff..00000000
--- a/vendor/github.com/kr/logfmt/scanner.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package logfmt
-
-import (
- "errors"
- "fmt"
-)
-
-var ErrUnterminatedString = errors.New("logfmt: unterminated string")
-
-func gotoScanner(data []byte, h Handler) (err error) {
- saveError := func(e error) {
- if err == nil {
- err = e
- }
- }
-
- var c byte
- var i int
- var m int
- var key []byte
- var val []byte
- var ok bool
- var esc bool
-
-garbage:
- if i == len(data) {
- return
- }
-
- c = data[i]
- switch {
- case c > ' ' && c != '"' && c != '=':
- key, val = nil, nil
- m = i
- i++
- goto key
- default:
- i++
- goto garbage
- }
-
-key:
- if i >= len(data) {
- if m >= 0 {
- key = data[m:i]
- saveError(h.HandleLogfmt(key, nil))
- }
- return
- }
-
- c = data[i]
- switch {
- case c > ' ' && c != '"' && c != '=':
- i++
- goto key
- case c == '=':
- key = data[m:i]
- i++
- goto equal
- default:
- key = data[m:i]
- i++
- saveError(h.HandleLogfmt(key, nil))
- goto garbage
- }
-
-equal:
- if i >= len(data) {
- if m >= 0 {
- i--
- key = data[m:i]
- saveError(h.HandleLogfmt(key, nil))
- }
- return
- }
-
- c = data[i]
- switch {
- case c > ' ' && c != '"' && c != '=':
- m = i
- i++
- goto ivalue
- case c == '"':
- m = i
- i++
- esc = false
- goto qvalue
- default:
- if key != nil {
- saveError(h.HandleLogfmt(key, val))
- }
- i++
- goto garbage
- }
-
-ivalue:
- if i >= len(data) {
- if m >= 0 {
- val = data[m:i]
- saveError(h.HandleLogfmt(key, val))
- }
- return
- }
-
- c = data[i]
- switch {
- case c > ' ' && c != '"' && c != '=':
- i++
- goto ivalue
- default:
- val = data[m:i]
- saveError(h.HandleLogfmt(key, val))
- i++
- goto garbage
- }
-
-qvalue:
- if i >= len(data) {
- if m >= 0 {
- saveError(ErrUnterminatedString)
- }
- return
- }
-
- c = data[i]
- switch c {
- case '\\':
- i += 2
- esc = true
- goto qvalue
- case '"':
- i++
- val = data[m:i]
- if esc {
- val, ok = unquoteBytes(val)
- if !ok {
- saveError(fmt.Errorf("logfmt: error unquoting bytes %q", string(val)))
- goto garbage
- }
- } else {
- val = val[1 : len(val)-1]
- }
- saveError(h.HandleLogfmt(key, val))
- goto garbage
- default:
- i++
- goto qvalue
- }
-}
diff --git a/vendor/github.com/kr/logfmt/unquote.go b/vendor/github.com/kr/logfmt/unquote.go
deleted file mode 100644
index fb088a47..00000000
--- a/vendor/github.com/kr/logfmt/unquote.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package logfmt
-
-import (
- "strconv"
- "unicode"
- "unicode/utf16"
- "unicode/utf8"
-)
-
-// Taken from Go's encoding/json
-
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
-// or it returns -1.
-func getu4(s []byte) rune {
- if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
- return -1
- }
- r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
- if err != nil {
- return -1
- }
- return rune(r)
-}
-
-// unquote converts a quoted JSON string literal s into an actual string t.
-// The rules are different than for Go, so cannot use strconv.Unquote.
-func unquote(s []byte) (t string, ok bool) {
- s, ok = unquoteBytes(s)
- t = string(s)
- return
-}
-
-func unquoteBytes(s []byte) (t []byte, ok bool) {
- if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
- return
- }
- s = s[1 : len(s)-1]
-
- // Check for unusual characters. If there are none,
- // then no unquoting is needed, so return a slice of the
- // original bytes.
- r := 0
- for r < len(s) {
- c := s[r]
- if c == '\\' || c == '"' || c < ' ' {
- break
- }
- if c < utf8.RuneSelf {
- r++
- continue
- }
- rr, size := utf8.DecodeRune(s[r:])
- if rr == utf8.RuneError && size == 1 {
- break
- }
- r += size
- }
- if r == len(s) {
- return s, true
- }
-
- b := make([]byte, len(s)+2*utf8.UTFMax)
- w := copy(b, s[0:r])
- for r < len(s) {
- // Out of room? Can only happen if s is full of
- // malformed UTF-8 and we're replacing each
- // byte with RuneError.
- if w >= len(b)-2*utf8.UTFMax {
- nb := make([]byte, (len(b)+utf8.UTFMax)*2)
- copy(nb, b[0:w])
- b = nb
- }
- switch c := s[r]; {
- case c == '\\':
- r++
- if r >= len(s) {
- return
- }
- switch s[r] {
- default:
- return
- case '"', '\\', '/', '\'':
- b[w] = s[r]
- r++
- w++
- case 'b':
- b[w] = '\b'
- r++
- w++
- case 'f':
- b[w] = '\f'
- r++
- w++
- case 'n':
- b[w] = '\n'
- r++
- w++
- case 'r':
- b[w] = '\r'
- r++
- w++
- case 't':
- b[w] = '\t'
- r++
- w++
- case 'u':
- r--
- rr := getu4(s[r:])
- if rr < 0 {
- return
- }
- r += 6
- if utf16.IsSurrogate(rr) {
- rr1 := getu4(s[r:])
- if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
- // A valid pair; consume.
- r += 6
- w += utf8.EncodeRune(b[w:], dec)
- break
- }
- // Invalid surrogate; fall back to replacement rune.
- rr = unicode.ReplacementChar
- }
- w += utf8.EncodeRune(b[w:], rr)
- }
-
- // Quote, control characters are invalid.
- case c == '"', c < ' ':
- return
-
- // ASCII
- case c < utf8.RuneSelf:
- b[w] = c
- r++
- w++
-
- // Coerce to well-formed UTF-8.
- default:
- rr, size := utf8.DecodeRune(s[r:])
- r += size
- w += utf8.EncodeRune(b[w:], rr)
- }
- }
- return b[0:w], true
-}
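The surrogate branch above reassembles two \uXXXX escapes with utf16.DecodeRune. A standalone sketch of just that step, using only the standard library and an illustrative input; decodeU4 mirrors the unexported getu4 above:

```go
package main

import (
	"fmt"
	"strconv"
	"unicode"
	"unicode/utf16"
)

// decodeU4 mirrors getu4: it reads a \uXXXX escape from the start of s and
// returns the rune, or -1 if the escape is malformed.
func decodeU4(s []byte) rune {
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return -1
	}
	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
	if err != nil {
		return -1
	}
	return rune(r)
}

func main() {
	// \uD83D\uDE00 is the UTF-16 surrogate pair for U+1F600.
	in := []byte(`\uD83D\uDE00`)
	hi, lo := decodeU4(in), decodeU4(in[6:])
	if utf16.IsSurrogate(hi) {
		if dec := utf16.DecodeRune(hi, lo); dec != unicode.ReplacementChar {
			fmt.Printf("decoded %c (U+%X)\n", dec, dec)
			return
		}
	}
	fmt.Println("not a valid surrogate pair")
}
```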
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
deleted file mode 100644
index 8dada3ed..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
deleted file mode 100644
index 5d8cb5b7..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
deleted file mode 100644
index 81be2143..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
- go test -cover -v -coverprofile=cover.dat ./...
- go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
deleted file mode 100644
index 258c0636..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "errors"
- "io"
-
- "github.com/golang/protobuf/proto"
-)
-
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error. This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom. As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred). The function never
-// reads more bytes from the stream than required. The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so. In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
- // Per AbstractParser#parsePartialDelimitedFrom with
- // CodedInputStream#readRawVarint32.
- var headerBuf [binary.MaxVarintLen32]byte
- var bytesRead, varIntBytes int
- var messageLength uint64
- for varIntBytes == 0 { // i.e. no varint has been decoded yet.
- if bytesRead >= len(headerBuf) {
- return bytesRead, errInvalidVarint
- }
- // We have to read byte by byte here to avoid reading more bytes
- // than required. Each read byte is appended to what we have
- // read before.
- newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
- if newBytesRead == 0 {
- if err != nil {
- return bytesRead, err
- }
- // A Reader should not return (0, nil), but if it does,
- // it should be treated as no-op (according to the
- // Reader contract). So let's go on...
- continue
- }
- bytesRead += newBytesRead
- // Now present everything read so far to the varint decoder and
- // see if a varint can be decoded already.
- messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
- }
-
- messageBuf := make([]byte, messageLength)
- newBytesRead, err := io.ReadFull(r, messageBuf)
- bytesRead += newBytesRead
- if err != nil {
- return bytesRead, err
- }
-
- return bytesRead, proto.Unmarshal(messageBuf, m)
-}
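ReadDelimited reads the uvarint length prefix byte by byte so that it never consumes more of the stream than the one message. A simplified sketch of the same framing idea with a buffered reader and raw byte payloads instead of proto messages, trading the strict no-over-read guarantee for brevity:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readFrame mimics the framing side of ReadDelimited: a uvarint length
// prefix followed by exactly that many payload bytes.
func readFrame(r *bufio.Reader) ([]byte, error) {
	length, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, length)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	// Build two length-delimited records by hand.
	var stream bytes.Buffer
	for _, msg := range []string{"hello", "delimited world"} {
		var prefix [binary.MaxVarintLen32]byte
		n := binary.PutUvarint(prefix[:], uint64(len(msg)))
		stream.Write(prefix[:n])
		stream.WriteString(msg)
	}

	r := bufio.NewReader(&stream)
	for {
		payload, err := readFrame(r)
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println("read error:", err)
			return
		}
		fmt.Printf("record: %q\n", payload)
	}
}
```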
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
deleted file mode 100644
index c318385c..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
deleted file mode 100644
index 8fb59ad2..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "io"
-
- "github.com/golang/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain encoded
-// messages of the same type together in a file. It returns the total
-// number of bytes written and any applicable error. This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
- buffer, err := proto.Marshal(m)
- if err != nil {
- return 0, err
- }
-
- var buf [binary.MaxVarintLen32]byte
- encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
- sync, err := w.Write(buf[:encodedLength])
- if err != nil {
- return sync, err
- }
-
- n, err = w.Write(buffer)
- return n + sync, err
-}
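Together, WriteDelimited and ReadDelimited give a length-delimited record stream. A hedged round-trip sketch, assuming the client_model MetricFamily message (also part of this vendor tree, imported as dto elsewhere in the diff) as the payload:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
	}

	// WriteDelimited prefixes the marshalled message with its length as a
	// uvarint, so several messages can be concatenated into one stream.
	var buf bytes.Buffer
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		panic(err)
	}

	// ReadDelimited consumes exactly one length-prefixed message.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName(), "->", out.GetHelp())
}
```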
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/prometheus/client_golang/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
deleted file mode 100644
index 37e4a7d4..00000000
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ /dev/null
@@ -1,28 +0,0 @@
-Prometheus instrumentation library for Go applications
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
-
-
-The following components are included in this product:
-
-goautoneg
-http://bitbucket.org/ww/goautoneg
-Copyright 2011, Open Knowledge Foundation Ltd.
-See README.txt for license details.
-
-perks - a fork of https://github.com/bmizerany/perks
-https://github.com/beorn7/perks
-Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
-See https://github.com/beorn7/perks/blob/master/README.md for license details.
-
-Go support for Protocol Buffers - Google's data interchange format
-http://github.com/golang/protobuf/
-Copyright 2010 The Go Authors
-See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
deleted file mode 100644
index 81032bed..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Overview
-This is the [Prometheus](http://www.prometheus.io) telemetric
-instrumentation client [Go](http://golang.org) client library. It
-enable authors to define process-space metrics for their servers and
-expose them through a web service interface for extraction,
-aggregation, and a whole slew of other post processing techniques.
-
-# Installing
- $ go get github.com/prometheus/client_golang/prometheus
-
-# Example
-```go
-package main
-
-import (
- "net/http"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- indexed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "my_company",
- Subsystem: "indexer",
- Name: "documents_indexed",
- Help: "The number of documents indexed.",
- })
- size = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "my_company",
- Subsystem: "storage",
- Name: "documents_total_size_bytes",
- Help: "The total size of all documents in the storage.",
- })
-)
-
-func main() {
- http.Handle("/metrics", prometheus.Handler())
-
- indexed.Inc()
- size.Set(5)
-
- http.ListenAndServe(":8080", nil)
-}
-
-func init() {
- prometheus.MustRegister(indexed)
- prometheus.MustRegister(size)
-}
-```
-
-# Documentation
-
-[GoDoc](https://godoc.org/github.com/prometheus/client_golang)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
deleted file mode 100644
index c0468800..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Collector is the interface implemented by anything that can be used by
-// Prometheus to collect metrics. A Collector has to be registered for
-// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
-//
-// The stock metrics provided by this package (like Gauge, Counter, Summary) are
-// also Collectors (which only ever collect one metric, namely itself). An
-// implementer of Collector may, however, collect multiple metrics in a
-// coordinated fashion and/or create metrics on the fly. Examples for collectors
-// already implemented in this library are the metric vectors (i.e. collection
-// of multiple instances of the same Metric but with different label values)
-// like GaugeVec or SummaryVec, and the ExpvarCollector.
-type Collector interface {
- // Describe sends the super-set of all possible descriptors of metrics
- // collected by this Collector to the provided channel and returns once
- // the last descriptor has been sent. The sent descriptors fulfill the
- // consistency and uniqueness requirements described in the Desc
- // documentation. (It is valid if one and the same Collector sends
- // duplicate descriptors. Those duplicates are simply ignored. However,
- // two different Collectors must not send duplicate descriptors.) This
- // method idempotently sends the same descriptors throughout the
- // lifetime of the Collector. If a Collector encounters an error while
- // executing this method, it must send an invalid descriptor (created
- // with NewInvalidDesc) to signal the error to the registry.
- Describe(chan<- *Desc)
- // Collect is called by Prometheus when collecting metrics. The
- // implementation sends each collected metric via the provided channel
- // and returns once the last metric has been sent. The descriptor of
- // each sent metric is one of those returned by Describe. Returned
- // metrics that share the same descriptor must differ in their variable
- // label values. This method may be called concurrently and must
- // therefore be implemented in a concurrency safe way. Blocking occurs
- // at the expense of total performance of rendering all registered
- // metrics. Ideally, Collector implementations support concurrent
- // readers.
- Collect(chan<- Metric)
-}
-
-// SelfCollector implements Collector for a single Metric so that the
-// Metric collects itself. Add it as an anonymous field to a struct that
-// implements Metric, and call Init with the Metric itself as an argument.
-type SelfCollector struct {
- self Metric
-}
-
-// Init provides the SelfCollector with a reference to the metric it is supposed
-// to collect. It is usually called within the factory function to create a
-// metric. See example.
-func (c *SelfCollector) Init(self Metric) {
- c.self = self
-}
-
-// Describe implements Collector.
-func (c *SelfCollector) Describe(ch chan<- *Desc) {
- ch <- c.self.Desc()
-}
-
-// Collect implements Collector.
-func (c *SelfCollector) Collect(ch chan<- Metric) {
- ch <- c.self
-}
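The Collector contract above boils down to: Describe sends every descriptor the collector can ever emit, Collect sends the current metrics, often created on the fly at scrape time. A minimal custom Collector sketch under those rules; the queue and all names are illustrative, and MustNewConstMetric is the throw-away-metric helper referenced by the package documentation further down:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector reports the length of an in-memory work queue at scrape
// time instead of keeping a live Gauge up to date.
type queueCollector struct {
	desc  *prometheus.Desc
	queue *[]string
}

func newQueueCollector(q *[]string) *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc(
			"example_queue_length",
			"Current number of items waiting in the queue.",
			nil, nil,
		),
		queue: q,
	}
}

// Describe sends the single descriptor this collector ever uses.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a throw-away metric for the current queue length.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(len(*c.queue)))
}

func main() {
	queue := []string{"a", "b", "c"}
	prometheus.MustRegister(newQueueCollector(&queue))
	fmt.Println("collector registered; expose it via prometheus.Handler()")
}
```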
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
deleted file mode 100644
index d2a564b5..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
-)
-
-// Counter is a Metric that represents a single numerical value that only ever
-// goes up. That implies that it cannot be used to count items whose number can
-// also go down, e.g. the number of currently running goroutines. Those
-// "counters" are represented by Gauges.
-//
-// A Counter is typically used to count requests served, tasks completed, errors
-// occurred, etc.
-//
-// To create Counter instances, use NewCounter.
-type Counter interface {
- Metric
- Collector
-
- // Set is used to set the Counter to an arbitrary value. It is only used
- // if you have to transfer a value from an external counter into this
- // Prometheus metric. Do not use it for regular handling of a
- // Prometheus counter (as it can be used to break the contract of
- // monotonically increasing values).
- Set(float64)
- // Inc increments the counter by 1.
- Inc()
- // Add adds the given value to the counter. It panics if the value is <
- // 0.
- Add(float64)
-}
-
-// CounterOpts is an alias for Opts. See there for doc comments.
-type CounterOpts Opts
-
-// NewCounter creates a new Counter based on the provided CounterOpts.
-func NewCounter(opts CounterOpts) Counter {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- )
- result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
- result.Init(result) // Init self-collection.
- return result
-}
-
-type counter struct {
- value
-}
-
-func (c *counter) Add(v float64) {
- if v < 0 {
- panic(errors.New("counter cannot decrease in value"))
- }
- c.value.Add(v)
-}
-
-// CounterVec is a Collector that bundles a set of Counters that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. number of HTTP requests, partitioned by response code and
-// method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
-type CounterVec struct {
- MetricVec
-}
-
-// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &CounterVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- result := &counter{value: value{
- desc: desc,
- valType: CounterValue,
- labelPairs: makeLabelPairs(desc, lvs),
- }}
- result.Init(result) // Init self-collection.
- return result
- },
- },
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Counter), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
- return m.MetricVec.WithLabelValues(lvs...).(Counter)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
- return m.MetricVec.With(labels).(Counter)
-}
-
-// CounterFunc is a Counter whose value is determined at collect time by calling a
-// provided function.
-//
-// To create CounterFunc instances, use NewCounterFunc.
-type CounterFunc interface {
- Metric
- Collector
-}
-
-// NewCounterFunc creates a new CounterFunc based on the provided
-// CounterOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a CounterFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe. The function should also honor
-// the contract for a Counter (values only go up, not down), but compliance will
-// not be checked.
-func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), CounterValue, function)
-}
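A short usage sketch for the CounterVec API removed above, showing the panicking WithLabelValues/With shortcuts next to the error-returning GetMetricWithLabelValues variant; the metric names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One counter per (code, method) pair, as in the CounterVec doc comment.
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "HTTP requests partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// WithLabelValues panics on a label mismatch; the Get... variants
	// return an error instead.
	httpReqs.WithLabelValues("404", "GET").Add(42)
	httpReqs.With(prometheus.Labels{"code": "200", "method": "POST"}).Inc()

	if c, err := httpReqs.GetMetricWithLabelValues("200", "POST"); err == nil {
		c.Inc()
	}

	fmt.Println("counters updated")
}
```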
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
deleted file mode 100644
index ee02d9b8..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package prometheus
-
-import (
- "errors"
- "fmt"
- "regexp"
- "sort"
- "strings"
-
- "github.com/golang/protobuf/proto"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-var (
- metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
- labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-)
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
-// Desc is the descriptor used by every Prometheus Metric. It is essentially
-// the immutable meta-data of a Metric. The normal Metric implementations
-// included in this package manage their Desc under the hood. Users only have to
-// deal with Desc if they use advanced features like the ExpvarCollector or
-// custom Collectors and Metrics.
-//
-// Descriptors registered with the same registry have to fulfill certain
-// consistency and uniqueness criteria if they share the same fully-qualified
-// name: They must have the same help string and the same label names (aka label
-// dimensions) in each, constLabels and variableLabels, but they must differ in
-// the values of the constLabels.
-//
-// Descriptors that share the same fully-qualified names and the same label
-// values of their constLabels are considered equal.
-//
-// Use NewDesc to create new Desc instances.
-type Desc struct {
- // fqName has been built from Namespace, Subsystem, and Name.
- fqName string
- // help provides some helpful information about this metric.
- help string
- // constLabelPairs contains precalculated DTO label pairs based on
- // the constant labels.
- constLabelPairs []*dto.LabelPair
- // VariableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
- // id is a hash of the values of the ConstLabels and fqName. This
- // must be unique among all registered descriptors and can therefore be
- // used as an identifier of the descriptor.
- id uint64
- // dimHash is a hash of the label names (preset and variable) and the
- // Help string. Each Desc with the same fqName must have the same
- // dimHash.
- dimHash uint64
- // err is an error that occurred during construction. It is reported on
- // registration time.
- err error
-}
-
-// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
-// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
-//
-// variableLabels only contain the label names. Their label values are variable
-// and therefore not part of the Desc. (They are managed within the Metric.)
-//
-// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
-func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
- d := &Desc{
- fqName: fqName,
- help: help,
- variableLabels: variableLabels,
- }
- if help == "" {
- d.err = errors.New("empty help string")
- return d
- }
- if !metricNameRE.MatchString(fqName) {
- d.err = fmt.Errorf("%q is not a valid metric name", fqName)
- return d
- }
- // labelValues contains the label values of const labels (in order of
- // their sorted label names) plus the fqName (at position 0).
- labelValues := make([]string, 1, len(constLabels)+1)
- labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
- labelNameSet := map[string]struct{}{}
- // First add only the const label names and sort them...
- for labelName := range constLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name", labelName)
- return d
- }
- labelNames = append(labelNames, labelName)
- labelNameSet[labelName] = struct{}{}
- }
- sort.Strings(labelNames)
- // ... so that we can now add const label values in the order of their names.
- for _, labelName := range labelNames {
- labelValues = append(labelValues, constLabels[labelName])
- }
- // Now add the variable label names, but prefix them with something that
- // cannot be in a regular label name. That prevents matching the label
- // dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name", labelName)
- return d
- }
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
- }
- if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
- return d
- }
- vh := hashNew()
- for _, val := range labelValues {
- vh = hashAdd(vh, val)
- vh = hashAddByte(vh, separatorByte)
- }
- d.id = vh
- // Sort labelNames so that order doesn't matter for the hash.
- sort.Strings(labelNames)
- // Now hash together (in this order) the help string and the sorted
- // label names.
- lh := hashNew()
- lh = hashAdd(lh, help)
- lh = hashAddByte(lh, separatorByte)
- for _, labelName := range labelNames {
- lh = hashAdd(lh, labelName)
- lh = hashAddByte(lh, separatorByte)
- }
- d.dimHash = lh
-
- d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
- for n, v := range constLabels {
- d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
- Name: proto.String(n),
- Value: proto.String(v),
- })
- }
- sort.Sort(LabelPairSorter(d.constLabelPairs))
- return d
-}
-
-// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
-// provided error set. If a collector returning such a descriptor is registered,
-// registration will fail with the provided error. NewInvalidDesc can be used by
-// a Collector to signal inability to describe itself.
-func NewInvalidDesc(err error) *Desc {
- return &Desc{
- err: err,
- }
-}
-
-func (d *Desc) String() string {
- lpStrings := make([]string, 0, len(d.constLabelPairs))
- for _, lp := range d.constLabelPairs {
- lpStrings = append(
- lpStrings,
- fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
- )
- }
- return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
- d.fqName,
- d.help,
- strings.Join(lpStrings, ","),
- d.variableLabels,
- )
-}
-
-func checkLabelName(l string) bool {
- return labelNameRE.MatchString(l) &&
- !strings.HasPrefix(l, reservedLabelPrefix)
-}
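NewDesc bakes const labels into the descriptor and defers any validation error to registration time, while variable labels receive their values only when a concrete metric is created. A small sketch of both behaviors, assuming MustNewConstMetric from the same package; all names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// "cluster" is a const label fixed in the Desc; "shard" is a variable
	// label whose value is supplied per metric.
	desc := prometheus.NewDesc(
		"example_documents_indexed_total",
		"Documents indexed, partitioned by shard.",
		[]string{"shard"},
		prometheus.Labels{"cluster": "demo"},
	)
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 1024, "shard-0")
	fmt.Println(m.Desc())

	// An invalid metric name is not reported here ...
	bad := prometheus.NewDesc("not a metric name", "broken", nil, nil)
	_ = bad
	// ... but only when a Collector returning it is registered, as the
	// NewDesc comment above explains.
}
```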
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
deleted file mode 100644
index ca56f5ed..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package prometheus provides embeddable metric primitives for servers and
-// standardized exposition of telemetry through a web services interface.
-//
-// All exported functions and methods are safe to be used concurrently unless
-// specified otherwise.
-//
-// To expose metrics registered with the Prometheus registry, an HTTP server
-// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
-//
-// http.Handle("/metrics", prometheus.Handler())
-//
-// As a starting point a very basic usage example:
-//
-// package main
-//
-// import (
-// "net/http"
-//
-// "github.com/prometheus/client_golang/prometheus"
-// )
-//
-// var (
-// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// })
-// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// })
-// )
-//
-// func init() {
-// prometheus.MustRegister(cpuTemp)
-// prometheus.MustRegister(hdFailures)
-// }
-//
-// func main() {
-// cpuTemp.Set(65.3)
-// hdFailures.Inc()
-//
-// http.Handle("/metrics", prometheus.Handler())
-// http.ListenAndServe(":8080", nil)
-// }
-//
-//
-// This is a complete program that exports two metrics, a Gauge and a Counter.
-// It also exports some stats about the HTTP usage of the /metrics
-// endpoint. (See the Handler function for more detail.)
-//
-// Two more advanced metric types are the Summary and Histogram. A more
-// thorough description of metric types can be found in the prometheus docs:
-// https://prometheus.io/docs/concepts/metric_types/
-//
-// In addition to the fundamental metric types Gauge, Counter, Summary, and
-// Histogram, a very important part of the Prometheus data model is the
-// partitioning of samples along dimensions called labels, which results in
-// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
-// and HistogramVec.
-//
-// Those are all the parts needed for basic usage. Detailed documentation and
-// examples are provided below.
-//
-// Everything else this package offers is essentially for "power users" only. A
-// few pointers to "power user features":
-//
-// All the various ...Opts structs have a ConstLabels field for labels that
-// never change their value (which is only useful under special circumstances,
-// see documentation of the Opts type).
-//
-// The Untyped metric behaves like a Gauge, but signals the Prometheus server
-// not to assume anything about its type.
-//
-// Functions to fine-tune how the metric registry works: EnableCollectChecks,
-// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
-//
-// For custom metric collection, there are two entry points: Custom Metric
-// implementations and custom Collector implementations. A Metric is the
-// fundamental unit in the Prometheus data model: a sample at a point in time
-// together with its meta-data (like its fully-qualified name and any number of
-// pairs of label name and label value) that knows how to marshal itself into a
-// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
-// gets registered with the Prometheus registry and manages the collection of
-// one or more Metrics. Many parts of this package are building blocks for
-// Metrics and Collectors. Desc is the metric descriptor, actually used by all
-// metrics under the hood, and by Collectors to describe the Metrics to be
-// collected, but only to be dealt with by users if they implement their own
-// Metrics or Collectors. To create a Desc, the BuildFQName function will come
-// in handy. Other useful components for Metric and Collector implementation
-// include: LabelPairSorter to sort the DTO version of label pairs,
-// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
-// collection time, MetricVec to bundle custom Metrics into a metric vector
-// Collector, SelfCollector to make a custom Metric collect itself.
-//
-// A good example for a custom Collector is the ExpVarCollector included in this
-// package, which exports variables exported via the "expvar" package as
-// Prometheus metrics.
-package prometheus
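For orientation, a minimal sketch of how the labeled metric vectors mentioned in the deleted package documentation above were used against this pre-modules client_golang snapshot; the metric name http_requests_total and its label set are illustrative, not part of this repository.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

// httpReqs is an illustrative CounterVec partitioned by HTTP method and status code.
var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total number of HTTP requests, partitioned by method and status code.",
	},
	[]string{"method", "code"},
)

func init() {
	prometheus.MustRegister(httpReqs)
}

func main() {
	// WithLabelValues panics on a label-count mismatch;
	// GetMetricWithLabelValues is the error-returning variant.
	httpReqs.WithLabelValues("GET", "200").Inc()

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}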
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
deleted file mode 100644
index 0f7630d5..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "encoding/json"
- "expvar"
-)
-
-// ExpvarCollector collects metrics from the expvar interface. It provides a
-// quick way to expose numeric values that are already exported via expvar as
-// Prometheus metrics. Note that the data models of expvar and Prometheus are
-// fundamentally different, and that the ExpvarCollector is inherently
-// slow. Thus, the ExpvarCollector is probably great for experiments and
-// prototyping, but you should seriously consider a more direct implementation of
-// Prometheus metrics for monitoring production systems.
-//
-// Use NewExpvarCollector to create new instances.
-type ExpvarCollector struct {
- exports map[string]*Desc
-}
-
-// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
-// to be registered with the Prometheus registry.
-//
-// The exports map has the following meaning:
-//
-// The keys in the map correspond to expvar keys, i.e. for every expvar key you
-// want to export as Prometheus metric, you need an entry in the exports
-// map. The descriptor mapped to each key describes how to export the expvar
-// value. It defines the name and the help string of the Prometheus metric
-// proxying the expvar value. The type will always be Untyped.
-//
-// For descriptors without variable labels, the expvar value must be a number or
-// a bool. The number is then directly exported as the Prometheus sample
-// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
-// that are not numbers or bools are silently ignored.
-//
-// If the descriptor has one variable label, the expvar value must be an expvar
-// map. The keys in the expvar map become the various values of the one
-// Prometheus label. The values in the expvar map must be numbers or bools again
-// as above.
-//
-// For descriptors with more than one variable label, the expvar must be a
-// nested expvar map, i.e. where the values of the topmost map are maps again
-// etc. until a depth is reached that corresponds to the number of labels. The
-// leaves of that structure must be numbers or bools as above to serve as the
-// sample values.
-//
-// Anything that does not fit into the scheme above is silently ignored.
-func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
- return &ExpvarCollector{
- exports: exports,
- }
-}
-
-// Describe implements Collector.
-func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
- for _, desc := range e.exports {
- ch <- desc
- }
-}
-
-// Collect implements Collector.
-func (e *ExpvarCollector) Collect(ch chan<- Metric) {
- for name, desc := range e.exports {
- var m Metric
- expVar := expvar.Get(name)
- if expVar == nil {
- continue
- }
- var v interface{}
- labels := make([]string, len(desc.variableLabels))
- if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
- ch <- NewInvalidMetric(desc, err)
- continue
- }
- var processValue func(v interface{}, i int)
- processValue = func(v interface{}, i int) {
- if i >= len(labels) {
- copiedLabels := append(make([]string, 0, len(labels)), labels...)
- switch v := v.(type) {
- case float64:
- m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
- case bool:
- if v {
- m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
- } else {
- m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
- }
- default:
- return
- }
- ch <- m
- return
- }
- vm, ok := v.(map[string]interface{})
- if !ok {
- return
- }
- for lv, val := range vm {
- labels[i] = lv
- processValue(val, i+1)
- }
- }
- processValue(v, 0)
- }
-}
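A hedged usage sketch for the deleted ExpvarCollector: it wires one expvar map (http_responses, an illustrative name) to a descriptor with a single variable label, matching the exports-map semantics documented above.

package main

import (
	"expvar"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An expvar map whose keys become values of the "code" label.
	responses := expvar.NewMap("http_responses")
	responses.Add("200", 15)
	responses.Add("404", 2)

	// The exports map ties the expvar key to a Prometheus descriptor with
	// one variable label, as described in the deleted doc comment above.
	expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"http_responses": prometheus.NewDesc(
			"expvar_http_responses_total",
			"HTTP responses, proxied from expvar.",
			[]string{"code"},
			nil,
		),
	})
	prometheus.MustRegister(expvarCollector)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}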
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
deleted file mode 100644
index e3b67df8..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package prometheus
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
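The fnv.go helpers above are unexported, so the following standalone sketch simply re-declares the same constants to illustrate that hashAdd computes standard 64-bit FNV-1a, matching hash/fnv from the standard library.

package main

import (
	"fmt"
	"hash/fnv"
)

// Re-declaration of the unexported constants and hashAdd from the deleted
// fnv.go, purely to illustrate their equivalence with hash/fnv's fnv64a.
const (
	offset64 uint64 = 14695981039346656037
	prime64  uint64 = 1099511628211
)

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	inline := hashAdd(offset64, "go_goroutines")

	std := fnv.New64a()
	std.Write([]byte("go_goroutines"))

	// Both lines print the same value: the inline variant just avoids the
	// hash.Hash64 allocation on the registry's hashing hot path.
	fmt.Println(inline, std.Sum64())
}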
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
deleted file mode 100644
index 390c0746..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Gauge is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// A Gauge is typically used for measured values like temperatures or current
-// memory usage, but also "counts" that can go up and down, like the number of
-// running goroutines.
-//
-// To create Gauge instances, use NewGauge.
-type Gauge interface {
- Metric
- Collector
-
- // Set sets the Gauge to an arbitrary value.
- Set(float64)
- // Inc increments the Gauge by 1.
- Inc()
- // Dec decrements the Gauge by 1.
- Dec()
- // Add adds the given value to the Gauge. (The value can be
- // negative, resulting in a decrease of the Gauge.)
- Add(float64)
- // Sub subtracts the given value from the Gauge. (The value can be
- // negative, resulting in an increase of the Gauge.)
- Sub(float64)
-}
-
-// GaugeOpts is an alias for Opts. See there for doc comments.
-type GaugeOpts Opts
-
-// NewGauge creates a new Gauge based on the provided GaugeOpts.
-func NewGauge(opts GaugeOpts) Gauge {
- return newValue(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, 0)
-}
-
-// GaugeVec is a Collector that bundles a set of Gauges that all share the same
-// Desc, but have different values for their variable labels. This is used if
-// you want to count the same thing partitioned by various dimensions
-// (e.g. number of operations queued, partitioned by user and operation
-// type). Create instances with NewGaugeVec.
-type GaugeVec struct {
- MetricVec
-}
-
-// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &GaugeVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newValue(desc, GaugeValue, 0, lvs...)
- },
- },
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
-func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
-func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Gauge), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- return m.MetricVec.WithLabelValues(lvs...).(Gauge)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *GaugeVec) With(labels Labels) Gauge {
- return m.MetricVec.With(labels).(Gauge)
-}
-
-// GaugeFunc is a Gauge whose value is determined at collect time by calling a
-// provided function.
-//
-// To create GaugeFunc instances, use NewGaugeFunc.
-type GaugeFunc interface {
- Metric
- Collector
-}
-
-// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
-// value reported is determined by calling the given function from within the
-// Write method. Take into account that metric collection may happen
-// concurrently. If that results in concurrent calls to Write, like in the case
-// where a GaugeFunc is directly registered with Prometheus, the provided
-// function must be concurrency-safe.
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), GaugeValue, function)
-}
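A short sketch, assuming this vendored API, showing the GaugeVec and GaugeFunc constructors deleted above; the names worker_queue_length and app_goroutines are made up for illustration.

package main

import (
	"fmt"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A GaugeVec partitioned by one label, as created by NewGaugeVec above.
	queueLength := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "worker_queue_length",
			Help: "Current number of items in each worker queue.",
		},
		[]string{"queue"},
	)
	prometheus.MustRegister(queueLength)

	// WithLabelValues panics on a label-count mismatch;
	// GetMetricWithLabelValues is the error-returning variant.
	queueLength.WithLabelValues("email").Set(17)
	queueLength.With(prometheus.Labels{"queue": "billing"}).Add(3)

	// A GaugeFunc reads its value at collect time; the callback must be
	// safe for concurrent use, as noted in the NewGaugeFunc doc above.
	goroutines := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "app_goroutines",
			Help: "Number of goroutines, read at scrape time.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	)
	prometheus.MustRegister(goroutines)

	fmt.Println("metrics registered")
}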
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
deleted file mode 100644
index b0d4fb95..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package prometheus
-
-import (
- "fmt"
- "runtime"
- "runtime/debug"
- "time"
-)
-
-type goCollector struct {
- goroutines Gauge
- gcDesc *Desc
-
- // metrics to describe and collect
- metrics memStatsMetrics
-}
-
-// NewGoCollector returns a collector which exports metrics about the current
-// go process.
-func NewGoCollector() *goCollector {
- return &goCollector{
- goroutines: NewGauge(GaugeOpts{
- Namespace: "go",
- Name: "goroutines",
- Help: "Number of goroutines that currently exist.",
- }),
- gcDesc: NewDesc(
- "go_gc_duration_seconds",
- "A summary of the GC invocation durations.",
- nil, nil),
- metrics: memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained by system. Sum of all system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes_total"),
- "Total number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
- valType: GaugeValue,
- },
- },
- }
-}
-
-func memstatNamespace(s string) string {
- return fmt.Sprintf("go_memstats_%s", s)
-}
-
-// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutines.Desc()
- ch <- c.gcDesc
-
- for _, i := range c.metrics {
- ch <- i.desc
- }
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- c.goroutines.Set(float64(runtime.NumGoroutine()))
- ch <- c.goroutines
-
- var stats debug.GCStats
- stats.PauseQuantiles = make([]time.Duration, 5)
- debug.ReadGCStats(&stats)
-
- quantiles := make(map[float64]float64)
- for idx, pq := range stats.PauseQuantiles[1:] {
- quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
- }
- quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
- ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
-
- ms := &runtime.MemStats{}
- runtime.ReadMemStats(ms)
- for _, i := range c.metrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
-}
-
-// memStatsMetrics provide description, value, and value type for memstat metrics.
-type memStatsMetrics []struct {
- desc *Desc
- eval func(*runtime.MemStats) float64
- valType ValueType
-}
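Since NewGoCollector returns an unexported type behind the Collector interface, and the default registry of this snapshot most likely registers a Go collector on its own, the sketch below only drains Describe to show which descriptors the deleted collector exports.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Treat the go collector purely as a Collector and list its descriptors
	// instead of registering it a second time with the default registry.
	var c prometheus.Collector = prometheus.NewGoCollector()

	descs := make(chan *prometheus.Desc)
	go func() {
		c.Describe(descs)
		close(descs)
	}()
	for d := range descs {
		fmt.Println(d)
	}
}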
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
deleted file mode 100644
index 7a689108..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ /dev/null
@@ -1,448 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "sort"
- "sync/atomic"
-
- "github.com/golang/protobuf/proto"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
-//
-// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
-//
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
-//
-// To create Histogram instances, use NewHistogram.
-type Histogram interface {
- Metric
- Collector
-
- // Observe adds a single observation to the histogram.
- Observe(float64)
-}
-
-// bucketLabel is used for the label that defines the upper bound of a
-// bucket of a histogram ("le" -> "less or equal").
-const bucketLabel = "le"
-
-var (
- // DefBuckets are the default Histogram buckets. The default buckets are
- // tailored to broadly measure the response time (in seconds) of a
- // network service. Most likely, however, you will be required to define
- // buckets customized to your use case.
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
-
- errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
- )
-)
-
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is zero or negative.
-func LinearBuckets(start, width float64, count int) []float64 {
- if count < 1 {
- panic("LinearBuckets needs a positive count")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start += width
- }
- return buckets
-}
-
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
-//
-// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
-// or if 'factor' is less than or equal to 1.
-func ExponentialBuckets(start, factor float64, count int) []float64 {
- if count < 1 {
- panic("ExponentialBuckets needs a positive count")
- }
- if start <= 0 {
- panic("ExponentialBuckets needs a positive start value")
- }
- if factor <= 1 {
- panic("ExponentialBuckets needs a factor greater than 1")
- }
- buckets := make([]float64, count)
- for i := range buckets {
- buckets[i] = start
- start *= factor
- }
- return buckets
-}
-
-// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
-type HistogramOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Histogram (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Histogram must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Histogram. Mandatory!
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this
- // Histogram. Histograms with the same fully-qualified name must have the
- // same label names in their ConstLabels.
- //
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a
- // HistogramVec. ConstLabels serve only special purposes. One is for the
- // special case where the value of a label does not change during the
- // lifetime of a process, e.g. if the revision of the running binary is
- // put into a label. Another, more advanced purpose is if more than one
- // Collector needs to collect Histograms with the same fully-qualified
-// name. In that case, those Histograms must differ in the values of
- // their ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
- ConstLabels Labels
-
- // Buckets defines the buckets into which observations are counted. Each
- // element in the slice is the upper inclusive bound of a bucket. The
- // values must be sorted in strictly increasing order. There is no need
- // to add a highest bucket with +Inf bound, it will be added
- // implicitly. The default value is DefBuckets.
- Buckets []float64
-}
-
-// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
-// panics if the buckets in HistogramOpts are not in strictly increasing order.
-func NewHistogram(opts HistogramOpts) Histogram {
- return newHistogram(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
-
-func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels) != len(labelValues) {
- panic(errInconsistentCardinality)
- }
-
- for _, n := range desc.variableLabels {
- if n == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == bucketLabel {
- panic(errBucketLabelNotAllowed)
- }
- }
-
- if len(opts.Buckets) == 0 {
- opts.Buckets = DefBuckets
- }
-
- h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: makeLabelPairs(desc, labelValues),
- }
- for i, upperBound := range h.upperBounds {
- if i < len(h.upperBounds)-1 {
- if upperBound >= h.upperBounds[i+1] {
- panic(fmt.Errorf(
- "histogram buckets must be in increasing order: %f >= %f",
- upperBound, h.upperBounds[i+1],
- ))
- }
- } else {
- if math.IsInf(upperBound, +1) {
- // The +Inf bucket is implicit. Remove it here.
- h.upperBounds = h.upperBounds[:i]
- }
- }
- }
- // Finally we know the final length of h.upperBounds and can make counts.
- h.counts = make([]uint64, len(h.upperBounds))
-
- h.Init(h) // Init self-collection.
- return h
-}
-
-type histogram struct {
- // sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- sumBits uint64
- count uint64
-
- SelfCollector
- // Note that there is no mutex required.
-
- desc *Desc
-
- upperBounds []float64
- counts []uint64
-
- labelPairs []*dto.LabelPair
-}
-
-func (h *histogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *histogram) Observe(v float64) {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
- // Microbenchmarks (BenchmarkHistogramNoLabels):
- // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
- // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
- // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
- i := sort.SearchFloat64s(h.upperBounds, v)
- if i < len(h.counts) {
- atomic.AddUint64(&h.counts[i], 1)
- }
- atomic.AddUint64(&h.count, 1)
- for {
- oldBits := atomic.LoadUint64(&h.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
- break
- }
- }
-}
-
-func (h *histogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
- buckets := make([]*dto.Bucket, len(h.upperBounds))
-
- his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
- his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
- var count uint64
- for i, upperBound := range h.upperBounds {
- count += atomic.LoadUint64(&h.counts[i])
- buckets[i] = &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(upperBound),
- }
- }
- his.Bucket = buckets
- out.Histogram = his
- out.Label = h.labelPairs
- return nil
-}
-
-// HistogramVec is a Collector that bundles a set of Histograms that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewHistogramVec.
-type HistogramVec struct {
- MetricVec
-}
-
-// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &HistogramVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
- },
- },
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Histogram), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Histogram), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
- return m.MetricVec.WithLabelValues(lvs...).(Histogram)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
- return m.MetricVec.With(labels).(Histogram)
-}
-
-type constHistogram struct {
- desc *Desc
- count uint64
- sum float64
- buckets map[float64]uint64
- labelPairs []*dto.LabelPair
-}
-
-func (h *constHistogram) Desc() *Desc {
- return h.desc
-}
-
-func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
- buckets := make([]*dto.Bucket, 0, len(h.buckets))
-
- his.SampleCount = proto.Uint64(h.count)
- his.SampleSum = proto.Float64(h.sum)
-
- for upperBound, count := range h.buckets {
- buckets = append(buckets, &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
- UpperBound: proto.Float64(upperBound),
- })
- }
-
- if len(buckets) > 0 {
- sort.Sort(buckSort(buckets))
- }
- his.Bucket = buckets
-
- out.Histogram = his
- out.Label = h.labelPairs
-
- return nil
-}
-
-// NewConstHistogram returns a metric representing a Prometheus histogram with
-// fixed values for the count, sum, and bucket counts. As those parameters
-// cannot be changed, the returned value does not implement the Histogram
-// interface (but only the Metric interface). Users of this package will not
-// have much use for it in regular operations. However, when implementing custom
-// Collectors, it is useful as a throw-away metric that is generated on the fly
-// to send it to Prometheus in the Collect method.
-//
-// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
-//
-// NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
-func NewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
- }
- return &constHistogram{
- desc: desc,
- count: count,
- sum: sum,
- buckets: buckets,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstHistogram is a version of NewConstHistogram that panics where
-// NewConstHistogram would have returned an error.
-func MustNewConstHistogram(
- desc *Desc,
- count uint64,
- sum float64,
- buckets map[float64]uint64,
- labelValues ...string,
-) Metric {
- m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-type buckSort []*dto.Bucket
-
-func (s buckSort) Len() int {
- return len(s)
-}
-
-func (s buckSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s buckSort) Less(i, j int) bool {
- return s[i].GetUpperBound() < s[j].GetUpperBound()
-}
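A minimal sketch of the histogram API removed above, assuming the pre-modules import path: a HistogramVec with ExponentialBuckets and one observation; the metric and label names are illustrative.

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// ExponentialBuckets(0.001, 2, 10) yields 10 upper bounds from 1ms
	// doubling up to 512ms; the +Inf bucket is added implicitly.
	requestDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "HTTP request latencies in seconds.",
			Buckets: prometheus.ExponentialBuckets(0.001, 2, 10),
		},
		[]string{"handler"},
	)
)

func init() {
	prometheus.MustRegister(requestDuration)
}

func main() {
	start := time.Now()
	// ... handle a request ...
	requestDuration.WithLabelValues("index").Observe(time.Since(start).Seconds())

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}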
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
deleted file mode 100644
index e078e3ed..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
- "strconv"
- "strings"
- "time"
-)
-
-var instLabels = []string{"method", "code"}
-
-type nower interface {
- Now() time.Time
-}
-
-type nowFunc func() time.Time
-
-func (n nowFunc) Now() time.Time {
- return n()
-}
-
-var now nower = nowFunc(func() time.Time {
- return time.Now()
-})
-
-func nowSeries(t ...time.Time) nower {
- return nowFunc(func() time.Time {
- defer func() {
- t = t[1:]
- }()
-
- return t[0]
- })
-}
-
-// InstrumentHandler wraps the given HTTP handler for instrumentation. It
-// registers four metric collectors (if not already done) and reports HTTP
-// metrics to the (newly or already) registered collectors: http_requests_total
-// (CounterVec), http_request_duration_microseconds (Summary),
-// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
-// has a constant label named "handler" with the provided handlerName as
-// value. http_requests_total is a metric vector partitioned by HTTP method
-// (label name "method") and HTTP status code (label name "code").
-//
-// Note that InstrumentHandler has several issues:
-//
-// - It uses Summaries rather than Histograms. Summaries are not useful if
-// aggregation across multiple instances is required.
-//
-// - It uses microseconds as unit, which is deprecated and should be replaced by
-// seconds.
-//
-// - The size of the request is calculated in a separate goroutine. Since this
-// calculator requires access to the request header, it creates a race with
-// any writes to the header performed during request handling.
-// httputil.ReverseProxy is a prominent example for a handler
-// performing such writes.
-//
-// Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Consider this function
-// DEPRECATED and prefer direct instrumentation in the meantime.
-func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
- return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
-}
-
-// InstrumentHandlerFunc wraps the given function for instrumentation. It
-// otherwise works in the same way as InstrumentHandler (and shares the same
-// issues).
-func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
- return InstrumentHandlerFuncWithOpts(
- SummaryOpts{
- Subsystem: "http",
- ConstLabels: Labels{"handler": handlerName},
- },
- handlerFunc,
- )
-}
-
-// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
-// issues) but provides more flexibility (at the cost of a more complex call
-// syntax). As InstrumentHandler, this function registers four metric
-// collectors, but it uses the provided SummaryOpts to create them. However, the
-// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
-// by "requests_total", "request_duration_microseconds", "request_size_bytes",
-// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
-// help string. The names of the variable labels of the http_requests_total
-// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
-//
-// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
-// behavior of InstrumentHandler:
-//
-// prometheus.InstrumentHandlerWithOpts(
-// prometheus.SummaryOpts{
-// Subsystem: "http",
-// ConstLabels: prometheus.Labels{"handler": handlerName},
-// },
-// handler,
-// )
-//
-// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
-// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
-// and all its fields are set to the equally named fields in the provided
-// SummaryOpts.
-func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
- return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
-}
-
-// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
-// the same issues) but provides more flexibility (at the cost of a more complex
-// call syntax). See InstrumentHandlerWithOpts for details how the provided
-// SummaryOpts are used.
-func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
- reqCnt := NewCounterVec(
- CounterOpts{
- Namespace: opts.Namespace,
- Subsystem: opts.Subsystem,
- Name: "requests_total",
- Help: "Total number of HTTP requests made.",
- ConstLabels: opts.ConstLabels,
- },
- instLabels,
- )
-
- opts.Name = "request_duration_microseconds"
- opts.Help = "The HTTP request latencies in microseconds."
- reqDur := NewSummary(opts)
-
- opts.Name = "request_size_bytes"
- opts.Help = "The HTTP request sizes in bytes."
- reqSz := NewSummary(opts)
-
- opts.Name = "response_size_bytes"
- opts.Help = "The HTTP response sizes in bytes."
- resSz := NewSummary(opts)
-
- regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
- regReqDur := MustRegisterOrGet(reqDur).(Summary)
- regReqSz := MustRegisterOrGet(reqSz).(Summary)
- regResSz := MustRegisterOrGet(resSz).(Summary)
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- now := time.Now()
-
- delegate := &responseWriterDelegator{ResponseWriter: w}
- out := make(chan int)
- urlLen := 0
- if r.URL != nil {
- urlLen = len(r.URL.String())
- }
- go computeApproximateRequestSize(r, out, urlLen)
-
- _, cn := w.(http.CloseNotifier)
- _, fl := w.(http.Flusher)
- _, hj := w.(http.Hijacker)
- _, rf := w.(io.ReaderFrom)
- var rw http.ResponseWriter
- if cn && fl && hj && rf {
- rw = &fancyResponseWriterDelegator{delegate}
- } else {
- rw = delegate
- }
- handlerFunc(rw, r)
-
- elapsed := float64(time.Since(now)) / float64(time.Microsecond)
-
- method := sanitizeMethod(r.Method)
- code := sanitizeCode(delegate.status)
- regReqCnt.WithLabelValues(method, code).Inc()
- regReqDur.Observe(elapsed)
- regResSz.Observe(float64(delegate.written))
- regReqSz.Observe(float64(<-out))
- })
-}
-
-func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
- s += len(r.Method)
- s += len(r.Proto)
- for name, values := range r.Header {
- s += len(name)
- for _, value := range values {
- s += len(value)
- }
- }
- s += len(r.Host)
-
- // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
-
- if r.ContentLength != -1 {
- s += int(r.ContentLength)
- }
- out <- s
-}
-
-type responseWriterDelegator struct {
- http.ResponseWriter
-
- handler, method string
- status int
- written int64
- wroteHeader bool
-}
-
-func (r *responseWriterDelegator) WriteHeader(code int) {
- r.status = code
- r.wroteHeader = true
- r.ResponseWriter.WriteHeader(code)
-}
-
-func (r *responseWriterDelegator) Write(b []byte) (int, error) {
- if !r.wroteHeader {
- r.WriteHeader(http.StatusOK)
- }
- n, err := r.ResponseWriter.Write(b)
- r.written += int64(n)
- return n, err
-}
-
-type fancyResponseWriterDelegator struct {
- *responseWriterDelegator
-}
-
-func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
- return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
-}
-
-func (f *fancyResponseWriterDelegator) Flush() {
- f.ResponseWriter.(http.Flusher).Flush()
-}
-
-func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- return f.ResponseWriter.(http.Hijacker).Hijack()
-}
-
-func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
- if !f.wroteHeader {
- f.WriteHeader(http.StatusOK)
- }
- n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
- f.written += n
- return n, err
-}
-
-func sanitizeMethod(m string) string {
- switch m {
- case "GET", "get":
- return "get"
- case "PUT", "put":
- return "put"
- case "HEAD", "head":
- return "head"
- case "POST", "post":
- return "post"
- case "DELETE", "delete":
- return "delete"
- case "CONNECT", "connect":
- return "connect"
- case "OPTIONS", "options":
- return "options"
- case "NOTIFY", "notify":
- return "notify"
- default:
- return strings.ToLower(m)
- }
-}
-
-func sanitizeCode(s int) string {
- switch s {
- case 100:
- return "100"
- case 101:
- return "101"
-
- case 200:
- return "200"
- case 201:
- return "201"
- case 202:
- return "202"
- case 203:
- return "203"
- case 204:
- return "204"
- case 205:
- return "205"
- case 206:
- return "206"
-
- case 300:
- return "300"
- case 301:
- return "301"
- case 302:
- return "302"
- case 304:
- return "304"
- case 305:
- return "305"
- case 307:
- return "307"
-
- case 400:
- return "400"
- case 401:
- return "401"
- case 402:
- return "402"
- case 403:
- return "403"
- case 404:
- return "404"
- case 405:
- return "405"
- case 406:
- return "406"
- case 407:
- return "407"
- case 408:
- return "408"
- case 409:
- return "409"
- case 410:
- return "410"
- case 411:
- return "411"
- case 412:
- return "412"
- case 413:
- return "413"
- case 414:
- return "414"
- case 415:
- return "415"
- case 416:
- return "416"
- case 417:
- return "417"
- case 418:
- return "418"
-
- case 500:
- return "500"
- case 501:
- return "501"
- case 502:
- return "502"
- case 503:
- return "503"
- case 504:
- return "504"
- case 505:
- return "505"
-
- case 428:
- return "428"
- case 429:
- return "429"
- case 431:
- return "431"
- case 511:
- return "511"
-
- default:
- return strconv.Itoa(s)
- }
-}
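For completeness, the deprecated instrumentation helper removed above was typically used as below; the handler name "index" is illustrative, and the caveats from the deleted doc comment (Summaries, microseconds) apply.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	index := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	}

	// InstrumentHandlerFunc registers the four collectors described above,
	// each carrying the constant label handler="index".
	http.HandleFunc("/", prometheus.InstrumentHandlerFunc("index", index))

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}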
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
deleted file mode 100644
index 86fd81c1..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-const separatorByte byte = 255
-
-// A Metric models a single sample value with its meta data being exported to
-// Prometheus. Implementers of Metric in this package include Gauge, Counter,
-// Untyped, and Summary. Users can implement their own Metric types, but that
-// should be rarely needed. See the example for SelfCollector, which is also an
-// example for a user-implemented Metric.
-type Metric interface {
- // Desc returns the descriptor for the Metric. This method idempotently
- // returns the same descriptor throughout the lifetime of the
- // Metric. The returned descriptor is immutable by contract. A Metric
- // unable to describe itself must return an invalid descriptor (created
- // with NewInvalidDesc).
- Desc() *Desc
- // Write encodes the Metric into a "Metric" Protocol Buffer data
- // transmission object.
- //
- // Implementers of custom Metric types must observe concurrency safety
- // as reads of this metric may occur at any time, and any blocking
- // occurs at the expense of total performance of rendering all
- // registered metrics. Ideally Metric implementations should support
- // concurrent readers.
- //
- // The Prometheus client library attempts to minimize memory allocations
- // and will provide a pre-existing reset dto.Metric pointer. Prometheus
- // may recycle the dto.Metric proto message, so Metric implementations
- // should just populate the provided dto.Metric and then should not keep
- // any reference to it.
- //
- // While populating dto.Metric, labels must be sorted lexicographically.
- // (Implementers may find LabelPairSorter useful for that.)
- Write(*dto.Metric) error
-}
-
-// Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just
-// an alias of this type (which might change when the requirement arises.)
-//
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
-type Opts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Metric (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the metric must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this metric. Mandatory!
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this metric. Metrics
- // with the same fully-qualified name must have the same label names in
- // their ConstLabels.
- //
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a metric
- // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
- // serve only special purposes. One is for the special case where the
- // value of a label does not change during the lifetime of a process,
- // e.g. if the revision of the running binary is put into a
- // label. Another, more advanced purpose is if more than one Collector
- // needs to collect Metrics with the same fully-qualified name. In that
- // case, those Metrics must differ in the values of their
- // ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
- ConstLabels Labels
-}
-
-// BuildFQName joins the given three name components by "_". Empty name
-// components are ignored. If the name parameter itself is empty, an empty
-// string is returned, no matter what. Metric implementations included in this
-// library use this function internally to generate the fully-qualified metric
-// name from the name component in their Opts. Users of the library will only
-// need this function if they implement their own Metric or instantiate a Desc
-// (with NewDesc) directly.
-func BuildFQName(namespace, subsystem, name string) string {
- if name == "" {
- return ""
- }
- switch {
- case namespace != "" && subsystem != "":
- return strings.Join([]string{namespace, subsystem, name}, "_")
- case namespace != "":
- return strings.Join([]string{namespace, name}, "_")
- case subsystem != "":
- return strings.Join([]string{subsystem, name}, "_")
- }
- return name
-}
-
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
-type LabelPairSorter []*dto.LabelPair
-
-func (s LabelPairSorter) Len() int {
- return len(s)
-}
-
-func (s LabelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s LabelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
-}
-
-type hashSorter []uint64
-
-func (s hashSorter) Len() int {
- return len(s)
-}
-
-func (s hashSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s hashSorter) Less(i, j int) bool {
- return s[i] < s[j]
-}
-
-type invalidMetric struct {
- desc *Desc
- err error
-}
-
-// NewInvalidMetric returns a metric whose Write method always returns the
-// provided error. It is useful if a Collector finds itself unable to collect
-// a metric and wishes to report an error to the registry.
-func NewInvalidMetric(desc *Desc, err error) Metric {
- return &invalidMetric{desc, err}
-}
-
-func (m *invalidMetric) Desc() *Desc { return m.desc }
-
-func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
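A small sketch of BuildFQName and the const-metric helpers referenced in the deleted metric.go, assuming this vendored API; the namespace, subsystem, and label values are placeholders.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// BuildFQName joins non-empty components with "_"; empty components are
	// skipped, and an empty name yields an empty string.
	fmt.Println(prometheus.BuildFQName("app", "http", "requests_total")) // app_http_requests_total
	fmt.Println(prometheus.BuildFQName("app", "", "up"))                 // app_up
	fmt.Println(prometheus.BuildFQName("", "", ""))                      // (empty)

	// A Desc plus MustNewConstMetric is the usual way a custom Collector
	// produces throw-away Metrics at collect time.
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("app", "cache", "entries"),
		"Number of entries in the cache.",
		[]string{"shard"},
		nil,
	)
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42, "shard-0")
	fmt.Println(m.Desc())
}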
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
deleted file mode 100644
index d8cf0eda..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "github.com/prometheus/procfs"
-
-type processCollector struct {
- pid int
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- cpuTotal Counter
- openFDs, maxFDs Gauge
- vsize, rss Gauge
- startTime Gauge
-}
-
-// NewProcessCollector returns a collector which exports the current state of
-// process metrics including cpu, memory and file descriptor usage as well as
-// the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) *processCollector {
- return NewProcessCollectorPIDFn(
- func() (int, error) { return pid, nil },
- namespace,
- )
-}
-
-// NewProcessCollectorPIDFn returns a collector which exports the current state
-// of process metrics including cpu, memory and file descriptor usage as well
-// as the process start time under the given namespace. The given pidFn is
-// called on each collect and is used to determine the process to export
-// metrics for.
-func NewProcessCollectorPIDFn(
- pidFn func() (int, error),
- namespace string,
-) *processCollector {
- c := processCollector{
- pidFn: pidFn,
- collectFn: func(chan<- Metric) {},
-
- cpuTotal: NewCounter(CounterOpts{
- Namespace: namespace,
- Name: "process_cpu_seconds_total",
- Help: "Total user and system CPU time spent in seconds.",
- }),
- openFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_open_fds",
- Help: "Number of open file descriptors.",
- }),
- maxFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_max_fds",
- Help: "Maximum number of open file descriptors.",
- }),
- vsize: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_virtual_memory_bytes",
- Help: "Virtual memory size in bytes.",
- }),
- rss: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_resident_memory_bytes",
- Help: "Resident memory size in bytes.",
- }),
- startTime: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_start_time_seconds",
- Help: "Start time of the process since unix epoch in seconds.",
- }),
- }
-
- // Set up process metric collection if supported by the runtime.
- if _, err := procfs.NewStat(); err == nil {
- c.collectFn = c.processCollect
- }
-
- return &c
-}
-
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal.Desc()
- ch <- c.openFDs.Desc()
- ch <- c.maxFDs.Desc()
- ch <- c.vsize.Desc()
- ch <- c.rss.Desc()
- ch <- c.startTime.Desc()
-}
-
-// Collect returns the current state of all metrics of the collector.
-func (c *processCollector) Collect(ch chan<- Metric) {
- c.collectFn(ch)
-}
-
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
-func (c *processCollector) processCollect(ch chan<- Metric) {
- pid, err := c.pidFn()
- if err != nil {
- return
- }
-
- p, err := procfs.NewProc(pid)
- if err != nil {
- return
- }
-
- if stat, err := p.NewStat(); err == nil {
- c.cpuTotal.Set(stat.CPUTime())
- ch <- c.cpuTotal
- c.vsize.Set(float64(stat.VirtualMemory()))
- ch <- c.vsize
- c.rss.Set(float64(stat.ResidentMemory()))
- ch <- c.rss
-
- if startTime, err := stat.StartTime(); err == nil {
- c.startTime.Set(startTime)
- ch <- c.startTime
- }
- }
-
- if fds, err := p.FileDescriptorsLen(); err == nil {
- c.openFDs.Set(float64(fds))
- ch <- c.openFDs
- }
-
- if limits, err := p.NewLimits(); err == nil {
- c.maxFDs.Set(float64(limits.OpenFiles))
- ch <- c.maxFDs
- }
-}
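A hedged sketch for the deleted process collector: because the default registry of this snapshot presumably already tracks the current process under an empty namespace, this example registers a second collector under an illustrative namespace to avoid a name clash.

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Exports the same process metrics a second time, prefixed as
	// myapp_process_* (the "myapp" namespace is purely illustrative).
	prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}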
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go
deleted file mode 100644
index 5ec0a3ab..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/push.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-package prometheus
-
-// Push triggers a metric collection by the default registry and pushes all
-// collected metrics to the Pushgateway specified by url. See the Pushgateway
-// documentation for detailed implications of the job and instance
-// parameter. instance can be left empty. You can use just host:port or ip:port
-// as url, in which case 'http://' is added automatically. You can also include
-// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
-//
-// Note that all previously pushed metrics with the same job and instance will
-// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
-// to push to the Pushgateway.)
-func Push(job, instance, url string) error {
- return defRegistry.Push(job, instance, url, "PUT")
-}
-
-// PushAdd works like Push, but only previously pushed metrics with the same
-// name (and the same job and instance) will be replaced. (It uses HTTP method
-// 'POST' to push to the Pushgateway.)
-func PushAdd(job, instance, url string) error {
- return defRegistry.Push(job, instance, url, "POST")
-}
-
-// PushCollectors works like Push, but it does not collect from the default
-// registry. Instead, it collects from the provided collectors. It is a
-// convenient way to push only a few metrics.
-func PushCollectors(job, instance, url string, collectors ...Collector) error {
- return pushCollectors(job, instance, url, "PUT", collectors...)
-}
-
-// PushAddCollectors works like PushAdd, but it does not collect from the
-// default registry. Instead, it collects from the provided collectors. It is a
-// convenient way to push only a few metrics.
-func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
- return pushCollectors(job, instance, url, "POST", collectors...)
-}
-
-func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
- r := newRegistry()
- for _, collector := range collectors {
- if _, err := r.Register(collector); err != nil {
- return err
- }
- }
- return r.Push(job, instance, url, method)
-}
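
The push helpers deleted above were the pre-push-package way of talking to a Pushgateway. A minimal sketch of the removed API, assuming a Pushgateway reachable at pushgateway:9091 (a hypothetical address) and the old top-level constructors shown elsewhere in this diff:

```go
package main

import (
	"log"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_job_last_run_unixtime",
		Help: "Unix time of the last completed batch job run.",
	})
	lastRun.Set(float64(time.Now().Unix()))

	// PushCollectors collects only from the given collectors and pushes the
	// result via HTTP PUT, replacing everything previously pushed under this
	// job/instance pair. PushAddCollectors would use POST and merge instead.
	if err := prometheus.PushCollectors(
		"batch_job", "instance1", "pushgateway:9091", lastRun,
	); err != nil {
		log.Fatal(err)
	}
}
```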
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
deleted file mode 100644
index f6ae51be..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ /dev/null
@@ -1,741 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-package prometheus
-
-import (
- "bytes"
- "compress/gzip"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "os"
- "sort"
- "strings"
- "sync"
-
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-var (
- defRegistry = newDefaultRegistry()
- errAlreadyReg = errors.New("duplicate metrics collector registration attempted")
-)
-
-// Constants relevant to the HTTP interface.
-const (
- // APIVersion is the version of the format of the exported data. This
- // will match this library's version, which subscribes to the Semantic
- // Versioning scheme.
- APIVersion = "0.0.4"
-
- // DelimitedTelemetryContentType is the content type set on telemetry
- // data responses in delimited protobuf format.
- DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`
- // TextTelemetryContentType is the content type set on telemetry data
- // responses in text format.
- TextTelemetryContentType = `text/plain; version=` + APIVersion
- // ProtoTextTelemetryContentType is the content type set on telemetry
- // data responses in protobuf text format. (Only used for debugging.)
- ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`
- // ProtoCompactTextTelemetryContentType is the content type set on
- // telemetry data responses in protobuf compact text format. (Only used
- // for debugging.)
- ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`
-
- // Constants for object pools.
- numBufs = 4
- numMetricFamilies = 1000
- numMetrics = 10000
-
- // Capacity for the channel to collect metrics and descriptors.
- capMetricChan = 1000
- capDescChan = 10
-
- contentTypeHeader = "Content-Type"
- contentLengthHeader = "Content-Length"
- contentEncodingHeader = "Content-Encoding"
-
- acceptEncodingHeader = "Accept-Encoding"
- acceptHeader = "Accept"
-)
-
-// Handler returns the HTTP handler for the global Prometheus registry. It is
-// already instrumented with InstrumentHandler (using "prometheus" as handler
-// name). Usually the handler is used to handle the "/metrics" endpoint.
-//
-// Please note the issues described in the doc comment of InstrumentHandler. You
-// might want to consider using UninstrumentedHandler instead.
-func Handler() http.Handler {
- return InstrumentHandler("prometheus", defRegistry)
-}
-
-// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
-// handler is not instrumented. This is useful if no instrumentation is desired
-// (for whatever reason) or if the instrumentation has to happen with a
-// different handler name (or with a different instrumentation approach
-// altogether). See the InstrumentHandler example.
-func UninstrumentedHandler() http.Handler {
- return defRegistry
-}
-
-// Register registers a new Collector to be included in metrics collection. It
-// returns an error if the descriptors provided by the Collector are invalid or
-// if they - in combination with descriptors of already registered Collectors -
-// do not fulfill the consistency and uniqueness criteria described in the Desc
-// documentation.
-//
-// Do not register the same Collector multiple times concurrently. (Registering
-// the same Collector twice would result in an error anyway, but on top of that,
-// it is not safe to do so concurrently.)
-func Register(m Collector) error {
- _, err := defRegistry.Register(m)
- return err
-}
-
-// MustRegister works like Register but panics where Register would have
-// returned an error. MustRegister is also Variadic, where Register only
-// accepts a single Collector to register.
-func MustRegister(m ...Collector) {
- for i := range m {
- if err := Register(m[i]); err != nil {
- panic(err)
- }
- }
-}
-
-// RegisterOrGet works like Register but does not return an error if a Collector
-// is registered that equals a previously registered Collector. (Two Collectors
-// are considered equal if their Describe method yields the same set of
-// descriptors.) Instead, the previously registered Collector is returned (which
-// is helpful if the new and previously registered Collectors are equal but not
-// identical, i.e. not pointers to the same object).
-//
-// As for Register, it is still not safe to call RegisterOrGet with the same
-// Collector multiple times concurrently.
-func RegisterOrGet(m Collector) (Collector, error) {
- return defRegistry.RegisterOrGet(m)
-}
-
-// MustRegisterOrGet works like Register but panics where RegisterOrGet would
-// have returned an error.
-func MustRegisterOrGet(m Collector) Collector {
- existing, err := RegisterOrGet(m)
- if err != nil {
- panic(err)
- }
- return existing
-}
-
-// Unregister unregisters the Collector that equals the Collector passed in as
-// an argument. (Two Collectors are considered equal if their Describe method
-// yields the same set of descriptors.) The function returns whether a Collector
-// was unregistered.
-func Unregister(c Collector) bool {
- return defRegistry.Unregister(c)
-}
-
-// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
-// are collected. The hook function must be set before metrics collection begins
-// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The
-// MetricFamily protobufs returned by the hook function are merged with the
-// metrics collected in the usual way.
-//
-// This is a way to directly inject MetricFamily protobufs managed and owned by
-// the caller. The caller has full responsibility. As no registration of the
-// injected metrics has happened, there is no descriptor to check against, and
-// there are no registration-time checks. If collect-time checks are disabled
-// (see function EnableCollectChecks), no sanity checks are performed on the
-// returned protobufs at all. If collect-checks are enabled, type and uniqueness
-// checks are performed, but no further consistency checks (which would require
-// knowledge of a metric descriptor).
-//
-// Sorting concerns: The caller is responsible for sorting the label pairs in
-// each metric. However, the order of metrics will be sorted by the registry as
-// it is required anyway after merging with the metric families collected
-// conventionally.
-//
-// The function must be callable at any time and concurrently.
-func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
- defRegistry.metricFamilyInjectionHook = hook
-}
-
-// PanicOnCollectError sets the behavior whether a panic is caused upon an error
-// while metrics are collected and served to the HTTP endpoint. By default, an
-// internal server error (status code 500) is served with an error message.
-func PanicOnCollectError(b bool) {
- defRegistry.panicOnCollectError = b
-}
-
-// EnableCollectChecks enables (or disables) additional consistency checks
-// during metrics collection. These additional checks are not enabled by default
-// because they inflict a performance penalty and the errors they check for can
-// only happen if the used Metric and Collector types have internal programming
-// errors. It can be helpful to enable these checks while working with custom
-// Collectors or Metrics whose correctness is not well established yet.
-func EnableCollectChecks(b bool) {
- defRegistry.collectChecksEnabled = b
-}
-
-// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
-// certain encoding. It returns the number of bytes written and any error
-// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
-// are encoders.
-type encoder func(io.Writer, *dto.MetricFamily) (int, error)
-
-type registry struct {
- mtx sync.RWMutex
- collectorsByID map[uint64]Collector // ID is a sum of the desc IDs.
- descIDs map[uint64]struct{}
- dimHashesByName map[string]uint64
- bufPool chan *bytes.Buffer
- metricFamilyPool chan *dto.MetricFamily
- metricPool chan *dto.Metric
- metricFamilyInjectionHook func() []*dto.MetricFamily
-
- panicOnCollectError, collectChecksEnabled bool
-}
-
-func (r *registry) Register(c Collector) (Collector, error) {
- descChan := make(chan *Desc, capDescChan)
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
-
- newDescIDs := map[uint64]struct{}{}
- newDimHashesByName := map[string]uint64{}
- var collectorID uint64 // Just a sum of all desc IDs.
- var duplicateDescErr error
-
- r.mtx.Lock()
- defer r.mtx.Unlock()
- // Conduct various tests...
- for desc := range descChan {
-
- // Is the descriptor valid at all?
- if desc.err != nil {
- return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
- }
-
- // Is the descID unique?
- // (In other words: Is the fqName + constLabel combination unique?)
- if _, exists := r.descIDs[desc.id]; exists {
- duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
- }
- // If it is not a duplicate desc in this collector, add it to
- // the collectorID. (We allow duplicate descs within the same
- // collector, but their existence must be a no-op.)
- if _, exists := newDescIDs[desc.id]; !exists {
- newDescIDs[desc.id] = struct{}{}
- collectorID += desc.id
- }
-
- // Are all the label names and the help string consistent with
- // previous descriptors of the same name?
- // First check existing descriptors...
- if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
- }
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
- }
- }
- }
- // Did anything happen at all?
- if len(newDescIDs) == 0 {
- return nil, errors.New("collector has no descriptors")
- }
- if existing, exists := r.collectorsByID[collectorID]; exists {
- return existing, errAlreadyReg
- }
- // If the collectorID is new, but at least one of the descs existed
- // before, we are in trouble.
- if duplicateDescErr != nil {
- return nil, duplicateDescErr
- }
-
- // Only after all tests have passed, actually register.
- r.collectorsByID[collectorID] = c
- for hash := range newDescIDs {
- r.descIDs[hash] = struct{}{}
- }
- for name, dimHash := range newDimHashesByName {
- r.dimHashesByName[name] = dimHash
- }
- return c, nil
-}
-
-func (r *registry) RegisterOrGet(m Collector) (Collector, error) {
- existing, err := r.Register(m)
- if err != nil && err != errAlreadyReg {
- return nil, err
- }
- return existing, nil
-}
-
-func (r *registry) Unregister(c Collector) bool {
- descChan := make(chan *Desc, capDescChan)
- go func() {
- c.Describe(descChan)
- close(descChan)
- }()
-
- descIDs := map[uint64]struct{}{}
- var collectorID uint64 // Just a sum of the desc IDs.
- for desc := range descChan {
- if _, exists := descIDs[desc.id]; !exists {
- collectorID += desc.id
- descIDs[desc.id] = struct{}{}
- }
- }
-
- r.mtx.RLock()
- if _, exists := r.collectorsByID[collectorID]; !exists {
- r.mtx.RUnlock()
- return false
- }
- r.mtx.RUnlock()
-
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- delete(r.collectorsByID, collectorID)
- for id := range descIDs {
- delete(r.descIDs, id)
- }
- // dimHashesByName is left untouched as those must be consistent
- // throughout the lifetime of a program.
- return true
-}
-
-func (r *registry) Push(job, instance, pushURL, method string) error {
- if !strings.Contains(pushURL, "://") {
- pushURL = "http://" + pushURL
- }
- if strings.HasSuffix(pushURL, "/") {
- pushURL = pushURL[:len(pushURL)-1]
- }
- pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job))
- if instance != "" {
- pushURL += "/instances/" + url.QueryEscape(instance)
- }
- buf := r.getBuf()
- defer r.giveBuf(buf)
- if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil {
- if r.panicOnCollectError {
- panic(err)
- }
- return err
- }
- req, err := http.NewRequest(method, pushURL, buf)
- if err != nil {
- return err
- }
- req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType)
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 202 {
- return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL)
- }
- return nil
-}
-
-func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- contentType := expfmt.Negotiate(req.Header)
- buf := r.getBuf()
- defer r.giveBuf(buf)
- writer, encoding := decorateWriter(req, buf)
- if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil {
- if r.panicOnCollectError {
- panic(err)
- }
- http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError)
- return
- }
- if closer, ok := writer.(io.Closer); ok {
- closer.Close()
- }
- header := w.Header()
- header.Set(contentTypeHeader, string(contentType))
- header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
- if encoding != "" {
- header.Set(contentEncodingHeader, encoding)
- }
- w.Write(buf.Bytes())
-}
-
-func (r *registry) writePB(encoder expfmt.Encoder) error {
- var metricHashes map[uint64]struct{}
- if r.collectChecksEnabled {
- metricHashes = make(map[uint64]struct{})
- }
- metricChan := make(chan Metric, capMetricChan)
- wg := sync.WaitGroup{}
-
- r.mtx.RLock()
- metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
-
- // Scatter.
- // (Collectors could be complex and slow, so we call them all at once.)
- wg.Add(len(r.collectorsByID))
- go func() {
- wg.Wait()
- close(metricChan)
- }()
- for _, collector := range r.collectorsByID {
- go func(collector Collector) {
- defer wg.Done()
- collector.Collect(metricChan)
- }(collector)
- }
- r.mtx.RUnlock()
-
- // Drain metricChan in case of premature return.
- defer func() {
- for _ = range metricChan {
- }
- }()
-
- // Gather.
- for metric := range metricChan {
- // This could be done concurrently, too, but it would require locking
- // of metricFamiliesByName (and of metricHashes if checks are
- // enabled). Most likely not worth it.
- desc := metric.Desc()
- metricFamily, ok := metricFamiliesByName[desc.fqName]
- if !ok {
- metricFamily = r.getMetricFamily()
- defer r.giveMetricFamily(metricFamily)
- metricFamily.Name = proto.String(desc.fqName)
- metricFamily.Help = proto.String(desc.help)
- metricFamiliesByName[desc.fqName] = metricFamily
- }
- dtoMetric := r.getMetric()
- defer r.giveMetric(dtoMetric)
- if err := metric.Write(dtoMetric); err != nil {
- // TODO: Consider different means of error reporting so
- // that a single erroneous metric could be skipped
- // instead of blowing up the whole collection.
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
- }
- switch {
- case metricFamily.Type != nil:
- // Type already set. We are good.
- case dtoMetric.Gauge != nil:
- metricFamily.Type = dto.MetricType_GAUGE.Enum()
- case dtoMetric.Counter != nil:
- metricFamily.Type = dto.MetricType_COUNTER.Enum()
- case dtoMetric.Summary != nil:
- metricFamily.Type = dto.MetricType_SUMMARY.Enum()
- case dtoMetric.Untyped != nil:
- metricFamily.Type = dto.MetricType_UNTYPED.Enum()
- case dtoMetric.Histogram != nil:
- metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
- default:
- return fmt.Errorf("empty metric collected: %s", dtoMetric)
- }
- if r.collectChecksEnabled {
- if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil {
- return err
- }
- }
- metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
- }
-
- if r.metricFamilyInjectionHook != nil {
- for _, mf := range r.metricFamilyInjectionHook() {
- existingMF, exists := metricFamiliesByName[mf.GetName()]
- if !exists {
- metricFamiliesByName[mf.GetName()] = mf
- if r.collectChecksEnabled {
- for _, m := range mf.Metric {
- if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil {
- return err
- }
- }
- }
- continue
- }
- for _, m := range mf.Metric {
- if r.collectChecksEnabled {
- if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil {
- return err
- }
- }
- existingMF.Metric = append(existingMF.Metric, m)
- }
- }
- }
-
- // Now that MetricFamilies are all set, sort their Metrics
- // lexicographically by their label values.
- for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
- }
-
- // Write out MetricFamilies sorted by their name.
- names := make([]string, 0, len(metricFamiliesByName))
- for name := range metricFamiliesByName {
- names = append(names, name)
- }
- sort.Strings(names)
-
- for _, name := range names {
- if err := encoder.Encode(metricFamiliesByName[name]); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error {
-
- // Type consistency with metric family.
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
- metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
- metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
- metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
- metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
- return fmt.Errorf(
- "collected metric %s %s is not a %s",
- metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
- )
- }
-
- // Is the metric unique (i.e. no other metric with the same name and the same label values)?
- h := hashNew()
- h = hashAdd(h, metricFamily.GetName())
- h = hashAddByte(h, separatorByte)
- // Make sure label pairs are sorted. We depend on it for the consistency
- // check. Label pairs must be sorted by contract. But the point of this
- // method is to check for contract violations. So we better do the sort
- // now.
- sort.Sort(LabelPairSorter(dtoMetric.Label))
- for _, lp := range dtoMetric.Label {
- h = hashAdd(h, lp.GetValue())
- h = hashAddByte(h, separatorByte)
- }
- if _, exists := metricHashes[h]; exists {
- return fmt.Errorf(
- "collected metric %s %s was collected before with the same name and label values",
- metricFamily.GetName(), dtoMetric,
- )
- }
- metricHashes[h] = struct{}{}
-
- if desc == nil {
- return nil // Nothing left to check if we have no desc.
- }
-
- // Desc consistency with metric family.
- if metricFamily.GetName() != desc.fqName {
- return fmt.Errorf(
- "collected metric %s %s has name %q but should have %q",
- metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName,
- )
- }
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
- )
- }
-
- // Is the desc consistent with the content of the metric?
- lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
- lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
- for _, l := range desc.variableLabels {
- lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
- Name: proto.String(l),
- })
- }
- if len(lpsFromDesc) != len(dtoMetric.Label) {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- sort.Sort(LabelPairSorter(lpsFromDesc))
- for i, lpFromDesc := range lpsFromDesc {
- lpFromMetric := dtoMetric.Label[i]
- if lpFromDesc.GetName() != lpFromMetric.GetName() ||
- lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
- return fmt.Errorf(
- "labels in collected metric %s %s are inconsistent with descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
- }
-
- r.mtx.RLock() // Remaining checks need the read lock.
- defer r.mtx.RUnlock()
-
- // Is the desc registered?
- if _, exist := r.descIDs[desc.id]; !exist {
- return fmt.Errorf(
- "collected metric %s %s with unregistered descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- )
- }
-
- return nil
-}
-
-func (r *registry) getBuf() *bytes.Buffer {
- select {
- case buf := <-r.bufPool:
- return buf
- default:
- return &bytes.Buffer{}
- }
-}
-
-func (r *registry) giveBuf(buf *bytes.Buffer) {
- buf.Reset()
- select {
- case r.bufPool <- buf:
- default:
- }
-}
-
-func (r *registry) getMetricFamily() *dto.MetricFamily {
- select {
- case mf := <-r.metricFamilyPool:
- return mf
- default:
- return &dto.MetricFamily{}
- }
-}
-
-func (r *registry) giveMetricFamily(mf *dto.MetricFamily) {
- mf.Reset()
- select {
- case r.metricFamilyPool <- mf:
- default:
- }
-}
-
-func (r *registry) getMetric() *dto.Metric {
- select {
- case m := <-r.metricPool:
- return m
- default:
- return &dto.Metric{}
- }
-}
-
-func (r *registry) giveMetric(m *dto.Metric) {
- m.Reset()
- select {
- case r.metricPool <- m:
- default:
- }
-}
-
-func newRegistry() *registry {
- return ®istry{
- collectorsByID: map[uint64]Collector{},
- descIDs: map[uint64]struct{}{},
- dimHashesByName: map[string]uint64{},
- bufPool: make(chan *bytes.Buffer, numBufs),
- metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies),
- metricPool: make(chan *dto.Metric, numMetrics),
- }
-}
-
-func newDefaultRegistry() *registry {
- r := newRegistry()
- r.Register(NewProcessCollector(os.Getpid(), ""))
- r.Register(NewGoCollector())
- return r
-}
-
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
- header := request.Header.Get(acceptEncodingHeader)
- parts := strings.Split(header, ",")
- for _, part := range parts {
- part := strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return gzip.NewWriter(writer), "gzip"
- }
- }
- return writer, ""
-}
-
-type metricSorter []*dto.Metric
-
-func (s metricSorter) Len() int {
- return len(s)
-}
-
-func (s metricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s metricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
- // inconsistent. However, we have to deal with the fact, as
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
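
The global-registry helpers deleted above (Register, MustRegister, Handler, UninstrumentedHandler, and friends) were the usual entry points before the exported Registry type and promhttp existed. A brief sketch of that removed flow, using only constructors and signatures visible in this diff; the metric name and port are illustrative:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

var inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "myapp",
	Name:      "requests_in_flight",
	Help:      "Requests currently being served.",
})

func main() {
	// MustRegister panics if the collector's descriptors are invalid or clash
	// with previously registered ones (see registry.Register above).
	prometheus.MustRegister(inFlight)

	// Handler serves the default registry, instrumented via InstrumentHandler;
	// UninstrumentedHandler() is the variant without self-instrumentation.
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```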
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
deleted file mode 100644
index eb849616..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ /dev/null
@@ -1,538 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "math"
- "sort"
- "sync"
- "time"
-
- "github.com/beorn7/perks/quantile"
- "github.com/golang/protobuf/proto"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// quantileLabel is used for the label that defines the quantile in a
-// summary.
-const quantileLabel = "quantile"
-
-// A Summary captures individual observations from an event or sample stream and
-// summarizes them in a manner similar to traditional summary statistics: 1. sum
-// of observations, 2. observation count, 3. rank estimations.
-//
-// A typical use-case is the observation of request latencies. By default, a
-// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations.
-//
-// Note that the rank estimations cannot be aggregated in a meaningful way with
-// the Prometheus query language (i.e. you cannot average or add them). If you
-// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
-// queries served across all instances of a service), consider the Histogram
-// metric type. See the Prometheus documentation for more details.
-//
-// To create Summary instances, use NewSummary.
-type Summary interface {
- Metric
- Collector
-
- // Observe adds a single observation to the summary.
- Observe(float64)
-}
-
-var (
- // DefObjectives are the default Summary quantile values.
- DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
-
- errQuantileLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in summaries", quantileLabel,
- )
-)
-
-// Default values for SummaryOpts.
-const (
- // DefMaxAge is the default duration for which observations stay
- // relevant.
- DefMaxAge time.Duration = 10 * time.Minute
- // DefAgeBuckets is the default number of buckets used to calculate the
- // age of observations.
- DefAgeBuckets = 5
- // DefBufCap is the standard buffer size for collecting Summary observations.
- DefBufCap = 500
-)
-
-// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
-type SummaryOpts struct {
- // Namespace, Subsystem, and Name are components of the fully-qualified
- // name of the Summary (created by joining these components with
- // "_"). Only Name is mandatory, the others merely help structuring the
- // name. Note that the fully-qualified name of the Summary must be a
- // valid Prometheus metric name.
- Namespace string
- Subsystem string
- Name string
-
- // Help provides information about this Summary. Mandatory!
- //
- // Metrics with the same fully-qualified name must have the same Help
- // string.
- Help string
-
- // ConstLabels are used to attach fixed labels to this
- // Summary. Summaries with the same fully-qualified name must have the
- // same label names in their ConstLabels.
- //
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a
- // SummaryVec. ConstLabels serve only special purposes. One is for the
- // special case where the value of a label does not change during the
- // lifetime of a process, e.g. if the revision of the running binary is
- // put into a label. Another, more advanced purpose is if more than one
- // Collector needs to collect Summaries with the same fully-qualified
- // name. In that case, those Summaries must differ in the values of
- // their ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
- ConstLabels Labels
-
- // Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported
- // for q will be the φ-quantile value for some φ between q-e and q+e.
- // The default value is DefObjectives.
- Objectives map[float64]float64
-
- // MaxAge defines the duration for which an observation stays relevant
- // for the summary. Must be positive. The default value is DefMaxAge.
- MaxAge time.Duration
-
- // AgeBuckets is the number of buckets used to exclude observations that
- // are older than MaxAge from the summary. A higher number has a
- // resource penalty, so only increase it if the higher resolution is
- // really required. For very high observation rates, you might want to
- // reduce the number of age buckets. With only one age bucket, you will
- // effectively see a complete reset of the summary each time MaxAge has
- // passed. The default value is DefAgeBuckets.
- AgeBuckets uint32
-
- // BufCap defines the default sample stream buffer size. The default
- // value of DefBufCap should suffice for most uses. If there is a need
- // to increase the value, a multiple of 500 is recommended (because that
- // is the internal buffer size of the underlying package
- // "github.com/bmizerany/perks/quantile").
- BufCap uint32
-}
-
-// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
-// method of perk/quantile is actually not working as advertised - and it might
-// be unfixable, as the underlying algorithm is apparently not capable of
-// merging summaries in the first place. To avoid using Merge, we are currently
-// adding observations to _each_ age bucket, i.e. the effort to add a sample is
-// essentially multiplied by the number of age buckets. When rotating age
-// buckets, we empty the previous head stream. On scrape time, we simply take
-// the quantiles from the head stream (no merging required). Result: More effort
-// on observation time, less effort on scrape time, which is exactly the
-// opposite of what we try to accomplish, but at least the results are correct.
-//
-// The quite elegant previous contraption to merge the age buckets efficiently
-// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
-// can't be used anymore.
-
-// NewSummary creates a new Summary based on the provided SummaryOpts.
-func NewSummary(opts SummaryOpts) Summary {
- return newSummary(
- NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ),
- opts,
- )
-}
-
-func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels) != len(labelValues) {
- panic(errInconsistentCardinality)
- }
-
- for _, n := range desc.variableLabels {
- if n == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
- for _, lp := range desc.constLabelPairs {
- if lp.GetName() == quantileLabel {
- panic(errQuantileLabelNotAllowed)
- }
- }
-
- if len(opts.Objectives) == 0 {
- opts.Objectives = DefObjectives
- }
-
- if opts.MaxAge < 0 {
- panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
- }
- if opts.MaxAge == 0 {
- opts.MaxAge = DefMaxAge
- }
-
- if opts.AgeBuckets == 0 {
- opts.AgeBuckets = DefAgeBuckets
- }
-
- if opts.BufCap == 0 {
- opts.BufCap = DefBufCap
- }
-
- s := &summary{
- desc: desc,
-
- objectives: opts.Objectives,
- sortedObjectives: make([]float64, 0, len(opts.Objectives)),
-
- labelPairs: makeLabelPairs(desc, labelValues),
-
- hotBuf: make([]float64, 0, opts.BufCap),
- coldBuf: make([]float64, 0, opts.BufCap),
- streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
- }
- s.headStreamExpTime = time.Now().Add(s.streamDuration)
- s.hotBufExpTime = s.headStreamExpTime
-
- for i := uint32(0); i < opts.AgeBuckets; i++ {
- s.streams = append(s.streams, s.newStream())
- }
- s.headStream = s.streams[0]
-
- for qu := range s.objectives {
- s.sortedObjectives = append(s.sortedObjectives, qu)
- }
- sort.Float64s(s.sortedObjectives)
-
- s.Init(s) // Init self-collection.
- return s
-}
-
-type summary struct {
- SelfCollector
-
- bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
- mtx sync.Mutex // Protects every other moving part.
- // Lock bufMtx before mtx if both are needed.
-
- desc *Desc
-
- objectives map[float64]float64
- sortedObjectives []float64
-
- labelPairs []*dto.LabelPair
-
- sum float64
- cnt uint64
-
- hotBuf, coldBuf []float64
-
- streams []*quantile.Stream
- streamDuration time.Duration
- headStream *quantile.Stream
- headStreamIdx int
- headStreamExpTime, hotBufExpTime time.Time
-}
-
-func (s *summary) Desc() *Desc {
- return s.desc
-}
-
-func (s *summary) Observe(v float64) {
- s.bufMtx.Lock()
- defer s.bufMtx.Unlock()
-
- now := time.Now()
- if now.After(s.hotBufExpTime) {
- s.asyncFlush(now)
- }
- s.hotBuf = append(s.hotBuf, v)
- if len(s.hotBuf) == cap(s.hotBuf) {
- s.asyncFlush(now)
- }
-}
-
-func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
- qs := make([]*dto.Quantile, 0, len(s.objectives))
-
- s.bufMtx.Lock()
- s.mtx.Lock()
- // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
- s.swapBufs(time.Now())
- s.bufMtx.Unlock()
-
- s.flushColdBuf()
- sum.SampleCount = proto.Uint64(s.cnt)
- sum.SampleSum = proto.Float64(s.sum)
-
- for _, rank := range s.sortedObjectives {
- var q float64
- if s.headStream.Count() == 0 {
- q = math.NaN()
- } else {
- q = s.headStream.Query(rank)
- }
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- s.mtx.Unlock()
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
- return nil
-}
-
-func (s *summary) newStream() *quantile.Stream {
- return quantile.NewTargeted(s.objectives)
-}
-
-// asyncFlush needs bufMtx locked.
-func (s *summary) asyncFlush(now time.Time) {
- s.mtx.Lock()
- s.swapBufs(now)
-
- // Unblock the original goroutine that was responsible for the mutation
- // that triggered the compaction. But hold onto the global non-buffer
- // state mutex until the operation finishes.
- go func() {
- s.flushColdBuf()
- s.mtx.Unlock()
- }()
-}
-
-// maybeRotateStreams needs mtx AND bufMtx locked.
-func (s *summary) maybeRotateStreams() {
- for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
- s.headStream.Reset()
- s.headStreamIdx++
- if s.headStreamIdx >= len(s.streams) {
- s.headStreamIdx = 0
- }
- s.headStream = s.streams[s.headStreamIdx]
- s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
- }
-}
-
-// flushColdBuf needs mtx locked.
-func (s *summary) flushColdBuf() {
- for _, v := range s.coldBuf {
- for _, stream := range s.streams {
- stream.Insert(v)
- }
- s.cnt++
- s.sum += v
- }
- s.coldBuf = s.coldBuf[0:0]
- s.maybeRotateStreams()
-}
-
-// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
-func (s *summary) swapBufs(now time.Time) {
- if len(s.coldBuf) != 0 {
- panic("coldBuf is not empty")
- }
- s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
- // hotBuf is now empty and gets new expiration set.
- for now.After(s.hotBufExpTime) {
- s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
- }
-}
-
-type quantSort []*dto.Quantile
-
-func (s quantSort) Len() int {
- return len(s)
-}
-
-func (s quantSort) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s quantSort) Less(i, j int) bool {
- return s[i].GetQuantile() < s[j].GetQuantile()
-}
-
-// SummaryVec is a Collector that bundles a set of Summaries that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewSummaryVec.
-type SummaryVec struct {
- MetricVec
-}
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &SummaryVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
- },
- },
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Summary and not a
-// Metric so that no type conversion is required.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Summary), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Summary and not a Metric so that no
-// type conversion is required.
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Summary), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
- return m.MetricVec.WithLabelValues(lvs...).(Summary)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
- return m.MetricVec.With(labels).(Summary)
-}
-
-type constSummary struct {
- desc *Desc
- count uint64
- sum float64
- quantiles map[float64]float64
- labelPairs []*dto.LabelPair
-}
-
-func (s *constSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
- qs := make([]*dto.Quantile, 0, len(s.quantiles))
-
- sum.SampleCount = proto.Uint64(s.count)
- sum.SampleSum = proto.Float64(s.sum)
-
- for rank, q := range s.quantiles {
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- return nil
-}
-
-// NewConstSummary returns a metric representing a Prometheus summary with fixed
-// values for the count, sum, and quantiles. As those parameters cannot be
-// changed, the returned value does not implement the Summary interface (but
-// only the Metric interface). Users of this package will not have much use for
-// it in regular operations. However, when implementing custom Collectors, it is
-// useful as a throw-away metric that is generated on the fly to send it to
-// Prometheus in the Collect method.
-//
-// quantiles maps ranks to quantile values. For example, a median latency of
-// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
-//
-// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
-func NewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
- }
- return &constSummary{
- desc: desc,
- count: count,
- sum: sum,
- quantiles: quantiles,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstSummary is a version of NewConstSummary that panics where
-// NewConstSummary would have returned an error.
-func MustNewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) Metric {
- m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
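
SummaryOpts and the constructors deleted above were used roughly as follows. This is a minimal sketch against the removed API, with the metric name and label set chosen only for illustration:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var latency = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name: "http_request_duration_seconds",
		Help: "Request latency, summarized per handler.",
		// Objectives left empty, so DefObjectives (0.5, 0.9, 0.99) applies;
		// MaxAge, AgeBuckets, and BufCap likewise fall back to their defaults.
	},
	[]string{"handler"},
)

func observe(handler string, start time.Time) {
	// WithLabelValues panics on label-cardinality mismatches, which keeps the
	// hot path free of error handling.
	latency.WithLabelValues(handler).Observe(time.Since(start).Seconds())
}

func main() {
	prometheus.MustRegister(latency)
	observe("index", time.Now())
}
```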
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
deleted file mode 100644
index 89b86ea9..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-type Untyped interface {
- Metric
- Collector
-
- // Set sets the Untyped metric to an arbitrary value.
- Set(float64)
- // Inc increments the Untyped metric by 1.
- Inc()
- // Dec decrements the Untyped metric by 1.
- Dec()
- // Add adds the given value to the Untyped metric. (The value can be
- // negative, resulting in a decrease.)
- Add(float64)
- // Sub subtracts the given value from the Untyped metric. (The value can
- // be negative, resulting in an increase.)
- Sub(float64)
-}
-
-// UntypedOpts is an alias for Opts. See there for doc comments.
-type UntypedOpts Opts
-
-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
-func NewUntyped(opts UntypedOpts) Untyped {
- return newValue(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, 0)
-}
-
-// UntypedVec is a Collector that bundles a set of Untyped metrics that all
-// share the same Desc, but have different values for their variable
-// labels. This is used if you want to count the same thing partitioned by
-// various dimensions. Create instances with NewUntypedVec.
-type UntypedVec struct {
- MetricVec
-}
-
-// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &UntypedVec{
- MetricVec: MetricVec{
- children: map[uint64]Metric{},
- desc: desc,
- newMetric: func(lvs ...string) Metric {
- return newValue(desc, UntypedValue, 0, lvs...)
- },
- },
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns an Untyped and not a
-// Metric so that no type conversion is required.
-func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns an Untyped and not a Metric so that no
-// type conversion is required.
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
- return m.MetricVec.WithLabelValues(lvs...).(Untyped)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *UntypedVec) With(labels Labels) Untyped {
- return m.MetricVec.With(labels).(Untyped)
-}
-
-// UntypedFunc is an Untyped whose value is determined at collect time by
-// calling a provided function.
-//
-// To create UntypedFunc instances, use NewUntypedFunc.
-type UntypedFunc interface {
- Metric
- Collector
-}
-
-// NewUntypedFunc creates a new UntypedFunc based on the provided
-// UntypedOpts. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where an UntypedFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
- return newValueFunc(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, function)
-}
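
The Untyped metric and UntypedFunc removed above behaved like a Gauge that carries no type information. As a sketch of the deleted API, with cacheEntries standing in for whatever value the caller wants to expose (a hypothetical helper, not part of the library):

```go
package main

import (
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
)

var entries int64 // updated elsewhere by the application

// cacheEntries is a hypothetical callback; it is invoked at collect time and
// therefore must be safe for concurrent use, as the NewUntypedFunc docs note.
func cacheEntries() float64 {
	return float64(atomic.LoadInt64(&entries))
}

var cacheSize = prometheus.NewUntypedFunc(prometheus.UntypedOpts{
	Name: "myapp_cache_entries",
	Help: "Current number of entries in the cache.",
}, cacheEntries)

func main() {
	prometheus.MustRegister(cacheSize)
}
```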
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
deleted file mode 100644
index b54ac11e..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "errors"
- "fmt"
- "math"
- "sort"
- "sync/atomic"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/golang/protobuf/proto"
-)
-
-// ValueType is an enumeration of metric types that represent a simple value.
-type ValueType int
-
-// Possible values for the ValueType enum.
-const (
- _ ValueType = iota
- CounterValue
- GaugeValue
- UntypedValue
-)
-
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-// value is a generic metric for simple values. It implements Metric, Collector,
-// Counter, Gauge, and Untyped. Its effective type is determined by
-// ValueType. This is a low-level building block used by the library to back the
-// implementations of Counter, Gauge, and Untyped.
-type value struct {
- // valBits contains the bits of the represented float64 value. It has
- // to go first in the struct to guarantee alignment for atomic
- // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
-
- SelfCollector
-
- desc *Desc
- valType ValueType
- labelPairs []*dto.LabelPair
-}
-
-// newValue returns a newly allocated value with the given Desc, ValueType,
-// sample value and label values. It panics if the number of label
-// values is different from the number of variable labels in Desc.
-func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
- if len(labelValues) != len(desc.variableLabels) {
- panic(errInconsistentCardinality)
- }
- result := &value{
- desc: desc,
- valType: valueType,
- valBits: math.Float64bits(val),
- labelPairs: makeLabelPairs(desc, labelValues),
- }
- result.Init(result)
- return result
-}
-
-func (v *value) Desc() *Desc {
- return v.desc
-}
-
-func (v *value) Set(val float64) {
- atomic.StoreUint64(&v.valBits, math.Float64bits(val))
-}
-
-func (v *value) Inc() {
- v.Add(1)
-}
-
-func (v *value) Dec() {
- v.Add(-1)
-}
-
-func (v *value) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&v.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (v *value) Sub(val float64) {
- v.Add(val * -1)
-}
-
-func (v *value) Write(out *dto.Metric) error {
- val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
- return populateMetric(v.valType, val, v.labelPairs, out)
-}
-
-// valueFunc is a generic metric for simple values retrieved on collect time
-// from a function. It implements Metric and Collector. Its effective type is
-// determined by ValueType. This is a low-level building block used by the
-// library to back the implementations of CounterFunc, GaugeFunc, and
-// UntypedFunc.
-type valueFunc struct {
- SelfCollector
-
- desc *Desc
- valType ValueType
- function func() float64
- labelPairs []*dto.LabelPair
-}
-
-// newValueFunc returns a newly allocated valueFunc with the given Desc and
-// ValueType. The value reported is determined by calling the given function
-// from within the Write method. Take into account that metric collection may
-// happen concurrently. If that results in concurrent calls to Write, like in
-// the case where a valueFunc is directly registered with Prometheus, the
-// provided function must be concurrency-safe.
-func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
- result := &valueFunc{
- desc: desc,
- valType: valueType,
- function: function,
- labelPairs: makeLabelPairs(desc, nil),
- }
- result.Init(result)
- return result
-}
-
-func (v *valueFunc) Desc() *Desc {
- return v.desc
-}
-
-func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, out)
-}
-
-// NewConstMetric returns a metric with one fixed value that cannot be
-// changed. Users of this package will not have much use for it in regular
-// operations. However, when implementing custom Collectors, it is useful as a
-// throw-away metric that is generated on the fly to send it to Prometheus in
-// the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc.
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
- }
- return &constMetric{
- desc: desc,
- valType: valueType,
- val: value,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstMetric is a version of NewConstMetric that panics where
-// NewConstMetric would have returned an error.
-func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
- m, err := NewConstMetric(desc, valueType, value, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
-
-type constMetric struct {
- desc *Desc
- valType ValueType
- val float64
- labelPairs []*dto.LabelPair
-}
-
-func (m *constMetric) Desc() *Desc {
- return m.desc
-}
-
-func (m *constMetric) Write(out *dto.Metric) error {
- return populateMetric(m.valType, m.val, m.labelPairs, out)
-}
-
-func populateMetric(
- t ValueType,
- v float64,
- labelPairs []*dto.LabelPair,
- m *dto.Metric,
-) error {
- m.Label = labelPairs
- switch t {
- case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v)}
- case GaugeValue:
- m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
- case UntypedValue:
- m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
- default:
- return fmt.Errorf("encountered unknown type %v", t)
- }
- return nil
-}
-
-func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
- if totalLen == 0 {
- // Super fast path.
- return nil
- }
- if len(desc.variableLabels) == 0 {
- // Moderately fast path.
- return desc.constLabelPairs
- }
- labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
- labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
- Value: proto.String(labelValues[i]),
- })
- }
- for _, lp := range desc.constLabelPairs {
- labelPairs = append(labelPairs, lp)
- }
- sort.Sort(LabelPairSorter(labelPairs))
- return labelPairs
-}
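
NewConstMetric and MustNewConstMetric, deleted above, are the building blocks for the usual exporter pattern: translating an external stats source into throw-away metrics at scrape time. A small sketch against the removed API, with the descriptor and value chosen for illustration only:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	depth *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		depth: prometheus.NewDesc(
			"myapp_queue_depth",
			"Current number of items in the work queue.",
			nil, nil,
		),
	}
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depth
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	depth := 7.0 // stand-in for a value fetched from the real source
	// A throw-away metric created on the fly, as the NewConstMetric comment
	// above describes; MustNewConstMetric panics on label-cardinality errors.
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, depth)
}

func main() {
	prometheus.MustRegister(newQueueCollector())
}
```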
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
deleted file mode 100644
index 68f94612..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import (
- "fmt"
- "sync"
-)
-
-// MetricVec is a Collector to bundle metrics of the same name that
-// differ in their label values. MetricVec is usually not used directly but as a
-// building block for implementations of vectors of a given metric
-// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
-// provided in this package.
-type MetricVec struct {
- mtx sync.RWMutex // Protects the children.
- children map[uint64]Metric
- desc *Desc
-
- newMetric func(labelValues ...string) Metric
-}
-
-// Describe implements Collector. The length of the returned slice
-// is always one.
-func (m *MetricVec) Describe(ch chan<- *Desc) {
- ch <- m.desc
-}
-
-// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) {
- m.mtx.RLock()
- defer m.mtx.RUnlock()
-
- for _, metric := range m.children {
- ch <- metric
- }
-}
-
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created.
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it at its start value (e.g. a Summary or
-// Histogram without any observations). See also the SummaryVec example.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return nil, err
- }
-
- m.mtx.RLock()
- metric, ok := m.children[h]
- m.mtx.RUnlock()
- if ok {
- return metric, nil
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
- return m.getOrCreateMetric(h, lvs...), nil
-}
-
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc.
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
- h, err := m.hashLabels(labels)
- if err != nil {
- return nil, err
- }
-
- m.mtx.RLock()
- metric, ok := m.children[h]
- m.mtx.RUnlock()
- if ok {
- return metric, nil
- }
-
- lvs := make([]string, len(labels))
- for i, label := range m.desc.variableLabels {
- lvs[i] = labels[label]
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- return m.getOrCreateMetric(h, lvs...), nil
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
-// occurs. The method allows neat syntax like:
-// httpReqs.WithLabelValues("404", "POST").Inc()
-func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
- metric, err := m.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return metric
-}
-
-// With works as GetMetricWith, but panics if an error occurs. The method allows
-// neat syntax like:
-// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
-func (m *MetricVec) With(labels Labels) Metric {
- metric, err := m.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return metric
-}
-
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such an inconsistent label count can
-// never match an actual Metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return false
- }
- if _, ok := m.children[h]; !ok {
- return false
- }
- delete(m.children, h)
- return true
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in the Desc of the MetricVec. However, such
-// inconsistent Labels can never match an actual Metric, so the method will
-// always return false in that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- h, err := m.hashLabels(labels)
- if err != nil {
- return false
- }
- if _, ok := m.children[h]; !ok {
- return false
- }
- delete(m.children, h)
- return true
-}
-
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for h := range m.children {
- delete(m.children, h)
- }
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if len(vals) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
- }
- h := hashNew()
- for _, val := range vals {
- h = hashAdd(h, val)
- }
- return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if len(labels) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
- }
- h := hashNew()
- for _, label := range m.desc.variableLabels {
- val, ok := labels[label]
- if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
- }
- h = hashAdd(h, val)
- }
- return h, nil
-}
-
-func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
- metric, ok := m.children[hash]
- if !ok {
- // Copy labelValues. Otherwise, they would be allocated even if we don't go
- // down this code path.
- copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
- metric = m.newMetric(copiedLabelValues...)
- m.children[hash] = metric
- }
- return metric
-}
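The deleted vec.go implements the hashing and locking behind CounterVec, GaugeVec and friends. Its documented behaviour is easiest to see from the caller's side; the sketch below assumes a hypothetical example_http_requests_total counter and is not taken from the exporter:

```go
// Minimal sketch (hypothetical metric) exercising the vector API documented
// in the deleted file, now imported from the module.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_http_requests_total",
			Help: "HTTP requests by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(httpReqs)

	// Order-sensitive fast path; panics on a label-count mismatch.
	httpReqs.WithLabelValues("404", "POST").Inc()

	// Map form: more verbose, but immune to argument-order mistakes.
	httpReqs.With(prometheus.Labels{"code": "200", "method": "GET"}).Add(3)

	// Error-returning form, useful when label values come from untrusted input.
	if ctr, err := httpReqs.GetMetricWithLabelValues("500", "GET"); err == nil {
		ctr.Inc()
	}

	// Deleting a child returns true only when the label values match exactly.
	_ = httpReqs.DeleteLabelValues("404", "POST")
}
```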
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/prometheus/client_model/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
deleted file mode 100644
index 20110e41..00000000
--- a/vendor/github.com/prometheus/client_model/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Data model artifacts for Prometheus.
-Copyright 2012-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
deleted file mode 100644
index b065f868..00000000
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: metrics.proto
-// DO NOT EDIT!
-
-/*
-Package io_prometheus_client is a generated protocol buffer package.
-
-It is generated from these files:
- metrics.proto
-
-It has these top-level messages:
- LabelPair
- Gauge
- Counter
- Quantile
- Summary
- Untyped
- Histogram
- Bucket
- Metric
- MetricFamily
-*/
-package io_prometheus_client
-
-import proto "github.com/golang/protobuf/proto"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = math.Inf
-
-type MetricType int32
-
-const (
- MetricType_COUNTER MetricType = 0
- MetricType_GAUGE MetricType = 1
- MetricType_SUMMARY MetricType = 2
- MetricType_UNTYPED MetricType = 3
- MetricType_HISTOGRAM MetricType = 4
-)
-
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
-}
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
-}
-
-func (x MetricType) Enum() *MetricType {
- p := new(MetricType)
- *p = x
- return p
-}
-func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
-}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
- if err != nil {
- return err
- }
- *x = MetricType(value)
- return nil
-}
-
-type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
- }
- return 0
-}
-
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
- }
- return 0
-}
-
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
- }
- return 0
-}
-
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
- }
- return nil
-}
-
-type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Histogram struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
- }
- return 0
-}
-
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
- }
- return 0
-}
-
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
- }
- return nil
-}
-
-type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
- }
- return 0
-}
-
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
- }
- return 0
-}
-
-type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
- }
- return nil
-}
-
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
- }
- return nil
-}
-
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
- }
- return nil
-}
-
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
- }
- return nil
-}
-
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
- }
- return nil
-}
-
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
- }
- return nil
-}
-
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
- }
- return 0
-}
-
-type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
- }
- return ""
-}
-
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return MetricType_COUNTER
-}
-
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
-}
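metrics.pb.go is the generated protobuf data model that both client_golang and expfmt build on; it too is now pulled in as a module dependency. A minimal sketch, using a hypothetical metric family, of constructing these types directly:

```go
// Minimal sketch (hypothetical metric family) of building the generated
// client_model types by hand via the module import path.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("example_temperature_celsius"),
		Help: proto.String("Current temperature."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("room"),
				Value: proto.String("server"),
			}},
			Gauge: &dto.Gauge{Value: proto.Float64(23.5)},
		}},
	}
	fmt.Println(mf.GetName(), mf.GetMetric()[0].GetGauge().GetValue())
}
```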
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/prometheus/common/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
deleted file mode 100644
index 636a2c1a..00000000
--- a/vendor/github.com/prometheus/common/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Common libraries shared by Prometheus Go components.
-Copyright 2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
deleted file mode 100644
index a98696df..00000000
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "fmt"
- "io"
- "math"
- "mime"
- "net/http"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/model"
-)
-
-// Decoder types decode an input stream into metric families.
-type Decoder interface {
- Decode(*dto.MetricFamily) error
-}
-
-type DecodeOptions struct {
- // Timestamp is added to each value from the stream that has no explicit timestamp set.
- Timestamp model.Time
-}
-
-// ResponseFormat extracts the correct format from an HTTP response header.
-// If no matching format can be found, FmtUnknown is returned.
-func ResponseFormat(h http.Header) Format {
- ct := h.Get(hdrContentType)
-
- mediatype, params, err := mime.ParseMediaType(ct)
- if err != nil {
- return FmtUnknown
- }
-
- const (
- textType = "text/plain"
- jsonType = "application/json"
- )
-
- switch mediatype {
- case ProtoType:
- if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return FmtUnknown
- }
- if e, ok := params["encoding"]; ok && e != "delimited" {
- return FmtUnknown
- }
- return FmtProtoDelim
-
- case textType:
- if v, ok := params["version"]; ok && v != TextVersion {
- return FmtUnknown
- }
- return FmtText
-
- case jsonType:
- var prometheusAPIVersion string
-
- if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
- prometheusAPIVersion = params["version"]
- } else {
- prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
- }
-
- switch prometheusAPIVersion {
- case "0.0.2", "":
- return fmtJSON2
- default:
- return FmtUnknown
- }
- }
-
- return FmtUnknown
-}
-
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
-func NewDecoder(r io.Reader, format Format) Decoder {
- switch format {
- case FmtProtoDelim:
- return &protoDecoder{r: r}
- case fmtJSON2:
- return newJSON2Decoder(r)
- }
- return &textDecoder{r: r}
-}
-
-// protoDecoder implements the Decoder interface for protocol buffers.
-type protoDecoder struct {
- r io.Reader
-}
-
-// Decode implements the Decoder interface.
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- _, err := pbutil.ReadDelimited(d.r, v)
- if err != nil {
- return err
- }
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
- return fmt.Errorf("invalid metric name %q", v.GetName())
- }
- for _, m := range v.GetMetric() {
- if m == nil {
- continue
- }
- for _, l := range m.GetLabel() {
- if l == nil {
- continue
- }
- if !model.LabelValue(l.GetValue()).IsValid() {
- return fmt.Errorf("invalid label value %q", l.GetValue())
- }
- if !model.LabelName(l.GetName()).IsValid() {
- return fmt.Errorf("invalid label name %q", l.GetName())
- }
- }
- }
- return nil
-}
-
-// textDecoder implements the Decoder interface for the text protocol.
-type textDecoder struct {
- r io.Reader
- p TextParser
- fams []*dto.MetricFamily
-}
-
-// Decode implements the Decoder interface.
-func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
- }
- }
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
-}
-
-type SampleDecoder struct {
- Dec Decoder
- Opts *DecodeOptions
-
- f dto.MetricFamily
-}
-
-func (sd *SampleDecoder) Decode(s *model.Vector) error {
- if err := sd.Dec.Decode(&sd.f); err != nil {
- return err
- }
- *s = extractSamples(&sd.f, sd.Opts)
- return nil
-}
-
-// ExtractSamples builds a slice of samples from the provided metric families.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
- var all model.Vector
- for _, f := range fams {
- all = append(all, extractSamples(f, o)...)
- }
- return all
-}
-
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
- switch f.GetType() {
- case dto.MetricType_COUNTER:
- return extractCounter(o, f)
- case dto.MetricType_GAUGE:
- return extractGauge(o, f)
- case dto.MetricType_SUMMARY:
- return extractSummary(o, f)
- case dto.MetricType_UNTYPED:
- return extractUntyped(o, f)
- case dto.MetricType_HISTOGRAM:
- return extractHistogram(o, f)
- }
- panic("expfmt.extractSamples: unknown metric family type")
-}
-
-func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Counter == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Counter.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Gauge == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Gauge.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Untyped == nil {
- continue
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- smpl := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Untyped.GetValue()),
- }
-
- if m.TimestampMs != nil {
- smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- } else {
- smpl.Timestamp = o.Timestamp
- }
-
- samples = append(samples, smpl)
- }
-
- return samples
-}
-
-func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Summary == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- for _, q := range m.Summary.Quantile {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- // BUG(matt): Update other names to "quantile".
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetValue()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Summary.GetSampleCount()),
- Timestamp: timestamp,
- })
- }
-
- return samples
-}
-
-func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
- samples := make(model.Vector, 0, len(f.Metric))
-
- for _, m := range f.Metric {
- if m.Histogram == nil {
- continue
- }
-
- timestamp := o.Timestamp
- if m.TimestampMs != nil {
- timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
- }
-
- infSeen := false
-
- for _, q := range m.Histogram.Bucket {
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- if math.IsInf(q.GetUpperBound(), +1) {
- infSeen = true
- }
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(q.GetCumulativeCount()),
- Timestamp: timestamp,
- })
- }
-
- lset := make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleSum()),
- Timestamp: timestamp,
- })
-
- lset = make(model.LabelSet, len(m.Label)+1)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-
- count := &model.Sample{
- Metric: model.Metric(lset),
- Value: model.SampleValue(m.Histogram.GetSampleCount()),
- Timestamp: timestamp,
- }
- samples = append(samples, count)
-
- if !infSeen {
- // Append an infinity bucket sample.
- lset := make(model.LabelSet, len(m.Label)+2)
- for _, p := range m.Label {
- lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
- }
- lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
- lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-
- samples = append(samples, &model.Sample{
- Metric: model.Metric(lset),
- Value: count.Value,
- Timestamp: timestamp,
- })
- }
- }
-
- return samples
-}
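decode.go supplies format detection (ResponseFormat) and the Decoder used to parse scraped metrics. A minimal sketch, assuming a small hand-written text payload rather than exporter code, of driving the text decoder through the module import path:

```go
// Minimal sketch (hypothetical input text) of decoding the text exposition
// format with expfmt from the module.
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := "# TYPE example_requests_total counter\n" +
		"example_requests_total{path=\"/\"} 42\n"

	dec := expfmt.NewDecoder(strings.NewReader(in), expfmt.FmtText)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetType(), len(mf.GetMetric()))
	}
}
```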
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
deleted file mode 100644
index 11839ed6..00000000
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "fmt"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
-
- dto "github.com/prometheus/client_model/go"
-)
-
-// Encoder types encode metric families into an underlying wire protocol.
-type Encoder interface {
- Encode(*dto.MetricFamily) error
-}
-
-type encoder func(*dto.MetricFamily) error
-
-func (e encoder) Encode(v *dto.MetricFamily) error {
- return e(v)
-}
-
-// Negotiate returns the Content-Type based on the given Accept header.
-// If no appropriate accepted type is found, FmtText is returned.
-func Negotiate(h http.Header) Format {
- for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
- // Check for protocol buffer
- if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
- switch ac.Params["encoding"] {
- case "delimited":
- return FmtProtoDelim
- case "text":
- return FmtProtoText
- case "compact-text":
- return FmtProtoCompact
- }
- }
- // Check for text format.
- ver := ac.Params["version"]
- if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
- }
- }
- return FmtText
-}
-
-// NewEncoder returns a new encoder based on content type negotiation.
-func NewEncoder(w io.Writer, format Format) Encoder {
- switch format {
- case FmtProtoDelim:
- return encoder(func(v *dto.MetricFamily) error {
- _, err := pbutil.WriteDelimited(w, v)
- return err
- })
- case FmtProtoCompact:
- return encoder(func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, v.String())
- return err
- })
- case FmtProtoText:
- return encoder(func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
- return err
- })
- case FmtText:
- return encoder(func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, v)
- return err
- })
- }
- panic("expfmt.NewEncoder: unknown format")
-}
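encode.go is the serving-side counterpart: Negotiate picks a Format from the Accept header and NewEncoder writes metric families in that format. A minimal sketch of a hand-rolled handler with an illustrative example_up gauge; exporters usually serve metrics through the client library's HTTP handler rather than calling expfmt directly:

```go
// Minimal sketch (illustrative handler, gauge, and port) of content-type
// negotiation plus encoding with expfmt from the module.
package main

import (
	"net/http"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	format := expfmt.Negotiate(r.Header) // falls back to FmtText
	w.Header().Set("Content-Type", string(format))

	enc := expfmt.NewEncoder(w, format)
	up := &dto.MetricFamily{
		Name: proto.String("example_up"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}
	_ = enc.Encode(up)
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```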
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
deleted file mode 100644
index 366fbde9..00000000
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A package for reading and writing Prometheus metrics.
-package expfmt
-
-type Format string
-
-const (
- TextVersion = "0.0.4"
-
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
-
- // The Content-Type values for the different wire protocols.
- FmtUnknown Format = ``
- FmtText Format = `text/plain; version=` + TextVersion
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- FmtProtoText Format = ProtoFmt + ` encoding=text`
- FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-
- // fmtJSON2 is hidden as it is deprecated.
- fmtJSON2 Format = `application/json; version=0.0.2`
-)
-
-const (
- hdrContentType = "Content-Type"
- hdrAccept = "Accept"
-)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
deleted file mode 100644
index dc2eedee..00000000
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Build only when actually fuzzing
-// +build gofuzz
-
-package expfmt
-
-import "bytes"
-
-// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
-//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
-//
-// Further input samples should go in the folder fuzz/corpus.
-func Fuzz(in []byte) int {
- parser := TextParser{}
- _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
- if err != nil {
- return 0
- }
-
- return 1
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/json_decode.go b/vendor/github.com/prometheus/common/expfmt/json_decode.go
deleted file mode 100644
index cf865451..00000000
--- a/vendor/github.com/prometheus/common/expfmt/json_decode.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "sort"
-
- "github.com/golang/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/common/model"
-)
-
-type json2Decoder struct {
- dec *json.Decoder
- fams []*dto.MetricFamily
-}
-
-func newJSON2Decoder(r io.Reader) Decoder {
- return &json2Decoder{
- dec: json.NewDecoder(r),
- }
-}
-
-type histogram002 struct {
- Labels model.LabelSet `json:"labels"`
- Values map[string]float64 `json:"value"`
-}
-
-type counter002 struct {
- Labels model.LabelSet `json:"labels"`
- Value float64 `json:"value"`
-}
-
-func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) {
- labels := base.Clone().Merge(ext)
- delete(labels, model.MetricNameLabel)
-
- names := make([]string, 0, len(labels))
- for ln := range labels {
- names = append(names, string(ln))
- }
- sort.Strings(names)
-
- pairs := make([]*dto.LabelPair, 0, len(labels))
-
- for _, ln := range names {
- if !model.LabelNameRE.MatchString(ln) {
- return nil, fmt.Errorf("invalid label name %q", ln)
- }
- lv := labels[model.LabelName(ln)]
-
- pairs = append(pairs, &dto.LabelPair{
- Name: proto.String(ln),
- Value: proto.String(string(lv)),
- })
- }
-
- return pairs, nil
-}
-
-func (d *json2Decoder) more() error {
- var entities []struct {
- BaseLabels model.LabelSet `json:"baseLabels"`
- Docstring string `json:"docstring"`
- Metric struct {
- Type string `json:"type"`
- Values json.RawMessage `json:"value"`
- } `json:"metric"`
- }
-
- if err := d.dec.Decode(&entities); err != nil {
- return err
- }
- for _, e := range entities {
- f := &dto.MetricFamily{
- Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
- Help: proto.String(e.Docstring),
- Type: dto.MetricType_UNTYPED.Enum(),
- Metric: []*dto.Metric{},
- }
-
- d.fams = append(d.fams, f)
-
- switch e.Metric.Type {
- case "counter", "gauge":
- var values []counter002
-
- if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
- return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
- }
-
- for _, ctr := range values {
- labels, err := protoLabelSet(e.BaseLabels, ctr.Labels)
- if err != nil {
- return err
- }
- f.Metric = append(f.Metric, &dto.Metric{
- Label: labels,
- Untyped: &dto.Untyped{
- Value: proto.Float64(ctr.Value),
- },
- })
- }
-
- case "histogram":
- var values []histogram002
-
- if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
- return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
- }
-
- for _, hist := range values {
- quants := make([]string, 0, len(values))
- for q := range hist.Values {
- quants = append(quants, q)
- }
-
- sort.Strings(quants)
-
- for _, q := range quants {
- value := hist.Values[q]
- // The correct label is "quantile" but to not break old expressions
- // this remains "percentile"
- hist.Labels["percentile"] = model.LabelValue(q)
-
- labels, err := protoLabelSet(e.BaseLabels, hist.Labels)
- if err != nil {
- return err
- }
-
- f.Metric = append(f.Metric, &dto.Metric{
- Label: labels,
- Untyped: &dto.Untyped{
- Value: proto.Float64(value),
- },
- })
- }
- }
-
- default:
- return fmt.Errorf("unknown metric type %q", e.Metric.Type)
- }
- }
- return nil
-}
-
-// Decode implements the Decoder interface.
-func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
- if len(d.fams) == 0 {
- if err := d.more(); err != nil {
- return err
- }
- }
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
-}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
deleted file mode 100644
index 0bb9c14c..00000000
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bytes"
- "fmt"
- "io"
- "math"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
-)
-
-// MetricFamilyToText converts a MetricFamily proto message into text format and
-// writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered. This function does not perform checks on the
-// content of the metric and label names, i.e. invalid metric or label names
-// will result in invalid text format output.
-// This method fulfills the type 'prometheus.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
- var written int
-
- // Fail-fast checks.
- if len(in.Metric) == 0 {
- return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
- }
- name := in.GetName()
- if name == "" {
- return written, fmt.Errorf("MetricFamily has no name: %s", in)
- }
-
- // Comments, first HELP, then TYPE.
- if in.Help != nil {
- n, err := fmt.Fprintf(
- out, "# HELP %s %s\n",
- name, escapeString(*in.Help, false),
- )
- written += n
- if err != nil {
- return written, err
- }
- }
- metricType := in.GetType()
- n, err := fmt.Fprintf(
- out, "# TYPE %s %s\n",
- name, strings.ToLower(metricType.String()),
- )
- written += n
- if err != nil {
- return written, err
- }
-
- // Finally the samples, one line for each.
- for _, metric := range in.Metric {
- switch metricType {
- case dto.MetricType_COUNTER:
- if metric.Counter == nil {
- return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- name, metric, "", "",
- metric.Counter.GetValue(),
- out,
- )
- case dto.MetricType_GAUGE:
- if metric.Gauge == nil {
- return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- name, metric, "", "",
- metric.Gauge.GetValue(),
- out,
- )
- case dto.MetricType_UNTYPED:
- if metric.Untyped == nil {
- return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
- )
- }
- n, err = writeSample(
- name, metric, "", "",
- metric.Untyped.GetValue(),
- out,
- )
- case dto.MetricType_SUMMARY:
- if metric.Summary == nil {
- return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
- )
- }
- for _, q := range metric.Summary.Quantile {
- n, err = writeSample(
- name, metric,
- model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
- q.GetValue(),
- out,
- )
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeSample(
- name+"_sum", metric, "", "",
- metric.Summary.GetSampleSum(),
- out,
- )
- if err != nil {
- return written, err
- }
- written += n
- n, err = writeSample(
- name+"_count", metric, "", "",
- float64(metric.Summary.GetSampleCount()),
- out,
- )
- case dto.MetricType_HISTOGRAM:
- if metric.Histogram == nil {
- return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
- )
- }
- infSeen := false
- for _, q := range metric.Histogram.Bucket {
- n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
- float64(q.GetCumulativeCount()),
- out,
- )
- written += n
- if err != nil {
- return written, err
- }
- if math.IsInf(q.GetUpperBound(), +1) {
- infSeen = true
- }
- }
- if !infSeen {
- n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, "+Inf",
- float64(metric.Histogram.GetSampleCount()),
- out,
- )
- if err != nil {
- return written, err
- }
- written += n
- }
- n, err = writeSample(
- name+"_sum", metric, "", "",
- metric.Histogram.GetSampleSum(),
- out,
- )
- if err != nil {
- return written, err
- }
- written += n
- n, err = writeSample(
- name+"_count", metric, "", "",
- float64(metric.Histogram.GetSampleCount()),
- out,
- )
- default:
- return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
- )
- }
- written += n
- if err != nil {
- return written, err
- }
- }
- return written, nil
-}
-
-// writeSample writes a single sample in text format to out, given the metric
-// name, the metric proto message itself, optionally an additional label name
-// and value (use empty strings if not required), and the value. The function
-// returns the number of bytes written and any error encountered.
-func writeSample(
- name string,
- metric *dto.Metric,
- additionalLabelName, additionalLabelValue string,
- value float64,
- out io.Writer,
-) (int, error) {
- var written int
- n, err := fmt.Fprint(out, name)
- written += n
- if err != nil {
- return written, err
- }
- n, err = labelPairsToText(
- metric.Label,
- additionalLabelName, additionalLabelValue,
- out,
- )
- written += n
- if err != nil {
- return written, err
- }
- n, err = fmt.Fprintf(out, " %v", value)
- written += n
- if err != nil {
- return written, err
- }
- if metric.TimestampMs != nil {
- n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = out.Write([]byte{'\n'})
- written += n
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// labelPairsToText converts a slice of LabelPair proto messages plus the
-// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'out'. An empty slice in combination with an
-// empty string 'additionalLabelName' results in nothing being
-// written. Otherwise, the label pairs are written, escaped as required by the
-// text format, and enclosed in '{...}'. The function returns the number of
-// bytes written and any error encountered.
-func labelPairsToText(
- in []*dto.LabelPair,
- additionalLabelName, additionalLabelValue string,
- out io.Writer,
-) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
- var written int
- separator := '{'
- for _, lp := range in {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, lp.GetName(), escapeString(lp.GetValue(), true),
- )
- written += n
- if err != nil {
- return written, err
- }
- separator = ','
- }
- if additionalLabelName != "" {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, additionalLabelName,
- escapeString(additionalLabelValue, true),
- )
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err := out.Write([]byte{'}'})
- written += n
- if err != nil {
- return written, err
- }
- return written, nil
-}
-
-// escapeString replaces '\' by '\\', new line character by '\n', and - if
-// includeDoubleQuote is true - '"' by '\"'.
-func escapeString(v string, includeDoubleQuote bool) string {
- result := bytes.NewBuffer(make([]byte, 0, len(v)))
- for _, c := range v {
- switch {
- case c == '\\':
- result.WriteString(`\\`)
- case includeDoubleQuote && c == '"':
- result.WriteString(`\"`)
- case c == '\n':
- result.WriteString(`\n`)
- default:
- result.WriteRune(c)
- }
- }
- return result.String()
-}
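
For reference, MetricFamilyToText keeps the signature shown above after the switch to modules. A short sketch that renders a hand-built MetricFamily (metric name, label and value are made up for the example):

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Build a one-sample counter family with the same proto helpers the
	// deleted file uses internally.
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	// Writes the "# HELP", "# TYPE" and sample lines to stdout.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}
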
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
deleted file mode 100644
index bd170b16..00000000
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ /dev/null
@@ -1,753 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expfmt
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "math"
- "strconv"
- "strings"
-
- dto "github.com/prometheus/client_model/go"
-
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-)
-
-// A stateFn is a function that represents a state in a state machine. By
-// executing it, the state is progressed to the next state. The stateFn returns
-// another stateFn, which represents the new state. The end state is represented
-// by nil.
-type stateFn func() stateFn
-
-// ParseError signals errors while parsing the simple and flat text-based
-// exchange format.
-type ParseError struct {
- Line int
- Msg string
-}
-
-// Error implements the error interface.
-func (e ParseError) Error() string {
- return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
-}
-
-// TextParser is used to parse the simple and flat text-based exchange format. Its
-// nil value is ready to use.
-type TextParser struct {
- metricFamiliesByName map[string]*dto.MetricFamily
- buf *bufio.Reader // Where the parsed input is read through.
- err error // Most recent error.
- lineCount int // Tracks the line count for error messages.
- currentByte byte // The most recent byte read.
- currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
- currentMF *dto.MetricFamily
- currentMetric *dto.Metric
- currentLabelPair *dto.LabelPair
-
- // The remaining member variables are only used for summaries/histograms.
- currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
- // Summary specific.
- summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentQuantile float64
- // Histogram specific.
- histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
- currentBucket float64
- // These tell us if the currently processed line ends on '_count' or
- // '_sum' respectively and belong to a summary/histogram, representing the sample
- // count and sum of that summary/histogram.
- currentIsSummaryCount, currentIsSummarySum bool
- currentIsHistogramCount, currentIsHistogramSum bool
-}
-
-// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
-// format and creates MetricFamily proto messages. It returns the MetricFamily
-// proto messages in a map where the metric names are the keys, along with any
-// error encountered.
-//
-// If the input contains duplicate metrics (i.e. lines with the same metric name
-// and exactly the same label set), the resulting MetricFamily will contain
-// duplicate Metric proto messages. Similar is true for duplicate label
-// names. Checks for duplicates have to be performed separately, if required.
-// Also note that neither the metrics within each MetricFamily are sorted nor
-// the label pairs within each Metric. Sorting is not required for the most
-// frequent use of this method, which is sample ingestion in the Prometheus
-// server. However, for presentation purposes, you might want to sort the
-// metrics, and in some cases, you must sort the labels, e.g. for consumption by
-// the metric family injection hook of the Prometheus registry.
-//
-// Summaries and histograms are rather special beasts. You would probably not
-// use them in the simple text format anyway. This method can deal with
-// summaries and histograms if they are presented in exactly the way the
-// text.Create function creates them.
-//
-// This method must not be called concurrently. If you want to parse different
-// input concurrently, instantiate a separate Parser for each goroutine.
-func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
- p.reset(in)
- for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
- // Magic happens here...
- }
- // Get rid of empty metric families.
- for k, mf := range p.metricFamiliesByName {
- if len(mf.GetMetric()) == 0 {
- delete(p.metricFamiliesByName, k)
- }
- }
- // If p.err is io.EOF now, we have run into a premature end of the input
- // stream. Turn this error into something nicer and more
- // meaningful. (io.EOF is often used as a signal for the legitimate end
- // of an input stream.)
- if p.err == io.EOF {
- p.parseError("unexpected end of input stream")
- }
- return p.metricFamiliesByName, p.err
-}
-
-func (p *TextParser) reset(in io.Reader) {
- p.metricFamiliesByName = map[string]*dto.MetricFamily{}
- if p.buf == nil {
- p.buf = bufio.NewReader(in)
- } else {
- p.buf.Reset(in)
- }
- p.err = nil
- p.lineCount = 0
- if p.summaries == nil || len(p.summaries) > 0 {
- p.summaries = map[uint64]*dto.Metric{}
- }
- if p.histograms == nil || len(p.histograms) > 0 {
- p.histograms = map[uint64]*dto.Metric{}
- }
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
-}
-
-// startOfLine represents the state where the next byte read from p.buf is the
-// start of a line (or whitespace leading up to it).
-func (p *TextParser) startOfLine() stateFn {
- p.lineCount++
- if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
- return nil
- }
- switch p.currentByte {
- case '#':
- return p.startComment
- case '\n':
- return p.startOfLine // Empty line, start the next one.
- }
- return p.readingMetricName
-}
-
-// startComment represents the state where the next byte read from p.buf is the
-// start of a comment (or whitespace leading up to it).
-func (p *TextParser) startComment() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- // If we have hit the end of line already, there is nothing left
- // to do. This is not considered a syntax error.
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- keyword := p.currentToken.String()
- if keyword != "HELP" && keyword != "TYPE" {
- // Generic comment, ignore by fast forwarding to end of line.
- for p.currentByte != '\n' {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return nil // Unexpected end of input.
- }
- }
- return p.startOfLine
- }
- // There is something. Next has to be a metric name.
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenAsMetricName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- if !isBlankOrTab(p.currentByte) {
- p.parseError("invalid metric name in comment")
- return nil
- }
- p.setOrCreateCurrentMF()
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '\n' {
- // At the end of the line already.
- // Again, this is not considered a syntax error.
- return p.startOfLine
- }
- switch keyword {
- case "HELP":
- return p.readingHelp
- case "TYPE":
- return p.readingType
- }
- panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
-}
-
-// readingMetricName represents the state where the last byte read (now in
-// p.currentByte) is the first byte of a metric name.
-func (p *TextParser) readingMetricName() stateFn {
- if p.readTokenAsMetricName(); p.err != nil {
- return nil
- }
- if p.currentToken.Len() == 0 {
- p.parseError("invalid metric name")
- return nil
- }
- p.setOrCreateCurrentMF()
- // Now is the time to fix the type if it hasn't happened yet.
- if p.currentMF.Type == nil {
- p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
- }
- p.currentMetric = &dto.Metric{}
- // Do not append the newly created currentMetric to
- // currentMF.Metric right now. First wait if this is a summary,
- // and the metric exists already, which we can only know after
- // having read all the labels.
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingLabels
-}
-
-// readingLabels represents the state where the last byte read (now in
-// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
-// first byte of the value (otherwise).
-func (p *TextParser) readingLabels() stateFn {
- // Summaries/histograms are special. We have to reset the
- // currentLabels map, currentQuantile and currentBucket before starting to
- // read labels.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- p.currentLabels = map[string]string{}
- p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
- p.currentQuantile = math.NaN()
- p.currentBucket = math.NaN()
- }
- if p.currentByte != '{' {
- return p.readingValue
- }
- return p.startLabelName
-}
-
-// startLabelName represents the state where the next byte read from p.buf is
-// the start of a label name (or whitespace leading up to it).
-func (p *TextParser) startLabelName() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte == '}' {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- }
- if p.readTokenAsLabelName(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() == 0 {
- p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
- return nil
- }
- p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
- if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
- p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
- return nil
- }
- // Special summary/histogram treatment. Don't add 'quantile' and 'le'
- // labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
- }
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '=' {
- p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
- return nil
- }
- return p.startLabelValue
-}
-
-// startLabelValue represents the state where the next byte read from p.buf is
-// the start of a (quoted) label value (or whitespace leading up to it).
-func (p *TextParser) startLabelValue() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '"' {
- p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
- return nil
- }
- if p.readTokenAsLabelValue(); p.err != nil {
- return nil
- }
- p.currentLabelPair.Value = proto.String(p.currentToken.String())
- // Special treatment of summaries:
- // - Quantile labels are special, will result in dto.Quantile later.
- // - Other labels have to be added to currentLabels for signature calculation.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if p.currentLabelPair.GetName() == model.QuantileLabel {
- if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- // Similar special treatment of histograms.
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if p.currentLabelPair.GetName() == model.BucketLabel {
- if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
- return nil
- }
- } else {
- p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
- }
- }
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- switch p.currentByte {
- case ',':
- return p.startLabelName
-
- case '}':
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- return p.readingValue
- default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
- return nil
- }
-}
-
-// readingValue represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the sample value (i.e. a float).
-func (p *TextParser) readingValue() stateFn {
- // When we are here, we have read all the labels, so for the
- // special case of a summary/histogram, we can finally find out
- // if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- signature := model.LabelsToSignature(p.currentLabels)
- if summary := p.summaries[signature]; summary != nil {
- p.currentMetric = summary
- } else {
- p.summaries[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- signature := model.LabelsToSignature(p.currentLabels)
- if histogram := p.histograms[signature]; histogram != nil {
- p.currentMetric = histogram
- } else {
- p.histograms[signature] = p.currentMetric
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- } else {
- p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- value, err := strconv.ParseFloat(p.currentToken.String(), 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
- return nil
- }
- switch p.currentMF.GetType() {
- case dto.MetricType_COUNTER:
- p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
- case dto.MetricType_GAUGE:
- p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
- case dto.MetricType_UNTYPED:
- p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
- case dto.MetricType_SUMMARY:
- // *sigh*
- if p.currentMetric.Summary == nil {
- p.currentMetric.Summary = &dto.Summary{}
- }
- switch {
- case p.currentIsSummaryCount:
- p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsSummarySum:
- p.currentMetric.Summary.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentQuantile):
- p.currentMetric.Summary.Quantile = append(
- p.currentMetric.Summary.Quantile,
- &dto.Quantile{
- Quantile: proto.Float64(p.currentQuantile),
- Value: proto.Float64(value),
- },
- )
- }
- case dto.MetricType_HISTOGRAM:
- // *sigh*
- if p.currentMetric.Histogram == nil {
- p.currentMetric.Histogram = &dto.Histogram{}
- }
- switch {
- case p.currentIsHistogramCount:
- p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
- case p.currentIsHistogramSum:
- p.currentMetric.Histogram.SampleSum = proto.Float64(value)
- case !math.IsNaN(p.currentBucket):
- p.currentMetric.Histogram.Bucket = append(
- p.currentMetric.Histogram.Bucket,
- &dto.Bucket{
- UpperBound: proto.Float64(p.currentBucket),
- CumulativeCount: proto.Uint64(uint64(value)),
- },
- )
- }
- default:
- p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
- }
- if p.currentByte == '\n' {
- return p.startOfLine
- }
- return p.startTimestamp
-}
-
-// startTimestamp represents the state where the next byte read from p.buf is
-// the start of the timestamp (or whitespace leading up to it).
-func (p *TextParser) startTimestamp() stateFn {
- if p.skipBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.readTokenUntilWhitespace(); p.err != nil {
- return nil // Unexpected end of input.
- }
- timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
- if err != nil {
- // Create a more helpful error message.
- p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
- return nil
- }
- p.currentMetric.TimestampMs = proto.Int64(timestamp)
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentToken.Len() > 0 {
- p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
- return nil
- }
- return p.startOfLine
-}
-
-// readingHelp represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the docstring after 'HELP'.
-func (p *TextParser) readingHelp() stateFn {
- if p.currentMF.Help != nil {
- p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the docstring.
- if p.readTokenUntilNewline(true); p.err != nil {
- return nil // Unexpected end of input.
- }
- p.currentMF.Help = proto.String(p.currentToken.String())
- return p.startOfLine
-}
-
-// readingType represents the state where the last byte read (now in
-// p.currentByte) is the first byte of the type hint after 'TYPE'.
-func (p *TextParser) readingType() stateFn {
- if p.currentMF.Type != nil {
- p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
- return nil
- }
- // Rest of line is the type.
- if p.readTokenUntilNewline(false); p.err != nil {
- return nil // Unexpected end of input.
- }
- metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
- if !ok {
- p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
- return nil
- }
- p.currentMF.Type = dto.MetricType(metricType).Enum()
- return p.startOfLine
-}
-
-// parseError sets p.err to a ParseError at the current line with the given
-// message.
-func (p *TextParser) parseError(msg string) {
- p.err = ParseError{
- Line: p.lineCount,
- Msg: msg,
- }
-}
-
-// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
-// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
-func (p *TextParser) skipBlankTab() {
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
- return
- }
- }
-}
-
-// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
-// anything if p.currentByte is neither ' ' nor '\t'.
-func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
- if isBlankOrTab(p.currentByte) {
- p.skipBlankTab()
- }
-}
-
-// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
-// first byte considered is the byte already read (now in p.currentByte). The
-// first whitespace byte encountered is still copied into p.currentByte, but not
-// into p.currentToken.
-func (p *TextParser) readTokenUntilWhitespace() {
- p.currentToken.Reset()
- for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
-// byte considered is the byte already read (now in p.currentByte). The first
-// newline byte encountered is still copied into p.currentByte, but not into
-// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
-func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
- p.currentToken.Reset()
- escaped := false
- for p.err == nil {
- if recognizeEscapeSequence && escaped {
- switch p.currentByte {
- case '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- } else {
- switch p.currentByte {
- case '\n':
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
- p.currentByte, p.err = p.buf.ReadByte()
- }
-}
-
-// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a metric name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsMetricName() {
- p.currentToken.Reset()
- if !isValidMetricNameStart(p.currentByte) {
- return
- }
- for {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
- return
- }
- }
-}
-
-// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
-// The first byte considered is the byte already read (now in p.currentByte).
-// The first byte not part of a label name is still copied into p.currentByte,
-// but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelName() {
- p.currentToken.Reset()
- if !isValidLabelNameStart(p.currentByte) {
- return
- }
- for {
- p.currentToken.WriteByte(p.currentByte)
- p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
- return
- }
- }
-}
-
-// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
-// In contrast to the other 'readTokenAs...' functions, which start with the
-// last read byte in p.currentByte, this method ignores p.currentByte and starts
-// with reading a new byte from p.buf. The first byte not part of a label value
-// is still copied into p.currentByte, but not into p.currentToken.
-func (p *TextParser) readTokenAsLabelValue() {
- p.currentToken.Reset()
- escaped := false
- for {
- if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
- return
- }
- if escaped {
- switch p.currentByte {
- case '"', '\\':
- p.currentToken.WriteByte(p.currentByte)
- case 'n':
- p.currentToken.WriteByte('\n')
- default:
- p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
- return
- }
- escaped = false
- continue
- }
- switch p.currentByte {
- case '"':
- return
- case '\n':
- p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
- return
- case '\\':
- escaped = true
- default:
- p.currentToken.WriteByte(p.currentByte)
- }
- }
-}
-
-func (p *TextParser) setOrCreateCurrentMF() {
- p.currentIsSummaryCount = false
- p.currentIsSummarySum = false
- p.currentIsHistogramCount = false
- p.currentIsHistogramSum = false
- name := p.currentToken.String()
- if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
- return
- }
- // Try out if this is a _sum or _count for a summary/histogram.
- summaryName := summaryMetricName(name)
- if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
- if isCount(name) {
- p.currentIsSummaryCount = true
- }
- if isSum(name) {
- p.currentIsSummarySum = true
- }
- return
- }
- }
- histogramName := histogramMetricName(name)
- if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
- if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
- if isCount(name) {
- p.currentIsHistogramCount = true
- }
- if isSum(name) {
- p.currentIsHistogramSum = true
- }
- return
- }
- }
- p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
- p.metricFamiliesByName[name] = p.currentMF
-}
-
-func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
-}
-
-func isValidLabelNameContinuation(b byte) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
-}
-
-func isValidMetricNameStart(b byte) bool {
- return isValidLabelNameStart(b) || b == ':'
-}
-
-func isValidMetricNameContinuation(b byte) bool {
- return isValidLabelNameContinuation(b) || b == ':'
-}
-
-func isBlankOrTab(b byte) bool {
- return b == ' ' || b == '\t'
-}
-
-func isCount(name string) bool {
- return len(name) > 6 && name[len(name)-6:] == "_count"
-}
-
-func isSum(name string) bool {
- return len(name) > 4 && name[len(name)-4:] == "_sum"
-}
-
-func isBucket(name string) bool {
- return len(name) > 7 && name[len(name)-7:] == "_bucket"
-}
-
-func summaryMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- default:
- return name
- }
-}
-
-func histogramMetricName(name string) string {
- switch {
- case isCount(name):
- return name[:len(name)-6]
- case isSum(name):
- return name[:len(name)-4]
- case isBucket(name):
- return name[:len(name)-7]
- default:
- return name
- }
-}
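
Similarly, the text parser keeps working unchanged from the module-resolved package. A hypothetical sketch of TextToMetricFamilies on a tiny exposition (the sample metric is invented):

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := "# HELP node_load1 1m load average.\n" +
		"# TYPE node_load1 gauge\n" +
		"node_load1 0.21\n"

	var parser expfmt.TextParser // the zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		// Prints: node_load1 GAUGE 0.21
		fmt.Println(name, mf.GetType(), mf.GetMetric()[0].GetGauge().GetValue())
	}
}
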
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
deleted file mode 100644
index 7723656d..00000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-PACKAGE
-
-package goautoneg
-import "bitbucket.org/ww/goautoneg"
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-FUNCTIONS
-
-func Negotiate(header string, alternatives []string) (content_type string)
-Negotiate the most appropriate content_type given the accept header
-and a list of alternatives.
-
-func ParseAccept(header string) (accept []Accept)
-Parse an Accept Header string returning a sorted list
-of clauses
-
-
-TYPES
-
-type Accept struct {
- Type, SubType string
- Q float32
- Params map[string]string
-}
-Structure to represent a clause in an HTTP Accept Header
-
-
-SUBDIRECTORIES
-
- .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
deleted file mode 100644
index 648b38cb..00000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-*/
-package goautoneg
-
-import (
- "sort"
- "strconv"
- "strings"
-)
-
-// Structure to represent a clause in an HTTP Accept Header
-type Accept struct {
- Type, SubType string
- Q float64
- Params map[string]string
-}
-
-// For internal use, so that we can use the sort interface
-type accept_slice []Accept
-
-func (accept accept_slice) Len() int {
- slice := []Accept(accept)
- return len(slice)
-}
-
-func (accept accept_slice) Less(i, j int) bool {
- slice := []Accept(accept)
- ai, aj := slice[i], slice[j]
- if ai.Q > aj.Q {
- return true
- }
- if ai.Type != "*" && aj.Type == "*" {
- return true
- }
- if ai.SubType != "*" && aj.SubType == "*" {
- return true
- }
- return false
-}
-
-func (accept accept_slice) Swap(i, j int) {
- slice := []Accept(accept)
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-// Parse an Accept Header string returning a sorted list
-// of clauses
-func ParseAccept(header string) (accept []Accept) {
- parts := strings.Split(header, ",")
- accept = make([]Accept, 0, len(parts))
- for _, part := range parts {
- part := strings.Trim(part, " ")
-
- a := Accept{}
- a.Params = make(map[string]string)
- a.Q = 1.0
-
- mrp := strings.Split(part, ";")
-
- media_range := mrp[0]
- sp := strings.Split(media_range, "/")
- a.Type = strings.Trim(sp[0], " ")
-
- switch {
- case len(sp) == 1 && a.Type == "*":
- a.SubType = "*"
- case len(sp) == 2:
- a.SubType = strings.Trim(sp[1], " ")
- default:
- continue
- }
-
- if len(mrp) == 1 {
- accept = append(accept, a)
- continue
- }
-
- for _, param := range mrp[1:] {
- sp := strings.SplitN(param, "=", 2)
- if len(sp) != 2 {
- continue
- }
- token := strings.Trim(sp[0], " ")
- if token == "q" {
- a.Q, _ = strconv.ParseFloat(sp[1], 32)
- } else {
- a.Params[token] = strings.Trim(sp[1], " ")
- }
- }
-
- accept = append(accept, a)
- }
-
- slice := accept_slice(accept)
- sort.Sort(slice)
-
- return
-}
-
-// Negotiate the most appropriate content_type given the accept header
-// and a list of alternatives.
-func Negotiate(header string, alternatives []string) (content_type string) {
- asp := make([][]string, 0, len(alternatives))
- for _, ctype := range alternatives {
- asp = append(asp, strings.SplitN(ctype, "/", 2))
- }
- for _, clause := range ParseAccept(header) {
- for i, ctsp := range asp {
- if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
- content_type = alternatives[i]
- return
- }
- if clause.Type == ctsp[0] && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- if clause.Type == "*" && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- }
- }
- return
-}
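
Note that goautoneg is vendored under internal/, so it is importable only from within the prometheus/common module itself; the sketch below merely illustrates the behaviour of the two exported functions documented above.

// Illustration only: this import path compiles only for code that lives
// inside the prometheus/common module, because it sits under internal/.
package autonegdemo

import (
	"fmt"

	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
)

func demo() {
	header := "application/vnd.google.protobuf;q=0.9, text/plain;q=0.5, */*;q=0.1"
	// Negotiate returns the alternative that best matches the Accept header.
	chosen := goautoneg.Negotiate(header, []string{
		"text/plain",
		"application/vnd.google.protobuf",
	})
	fmt.Println(chosen) // application/vnd.google.protobuf
}
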
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
deleted file mode 100644
index 35e739c7..00000000
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "time"
-)
-
-type AlertStatus string
-
-const (
- AlertFiring AlertStatus = "firing"
- AlertResolved AlertStatus = "resolved"
-)
-
-// Alert is a generic representation of an alert in the Prometheus eco-system.
-type Alert struct {
- // Label value pairs for purpose of aggregation, matching, and disposition
- // dispatching. This must minimally include an "alertname" label.
- Labels LabelSet `json:"labels"`
-
- // Extra key/value information which does not define alert identity.
- Annotations LabelSet `json:"annotations"`
-
- // The known time range for this alert. Both ends are optional.
- StartsAt time.Time `json:"startsAt,omitempty"`
- EndsAt time.Time `json:"endsAt,omitempty"`
- GeneratorURL string `json:"generatorURL"`
-}
-
-// Name returns the name of the alert. It is equivalent to the "alertname" label.
-func (a *Alert) Name() string {
- return string(a.Labels[AlertNameLabel])
-}
-
-// Fingerprint returns a unique hash for the alert. It is equivalent to
-// the fingerprint of the alert's label set.
-func (a *Alert) Fingerprint() Fingerprint {
- return a.Labels.Fingerprint()
-}
-
-func (a *Alert) String() string {
- s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
- if a.Resolved() {
- return s + "[resolved]"
- }
- return s + "[active]"
-}
-
-// Resolved returns true iff the activity interval ended in the past.
-func (a *Alert) Resolved() bool {
- return a.ResolvedAt(time.Now())
-}
-
-// ResolvedAt returns true iff the activity interval ended before
-// the given timestamp.
-func (a *Alert) ResolvedAt(ts time.Time) bool {
- if a.EndsAt.IsZero() {
- return false
- }
- return !a.EndsAt.After(ts)
-}
-
-// Status returns the status of the alert.
-func (a *Alert) Status() AlertStatus {
- if a.Resolved() {
- return AlertResolved
- }
- return AlertFiring
-}
-
-// Validate checks whether the alert data is inconsistent.
-func (a *Alert) Validate() error {
- if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
- }
- if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
- }
- if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
- }
- return nil
-}
-
-// Alerts is a list of alerts that can be sorted in chronological order.
-type Alerts []*Alert
-
-func (as Alerts) Len() int { return len(as) }
-func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
-
-func (as Alerts) Less(i, j int) bool {
- if as[i].StartsAt.Before(as[j].StartsAt) {
- return true
- }
- if as[i].EndsAt.Before(as[j].EndsAt) {
- return true
- }
- return as[i].Fingerprint() < as[j].Fingerprint()
-}
-
-// HasFiring returns true iff one of the alerts is not resolved.
-func (as Alerts) HasFiring() bool {
- for _, a := range as {
- if !a.Resolved() {
- return true
- }
- }
- return false
-}
-
-// Status returns StatusFiring iff at least one of the alerts is firing.
-func (as Alerts) Status() AlertStatus {
- if as.HasFiring() {
- return AlertFiring
- }
- return AlertResolved
-}
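
A brief sketch of the Alert type deleted above, still available through the module dependency (alert name, labels and annotations are invented for the example):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// A currently firing alert: EndsAt is left at its zero value.
	a := &model.Alert{
		Labels: model.LabelSet{
			model.AlertNameLabel: "HighErrorRate",
			"severity":           "page",
		},
		Annotations: model.LabelSet{"summary": "error rate above 5%"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
	}
	if err := a.Validate(); err != nil {
		panic(err)
	}
	fmt.Println(a.Name(), a.Status(), a.Resolved()) // HighErrorRate firing false
}
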
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
deleted file mode 100644
index fc4de410..00000000
--- a/vendor/github.com/prometheus/common/model/fingerprinting.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "strconv"
-)
-
-// Fingerprint provides a hash-capable representation of a Metric.
-// For our purposes, FNV-1A 64-bit is used.
-type Fingerprint uint64
-
-// FingerprintFromString transforms a string representation into a Fingerprint.
-func FingerprintFromString(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- return Fingerprint(num), err
-}
-
-// ParseFingerprint parses the input string into a fingerprint.
-func ParseFingerprint(s string) (Fingerprint, error) {
- num, err := strconv.ParseUint(s, 16, 64)
- if err != nil {
- return 0, err
- }
- return Fingerprint(num), nil
-}
-
-func (f Fingerprint) String() string {
- return fmt.Sprintf("%016x", uint64(f))
-}
-
-// Fingerprints represents a collection of Fingerprint subject to a given
-// natural sorting scheme. It implements sort.Interface.
-type Fingerprints []Fingerprint
-
-// Len implements sort.Interface.
-func (f Fingerprints) Len() int {
- return len(f)
-}
-
-// Less implements sort.Interface.
-func (f Fingerprints) Less(i, j int) bool {
- return f[i] < f[j]
-}
-
-// Swap implements sort.Interface.
-func (f Fingerprints) Swap(i, j int) {
- f[i], f[j] = f[j], f[i]
-}
-
-// FingerprintSet is a set of Fingerprints.
-type FingerprintSet map[Fingerprint]struct{}
-
-// Equal returns true if both sets contain the same elements (and not more).
-func (s FingerprintSet) Equal(o FingerprintSet) bool {
- if len(s) != len(o) {
- return false
- }
-
- for k := range s {
- if _, ok := o[k]; !ok {
- return false
- }
- }
-
- return true
-}
-
-// Intersection returns the elements contained in both sets.
-func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
- myLength, otherLength := len(s), len(o)
- if myLength == 0 || otherLength == 0 {
- return FingerprintSet{}
- }
-
- subSet := s
- superSet := o
-
- if otherLength < myLength {
- subSet = o
- superSet = s
- }
-
- out := FingerprintSet{}
-
- for k := range subSet {
- if _, ok := superSet[k]; ok {
- out[k] = struct{}{}
- }
- }
-
- return out
-}
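
For reference, a small sketch of the Fingerprint helpers above (the values are arbitrary):

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	fp, err := model.ParseFingerprint("00000000deadbeef")
	if err != nil {
		panic(err)
	}
	fmt.Println(fp) // 00000000deadbeef

	// Fingerprints implements sort.Interface, as shown in the deleted file.
	fps := model.Fingerprints{fp, 0x1, 0xcafe}
	sort.Sort(fps)
	fmt.Println(fps[0], fps[len(fps)-1]) // 0000000000000001 00000000deadbeef
}
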
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
deleted file mode 100644
index 038fc1c9..00000000
--- a/vendor/github.com/prometheus/common/model/fnv.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
- offset64 = 14695981039346656037
- prime64 = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
- return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
- for i := 0; i < len(s); i++ {
- h ^= uint64(s[i])
- h *= prime64
- }
- return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
- h ^= uint64(b)
- h *= prime64
- return h
-}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
deleted file mode 100644
index 41051a01..00000000
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strings"
- "unicode/utf8"
-)
-
-const (
-	// AlertNameLabel is the name of the label containing an alert's name.
- AlertNameLabel = "alertname"
-
- // ExportedLabelPrefix is the prefix to prepend to the label names present in
- // exported metrics if a label of the same name is added by the server.
- ExportedLabelPrefix = "exported_"
-
- // MetricNameLabel is the label name indicating the metric name of a
- // timeseries.
- MetricNameLabel = "__name__"
-
- // SchemeLabel is the name of the label that holds the scheme on which to
- // scrape a target.
- SchemeLabel = "__scheme__"
-
- // AddressLabel is the name of the label that holds the address of
- // a scrape target.
- AddressLabel = "__address__"
-
- // MetricsPathLabel is the name of the label that holds the path on which to
- // scrape a target.
- MetricsPathLabel = "__metrics_path__"
-
- // ReservedLabelPrefix is a prefix which is not legal in user-supplied
- // label names.
- ReservedLabelPrefix = "__"
-
- // MetaLabelPrefix is a prefix for labels that provide meta information.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series.
- MetaLabelPrefix = "__meta_"
-
- // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
- // Labels with this prefix are used for intermediate label processing and
- // will not be attached to time series. This is reserved for use in
- // Prometheus configuration files by users.
- TmpLabelPrefix = "__tmp_"
-
- // ParamLabelPrefix is a prefix for labels that provide URL parameters
- // used to scrape a target.
- ParamLabelPrefix = "__param_"
-
- // JobLabel is the label name indicating the job from which a timeseries
- // was scraped.
- JobLabel = "job"
-
- // InstanceLabel is the label name used for the instance label.
- InstanceLabel = "instance"
-
- // BucketLabel is used for the label that defines the upper bound of a
- // bucket of a histogram ("le" -> "less or equal").
- BucketLabel = "le"
-
- // QuantileLabel is used for the label that defines the quantile in a
- // summary.
- QuantileLabel = "quantile"
-)
-
-// LabelNameRE is a regular expression matching valid label names. Note that the
-// IsValid method of LabelName performs the same check but faster than a match
-// with this regular expression.
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
-func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- if !LabelName(s).IsValid() {
- return fmt.Errorf("%q is not a valid label name", s)
- }
- *ln = LabelName(s)
- return nil
-}
-
-// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
- return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
- return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-func (l LabelNames) String() string {
- labelStrings := make([]string, 0, len(l))
- for _, label := range l {
- labelStrings = append(labelStrings, string(label))
- }
- return strings.Join(labelStrings, ", ")
-}
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
-// IsValid returns true iff the string is valid UTF-8.
-func (lv LabelValue) IsValid() bool {
- return utf8.ValidString(string(lv))
-}
-
-// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
-type LabelValues []LabelValue
-
-func (l LabelValues) Len() int {
- return len(l)
-}
-
-func (l LabelValues) Less(i, j int) bool {
- return string(l[i]) < string(l[j])
-}
-
-func (l LabelValues) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
-
-// LabelPair pairs a name with a value.
-type LabelPair struct {
- Name LabelName
- Value LabelValue
-}
-
-// LabelPairs is a sortable slice of LabelPair pointers. It implements
-// sort.Interface.
-type LabelPairs []*LabelPair
-
-func (l LabelPairs) Len() int {
- return len(l)
-}
-
-func (l LabelPairs) Less(i, j int) bool {
- switch {
- case l[i].Name > l[j].Name:
- return false
- case l[i].Name < l[j].Name:
- return true
- case l[i].Value > l[j].Value:
- return false
- case l[i].Value < l[j].Value:
- return true
- default:
- return false
- }
-}
-
-func (l LabelPairs) Swap(i, j int) {
- l[i], l[j] = l[j], l[i]
-}
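Dropping the vendored copy of `model/labels.go` does not change how callers use these types; the same API is now resolved through the module requirement on github.com/prometheus/common. A minimal sketch of the label-name validation and sorting behaviour defined above, assuming the module release keeps the exported API of the file just removed (the example label names are illustrative):

```go
package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/common/model"
)

func main() {
	// Validate label names using the fast hardcoded check described above.
	fmt.Println(model.LabelName("http_status").IsValid()) // true
	fmt.Println(model.LabelName("42_status").IsValid())   // false: must not start with a digit

	// LabelNames implements sort.Interface, so it can be sorted directly.
	names := model.LabelNames{"zone", "job", "instance"}
	sort.Sort(names)
	fmt.Println(names) // instance, job, zone
}
```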
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
deleted file mode 100644
index 6eda08a7..00000000
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strings"
-)
-
-// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
-// may be fully-qualified down to the point where it may resolve to a single
-// Metric in the data store or not. All operations that occur within the realm
-// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
-// match.
-type LabelSet map[LabelName]LabelValue
-
-// Validate checks whether all names and values in the label set
-// are valid.
-func (ls LabelSet) Validate() error {
- for ln, lv := range ls {
- if !ln.IsValid() {
- return fmt.Errorf("invalid name %q", ln)
- }
- if !lv.IsValid() {
- return fmt.Errorf("invalid value %q", lv)
- }
- }
- return nil
-}
-
-// Equal returns true iff both label sets have exactly the same key/value pairs.
-func (ls LabelSet) Equal(o LabelSet) bool {
- if len(ls) != len(o) {
- return false
- }
- for ln, lv := range ls {
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if olv != lv {
- return false
- }
- }
- return true
-}
-
-// Before compares the metrics, using the following criteria:
-//
-// If m has fewer labels than o, it is before o. If it has more, it is not.
-//
-// If the number of labels is the same, the superset of all label names is
-// sorted alphanumerically. The first differing label pair found in that order
-// determines the outcome: If the label does not exist at all in m, then m is
-// before o, and vice versa. Otherwise the label value is compared
-// alphanumerically.
-//
-// If m and o are equal, the method returns false.
-func (ls LabelSet) Before(o LabelSet) bool {
- if len(ls) < len(o) {
- return true
- }
- if len(ls) > len(o) {
- return false
- }
-
- lns := make(LabelNames, 0, len(ls)+len(o))
- for ln := range ls {
- lns = append(lns, ln)
- }
- for ln := range o {
- lns = append(lns, ln)
- }
- // It's probably not worth it to de-dup lns.
- sort.Sort(lns)
- for _, ln := range lns {
- mlv, ok := ls[ln]
- if !ok {
- return true
- }
- olv, ok := o[ln]
- if !ok {
- return false
- }
- if mlv < olv {
- return true
- }
- if mlv > olv {
- return false
- }
- }
- return false
-}
-
-// Clone returns a copy of the label set.
-func (ls LabelSet) Clone() LabelSet {
- lsn := make(LabelSet, len(ls))
- for ln, lv := range ls {
- lsn[ln] = lv
- }
- return lsn
-}
-
-// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
-
- for k, v := range l {
- result[k] = v
- }
-
- for k, v := range other {
- result[k] = v
- }
-
- return result
-}
-
-func (l LabelSet) String() string {
- lstrs := make([]string, 0, len(l))
- for l, v := range l {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
- }
-
- sort.Strings(lstrs)
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
-
-// Fingerprint returns the LabelSet's fingerprint.
-func (ls LabelSet) Fingerprint() Fingerprint {
- return labelSetToFingerprint(ls)
-}
-
-// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (ls LabelSet) FastFingerprint() Fingerprint {
- return labelSetToFastFingerprint(ls)
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
- var m map[LabelName]LabelValue
- if err := json.Unmarshal(b, &m); err != nil {
- return err
- }
- // encoding/json only unmarshals maps of the form map[string]T. It treats
- // LabelName as a string and does not call its UnmarshalJSON method.
- // Thus, we have to replicate the behavior here.
- for ln := range m {
- if !ln.IsValid() {
- return fmt.Errorf("%q is not a valid label name", ln)
- }
- }
- *l = LabelSet(m)
- return nil
-}
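As a rough sketch of how the LabelSet operations removed above are used after the switch to modules (label values are illustrative; the import path simply drops the vendor prefix):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "elasticsearch", "instance": "es-1:9108"}
	extra := model.LabelSet{"cluster": "prod"}

	// Merge is non-destructive: base is left untouched.
	merged := base.Merge(extra)
	fmt.Println(merged) // {cluster="prod", instance="es-1:9108", job="elasticsearch"}

	// Validate rejects invalid label names and non-UTF-8 values.
	if err := merged.Validate(); err != nil {
		fmt.Println("invalid label set:", err)
	}

	// Before gives a stable ordering; the set with fewer labels sorts first.
	fmt.Println(base.Before(merged)) // true
}
```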
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
deleted file mode 100644
index f7250909..00000000
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "regexp"
- "sort"
- "strings"
-)
-
-var (
- separator = []byte{0}
- // MetricNameRE is a regular expression matching valid metric
- // names. Note that the IsValidMetricName function performs the same
- // check but faster than a match with this regular expression.
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-)
-
-// A Metric is similar to a LabelSet, but the key difference is that a Metric is
-// a singleton and refers to one and only one stream of samples.
-type Metric LabelSet
-
-// Equal compares the metrics.
-func (m Metric) Equal(o Metric) bool {
- return LabelSet(m).Equal(LabelSet(o))
-}
-
-// Before compares the metrics' underlying label sets.
-func (m Metric) Before(o Metric) bool {
- return LabelSet(m).Before(LabelSet(o))
-}
-
-// Clone returns a copy of the Metric.
-func (m Metric) Clone() Metric {
- clone := make(Metric, len(m))
- for k, v := range m {
- clone[k] = v
- }
- return clone
-}
-
-func (m Metric) String() string {
- metricName, hasName := m[MetricNameLabel]
- numLabels := len(m) - 1
- if !hasName {
- numLabels = len(m)
- }
- labelStrings := make([]string, 0, numLabels)
- for label, value := range m {
- if label != MetricNameLabel {
- labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
- }
- }
-
- switch numLabels {
- case 0:
- if hasName {
- return string(metricName)
- }
- return "{}"
- default:
- sort.Strings(labelStrings)
- return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
- }
-}
-
-// Fingerprint returns a Metric's Fingerprint.
-func (m Metric) Fingerprint() Fingerprint {
- return LabelSet(m).Fingerprint()
-}
-
-// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
-// algorithm, which is, however, more susceptible to hash collisions.
-func (m Metric) FastFingerprint() Fingerprint {
- return LabelSet(m).FastFingerprint()
-}
-
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
-// This function, however, does not use MetricNameRE for the check but a much
-// faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
- if len(n) == 0 {
- return false
- }
- for i, b := range n {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
-}
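A small sketch of the Metric formatting and name validation defined in the file above, under the assumption that the module version behaves like the vendored code being removed (metric and label values are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "elasticsearch_cluster_health_status",
		"cluster":             "prod",
		"color":               "green",
	}

	// String renders the metric in the usual name{label="value", ...} form,
	// with labels sorted alphabetically and __name__ pulled out in front.
	fmt.Println(m)
	// elasticsearch_cluster_health_status{cluster="prod", color="green"}

	fmt.Println(model.IsValidMetricName("up"))   // true
	fmt.Println(model.IsValidMetricName("1_up")) // false
}
```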
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
deleted file mode 100644
index a7b96917..00000000
--- a/vendor/github.com/prometheus/common/model/model.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package model contains common data structures that are shared across
-// Prometheus components and libraries.
-package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
deleted file mode 100644
index 8762b13c..00000000
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2014 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "sort"
-)
-
-// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
-// used to separate label names, label values, and other strings from each other
-// when calculating their combined hash value (aka signature aka fingerprint).
-const SeparatorByte byte = 255
-
-var (
- // cache the signature of an empty label set.
- emptyLabelSignature = hashNew()
-)
-
-// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
-// given label set. (Collisions are possible but unlikely if the number of label
-// sets the function is applied to is small.)
-func LabelsToSignature(labels map[string]string) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make([]string, 0, len(labels))
- for labelName := range labels {
- labelNames = append(labelNames, labelName)
- }
- sort.Strings(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, labelName)
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, labels[labelName])
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
-// parameter (rather than a label map) and returns a Fingerprint.
-func labelSetToFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- labelNames := make(LabelNames, 0, len(ls))
- for labelName := range ls {
- labelNames = append(labelNames, labelName)
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(ls[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return Fingerprint(sum)
-}
-
-// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
-// faster and less allocation-heavy hash function, which is more susceptible to
-// create hash collisions. Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
- if len(ls) == 0 {
- return Fingerprint(emptyLabelSignature)
- }
-
- var result uint64
- for labelName, labelValue := range ls {
- sum := hashNew()
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(labelValue))
- result ^= sum
- }
- return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
- if len(labels) == 0 {
- return emptyLabelSignature
- }
-
- sort.Sort(LabelNames(labels))
-
- sum := hashNew()
- for _, label := range labels {
- sum = hashAdd(sum, string(label))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[label]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
- if len(m) == 0 {
- return emptyLabelSignature
- }
-
- labelNames := make(LabelNames, 0, len(m))
- for labelName := range m {
- if _, exclude := labels[labelName]; !exclude {
- labelNames = append(labelNames, labelName)
- }
- }
- if len(labelNames) == 0 {
- return emptyLabelSignature
- }
- sort.Sort(labelNames)
-
- sum := hashNew()
- for _, labelName := range labelNames {
- sum = hashAdd(sum, string(labelName))
- sum = hashAddByte(sum, SeparatorByte)
- sum = hashAdd(sum, string(m[labelName]))
- sum = hashAddByte(sum, SeparatorByte)
- }
- return sum
-}
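The fingerprinting helpers above sort label names before hashing, so the resulting signature is independent of map iteration order. A minimal sketch, assuming the module release exposes the same functions as the vendored file (label values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	labels := map[string]string{"job": "elasticsearch", "instance": "es-1:9108"}

	// Stable across runs because the names are sorted before hashing.
	fmt.Println(model.LabelsToSignature(labels))

	// SignatureForLabels restricts the hash to the listed label names.
	m := model.Metric{"job": "elasticsearch", "instance": "es-1:9108", "zone": "a"}
	fmt.Println(model.SignatureForLabels(m, "job", "instance"))
}
```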
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index 7538e299..00000000
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "time"
-)
-
-// Matcher describes a matcher for the value of a given label.
-type Matcher struct {
- Name LabelName `json:"name"`
- Value string `json:"value"`
- IsRegex bool `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
- type plain Matcher
- if err := json.Unmarshal(b, (*plain)(m)); err != nil {
- return err
- }
-
- if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Validate returns true iff all fields of the matcher have valid values.
-func (m *Matcher) Validate() error {
- if !m.Name.IsValid() {
- return fmt.Errorf("invalid name %q", m.Name)
- }
- if m.IsRegex {
- if _, err := regexp.Compile(m.Value); err != nil {
- return fmt.Errorf("invalid regular expression %q", m.Value)
- }
- } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
- return fmt.Errorf("invalid value %q", m.Value)
- }
- return nil
-}
-
-// Silence defines the representation of a silence definition
-// in the Prometheus ecosystem.
-type Silence struct {
- ID uint64 `json:"id,omitempty"`
-
- Matchers []*Matcher `json:"matchers"`
-
- StartsAt time.Time `json:"startsAt"`
- EndsAt time.Time `json:"endsAt"`
-
- CreatedAt time.Time `json:"createdAt,omitempty"`
- CreatedBy string `json:"createdBy"`
- Comment string `json:"comment,omitempty"`
-}
-
-// Validate returns true iff all fields of the silence have valid values.
-func (s *Silence) Validate() error {
- if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
- }
- for _, m := range s.Matchers {
- if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
- }
- }
- if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
- }
- if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
- }
- if s.EndsAt.Before(s.StartsAt) {
- return fmt.Errorf("start time must be before end time")
- }
- if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
- }
- if s.Comment == "" {
- return fmt.Errorf("comment missing")
- }
- if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
- }
- return nil
-}
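A short sketch of the validation rules enforced by Silence.Validate above, assuming the module version keeps this API (the matcher, creator, and comment values are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Silence{
		Matchers: []*model.Matcher{
			{Name: "job", Value: "elasticsearch"},
		},
		StartsAt:  time.Now(),
		EndsAt:    time.Now().Add(2 * time.Hour),
		CreatedAt: time.Now(),
		CreatedBy: "ops@example.com",
		Comment:   "rolling restart",
	}

	// Validate enforces the rules spelled out above: at least one valid
	// matcher, start before end, and creator/comment/creation time present.
	if err := s.Validate(); err != nil {
		fmt.Println("invalid silence:", err)
	} else {
		fmt.Println("silence ok")
	}
}
```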
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 548968ae..00000000
--- a/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "fmt"
- "math"
- "regexp"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- // minimumTick is the minimum supported time resolution. It must not
- // exceed time.Second for the conversions below to work.
- minimumTick = time.Millisecond
- // second is the Time duration equivalent to one second.
- second = int64(time.Second / minimumTick)
- // The number of nanoseconds per minimum tick.
- nanosPerTick = int64(minimumTick / time.Nanosecond)
-
- // Earliest is the earliest Time representable. Handy for
- // initializing a high watermark.
- Earliest = Time(math.MinInt64)
- // Latest is the latest Time representable. Handy for initializing
- // a low watermark.
- Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
- Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time {
- return TimeFromUnixNano(time.Now().UnixNano())
-}
-
-// TimeFromUnix returns the Time equivalent to the Unix Time t
-// provided in seconds.
-func TimeFromUnix(t int64) Time {
- return Time(t * second)
-}
-
-// TimeFromUnixNano returns the Time equivalent to the Unix Time
-// t provided in nanoseconds.
-func TimeFromUnixNano(t int64) Time {
- return Time(t / nanosPerTick)
-}
-
-// Equal reports whether two Times represent the same instant.
-func (t Time) Equal(o Time) bool {
- return t == o
-}
-
-// Before reports whether the Time t is before o.
-func (t Time) Before(o Time) bool {
- return t < o
-}
-
-// After reports whether the Time t is after o.
-func (t Time) After(o Time) bool {
- return t > o
-}
-
-// Add returns the Time t + d.
-func (t Time) Add(d time.Duration) Time {
- return t + Time(d/minimumTick)
-}
-
-// Sub returns the Duration t - o.
-func (t Time) Sub(o Time) time.Duration {
- return time.Duration(t-o) * minimumTick
-}
-
-// Time returns the time.Time representation of t.
-func (t Time) Time() time.Time {
- return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
-}
-
-// Unix returns t as a Unix time, the number of seconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) Unix() int64 {
- return int64(t) / second
-}
-
-// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC.
-func (t Time) UnixNano() int64 {
- return int64(t) * nanosPerTick
-}
-
-// The number of digits after the dot.
-var dotPrecision = int(math.Log10(float64(second)))
-
-// String returns a string representation of the Time.
-func (t Time) String() string {
- return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
-}
-
-// MarshalJSON implements the json.Marshaler interface.
-func (t Time) MarshalJSON() ([]byte, error) {
- return []byte(t.String()), nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (t *Time) UnmarshalJSON(b []byte) error {
- p := strings.Split(string(b), ".")
- switch len(p) {
- case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- *t = Time(v * second)
-
- case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
- if err != nil {
- return err
- }
- v *= second
-
- prec := dotPrecision - len(p[1])
- if prec < 0 {
- p[1] = p[1][:dotPrecision]
- } else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
- }
-
- va, err := strconv.ParseInt(p[1], 10, 32)
- if err != nil {
- return err
- }
-
- *t = Time(v + va)
-
- default:
- return fmt.Errorf("invalid time %q", string(b))
- }
- return nil
-}
-
-// Duration wraps time.Duration. It is used to parse the custom duration format
-// from YAML.
-// This type should not propagate beyond the scope of input/output processing.
-type Duration time.Duration
-
-var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
-
-// ParseDuration parses a string into a Duration, assuming that a year
-// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- matches := durationRE.FindStringSubmatch(durationStr)
- if len(matches) != 3 {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var (
- n, _ = strconv.Atoi(matches[1])
- dur = time.Duration(n) * time.Millisecond
- )
- switch unit := matches[2]; unit {
- case "y":
- dur *= 1000 * 60 * 60 * 24 * 365
- case "w":
- dur *= 1000 * 60 * 60 * 24 * 7
- case "d":
- dur *= 1000 * 60 * 60 * 24
- case "h":
- dur *= 1000 * 60 * 60
- case "m":
- dur *= 1000 * 60
- case "s":
- dur *= 1000
- case "ms":
- // Value already correct
- default:
- return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
- }
- return Duration(dur), nil
-}
-
-func (d Duration) String() string {
- var (
- ms = int64(time.Duration(d) / time.Millisecond)
- unit = "ms"
- )
- factors := map[string]int64{
- "y": 1000 * 60 * 60 * 24 * 365,
- "w": 1000 * 60 * 60 * 24 * 7,
- "d": 1000 * 60 * 60 * 24,
- "h": 1000 * 60 * 60,
- "m": 1000 * 60,
- "s": 1000,
- "ms": 1,
- }
-
- switch int64(0) {
- case ms % factors["y"]:
- unit = "y"
- case ms % factors["w"]:
- unit = "w"
- case ms % factors["d"]:
- unit = "d"
- case ms % factors["h"]:
- unit = "h"
- case ms % factors["m"]:
- unit = "m"
- case ms % factors["s"]:
- unit = "s"
- }
- return fmt.Sprintf("%v%v", ms/factors[unit], unit)
-}
-
-// MarshalYAML implements the yaml.Marshaler interface.
-func (d Duration) MarshalYAML() (interface{}, error) {
- return d.String(), nil
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var s string
- if err := unmarshal(&s); err != nil {
- return err
- }
- dur, err := ParseDuration(s)
- if err != nil {
- return err
- }
- *d = dur
- return nil
-}
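The duration and timestamp handling removed above is still exercised through the module-provided package. A minimal sketch, assuming the module release keeps ParseDuration and the millisecond-resolution Time type shown in this file:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// ParseDuration understands the Prometheus units y, w, d, h, m, s, ms.
	d, err := model.ParseDuration("90s")
	if err != nil {
		panic(err)
	}
	fmt.Println(d)                // 90s (String picks the largest unit that divides evenly)
	fmt.Println(time.Duration(d)) // 1m30s

	// model.Time has millisecond resolution and round-trips to time.Duration.
	t := model.Now()
	fmt.Println(t.Add(time.Duration(d)).Sub(t)) // 1m30s
}
```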
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
deleted file mode 100644
index c9ed3ffd..00000000
--- a/vendor/github.com/prometheus/common/model/value.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
- "encoding/json"
- "fmt"
- "math"
- "sort"
- "strconv"
- "strings"
-)
-
-var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
- // ZeroSample is the pseudo zero-value of Sample used to signal a
- // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
- // and metric nil. Note that the natural zero value of Sample has a timestamp
- // of 0, which is possible to appear in a real Sample and thus not suitable
- // to signal a non-existing Sample.
- ZeroSample = Sample{Timestamp: Earliest}
-)
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
-// Sample is a sample pair associated with a metric.
-type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-// Equal compares first the metrics, then the timestamp, then the value. The
-// semantics of value equality are defined by SampleValue.Equal.
-func (s *Sample) Equal(o *Sample) bool {
- if s == o {
- return true
- }
-
- if !s.Metric.Equal(o.Metric) {
- return false
- }
- if !s.Timestamp.Equal(o.Timestamp) {
- return false
- }
-
- return s.Value.Equal(o.Value)
-}
-
-func (s Sample) String() string {
- return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- })
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Sample) MarshalJSON() ([]byte, error) {
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- }
-
- return json.Marshal(&v)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Sample) UnmarshalJSON(b []byte) error {
- v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
- }{
- Metric: s.Metric,
- Value: SamplePair{
- Timestamp: s.Timestamp,
- Value: s.Value,
- },
- }
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
-
- return nil
-}
-
-// Samples is a sortable Sample slice. It implements sort.Interface.
-type Samples []*Sample
-
-func (s Samples) Len() int {
- return len(s)
-}
-
-// Less compares first the metrics, then the timestamp.
-func (s Samples) Less(i, j int) bool {
- switch {
- case s[i].Metric.Before(s[j].Metric):
- return true
- case s[j].Metric.Before(s[i].Metric):
- return false
- case s[i].Timestamp.Before(s[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-func (s Samples) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (s Samples) Equal(o Samples) bool {
- if len(s) != len(o) {
- return false
- }
-
- for i, sample := range s {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// SampleStream is a stream of Values belonging to an attached Metric.
-type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
-}
-
-func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
- for i, v := range ss.Values {
- vals[i] = v.String()
- }
- return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
-}
-
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
-}
-
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
-
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
-}
-
-// Scalar is a scalar value evaluated at the set timestamp.
-type Scalar struct {
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s Scalar) String() string {
- return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s Scalar) MarshalJSON() ([]byte, error) {
- v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *Scalar) UnmarshalJSON(b []byte) error {
- var f string
- v := [...]interface{}{&s.Timestamp, &f}
-
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
-
- value, err := strconv.ParseFloat(f, 64)
- if err != nil {
- return fmt.Errorf("error parsing sample value: %s", err)
- }
- s.Value = SampleValue(value)
- return nil
-}
-
-// String is a string value evaluated at the set timestamp.
-type String struct {
- Value string `json:"value"`
- Timestamp Time `json:"timestamp"`
-}
-
-func (s *String) String() string {
- return s.Value
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s String) MarshalJSON() ([]byte, error) {
- return json.Marshal([]interface{}{s.Timestamp, s.Value})
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *String) UnmarshalJSON(b []byte) error {
- v := [...]interface{}{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Vector is an alias for Samples, with the added contract that all Samples
-// in a Vector share the same timestamp.
-type Vector []*Sample
-
-func (vec Vector) String() string {
- entries := make([]string, len(vec))
- for i, s := range vec {
- entries[i] = s.String()
- }
- return strings.Join(entries, "\n")
-}
-
-func (vec Vector) Len() int { return len(vec) }
-func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
-
-// Less compares first the metrics, then the timestamp.
-func (vec Vector) Less(i, j int) bool {
- switch {
- case vec[i].Metric.Before(vec[j].Metric):
- return true
- case vec[j].Metric.Before(vec[i].Metric):
- return false
- case vec[i].Timestamp.Before(vec[j].Timestamp):
- return true
- default:
- return false
- }
-}
-
-// Equal compares two sets of samples and returns true if they are equal.
-func (vec Vector) Equal(o Vector) bool {
- if len(vec) != len(o) {
- return false
- }
-
- for i, sample := range vec {
- if !sample.Equal(o[i]) {
- return false
- }
- }
- return true
-}
-
-// Matrix is a list of time series.
-type Matrix []*SampleStream
-
-func (m Matrix) Len() int { return len(m) }
-func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
-func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
- sort.Sort(matCp)
-
- strs := make([]string, len(matCp))
-
- for i, ss := range matCp {
- strs[i] = ss.String()
- }
-
- return strings.Join(strs, "\n")
-}
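The value types above implement the JSON encoding used by the Prometheus HTTP API. A rough sketch of marshalling a Vector, assuming the module version keeps these marshaller implementations (the sample metric and timestamp are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	v := model.Vector{
		&model.Sample{
			Metric:    model.Metric{model.MetricNameLabel: "up", "job": "elasticsearch"},
			Value:     1,
			Timestamp: model.TimeFromUnix(1590000000),
		},
	}

	// Each sample marshals as {"metric": {...}, "value": [<ts>, "<value>"]},
	// matching the encoding implemented above.
	b, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	fmt.Println(v.Type()) // vector
}
```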
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
deleted file mode 100644
index 84489a51..00000000
--- a/vendor/github.com/prometheus/common/version/info.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package version
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strings"
- "text/template"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// Build information. Populated at build-time.
-var (
- Version string
- Revision string
- Branch string
- BuildUser string
- BuildDate string
- GoVersion = runtime.Version()
-)
-
-// NewCollector returns a collector which exports metrics about current version information.
-func NewCollector(program string) *prometheus.GaugeVec {
- buildInfo := prometheus.NewGaugeVec(
- prometheus.GaugeOpts{
- Namespace: program,
- Name: "build_info",
- Help: fmt.Sprintf(
- "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
- program,
- ),
- },
- []string{"version", "revision", "branch", "goversion"},
- )
- buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1)
- return buildInfo
-}
-
-// versionInfoTmpl contains the template used by Info.
-var versionInfoTmpl = `
-{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
- build user: {{.buildUser}}
- build date: {{.buildDate}}
- go version: {{.goVersion}}
-`
-
-// Print returns version information.
-func Print(program string) string {
- m := map[string]string{
- "program": program,
- "version": Version,
- "revision": Revision,
- "branch": Branch,
- "buildUser": BuildUser,
- "buildDate": BuildDate,
- "goVersion": GoVersion,
- }
- t := template.Must(template.New("version").Parse(versionInfoTmpl))
-
- var buf bytes.Buffer
- if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
- panic(err)
- }
- return strings.TrimSpace(buf.String())
-}
-
-// Info returns version, branch and revision information.
-func Info() string {
- return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
-}
-
-// BuildContext returns goVersion, buildUser and buildDate information.
-func BuildContext() string {
- return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
-}
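The version package removed from the vendor tree is what the build flags populate at link time. A hedged sketch of how an exporter typically wires it up, assuming the module release keeps Print and NewCollector as shown above (the promhttp handler from client_golang and the registration pattern are conventional, not taken from this diff):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/version"
)

func main() {
	// Print renders the template defined above with the values injected via -X ldflags.
	fmt.Println(version.Print("elasticsearch_exporter"))

	// NewCollector exposes the same information as an elasticsearch_exporter_build_info gauge.
	prometheus.MustRegister(version.NewCollector("elasticsearch_exporter"))
	http.Handle("/metrics", promhttp.Handler())
	// http.ListenAndServe(":9114", nil) // left commented so the sketch stays side-effect free
}
```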
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
deleted file mode 100644
index 0c802dd8..00000000
--- a/vendor/github.com/prometheus/procfs/AUTHORS.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Tobias Schmidt
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Armen Baghumian
-* Bjoern Rabenstein
-* David Cournapeau
-* Ji-Hoon, Seol
-* Jonas Große Sundrup
-* Julius Volz
-* Matthias Rampke
-* Nicky Gerritsen
-* Rémi Audebert
-* Tobias Schmidt
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
deleted file mode 100644
index 5705f0fb..00000000
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Contributing
-
-Prometheus uses GitHub to manage reviews of pull requests.
-
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
-
-* If you plan to do something more involved, first discuss your ideas
- on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
- This will avoid unnecessary work and surely give you and us a good deal
- of inspiration.
-
-* Relevant coding style guidelines are the [Go Code Review
- Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
- and the _Formatting and style_ section of Peter Bourgon's [Go: Best
- Practices for Production
- Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/prometheus/procfs/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
deleted file mode 100644
index c264a49d..00000000
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-ci:
- ! gofmt -l *.go | read nothing
- go vet
- go test -v ./...
- go get github.com/golang/lint/golint
- golint *.go
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
deleted file mode 100644
index 53c5e9aa..00000000
--- a/vendor/github.com/prometheus/procfs/NOTICE
+++ /dev/null
@@ -1,7 +0,0 @@
-procfs provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
-
-Copyright 2014-2015 The Prometheus Authors
-
-This product includes software developed at
-SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
deleted file mode 100644
index 6e7ee6b8..00000000
--- a/vendor/github.com/prometheus/procfs/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# procfs
-
-This procfs package provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
-
-*WARNING*: This package is a work in progress. Its API may still break in
-backwards-incompatible ways without warnings. Use it at your own risk.
-
-[GoDoc](https://godoc.org/github.com/prometheus/procfs)
-[Build Status](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
deleted file mode 100644
index e2acd6d4..00000000
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package procfs provides functions to retrieve system, kernel and process
-// metrics from the pseudo-filesystem proc.
-//
-// Example:
-//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.NewStat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
-package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
deleted file mode 100644
index 49aaab05..00000000
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package procfs
-
-import (
- "fmt"
- "os"
- "path"
-)
-
-// FS represents the pseudo-filesystem proc, which provides an interface to
-// kernel data structures.
-type FS string
-
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = "/proc"
-
-// NewFS returns a new FS mounted under the given mountPoint. It will error
-// if the mount point can't be read.
-func NewFS(mountPoint string) (FS, error) {
- info, err := os.Stat(mountPoint)
- if err != nil {
- return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
- }
- if !info.IsDir() {
- return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
- }
-
- return FS(mountPoint), nil
-}
-
-// Path joins the given path elements onto the procfs mount point and returns the result.
-func (fs FS) Path(p ...string) string {
- return path.Join(append([]string{string(fs)}, p...)...)
-}
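
For context, a minimal usage sketch (not part of this change) of the `FS` helper removed above. It targets the vendored procfs API that is being deleted here, and assumes `/proc` is mounted; newer upstream procfs releases have a different API.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS verifies that the mount point exists and is a directory.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatalf("could not open procfs: %s", err)
	}
	// Path joins sub-paths onto the mount point, e.g. "/proc/net/ip_vs_stats".
	fmt.Println(fs.Path("net", "ip_vs_stats"))
}
```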
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
deleted file mode 100644
index e7012f73..00000000
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package procfs
-
-import (
- "bufio"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
-type IPVSStats struct {
- // Total count of connections.
- Connections uint64
- // Total incoming packets processed.
- IncomingPackets uint64
- // Total outgoing packets processed.
- OutgoingPackets uint64
- // Total incoming traffic.
- IncomingBytes uint64
- // Total outgoing traffic.
- OutgoingBytes uint64
-}
-
-// IPVSBackendStatus holds current metrics of one virtual / real address pair.
-type IPVSBackendStatus struct {
- // The local (virtual) IP address.
- LocalAddress net.IP
- // The local (virtual) port.
- LocalPort uint16
- // The transport protocol (TCP, UDP).
- Proto string
- // The remote (real) IP address.
- RemoteAddress net.IP
- // The remote (real) port.
- RemotePort uint16
- // The current number of active connections for this virtual/real address pair.
- ActiveConn uint64
- // The current number of inactive connections for this virtual/real address pair.
- InactConn uint64
- // The current weight of this virtual/real address pair.
- Weight uint64
-}
-
-// NewIPVSStats reads the IPVS statistics.
-func NewIPVSStats() (IPVSStats, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return IPVSStats{}, err
- }
-
- return fs.NewIPVSStats()
-}
-
-// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
-func (fs FS) NewIPVSStats() (IPVSStats, error) {
- file, err := os.Open(fs.Path("net/ip_vs_stats"))
- if err != nil {
- return IPVSStats{}, err
- }
- defer file.Close()
-
- return parseIPVSStats(file)
-}
-
-// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
-func parseIPVSStats(file io.Reader) (IPVSStats, error) {
- var (
- statContent []byte
- statLines []string
- statFields []string
- stats IPVSStats
- )
-
- statContent, err := ioutil.ReadAll(file)
- if err != nil {
- return IPVSStats{}, err
- }
-
- statLines = strings.SplitN(string(statContent), "\n", 4)
- if len(statLines) != 4 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
- }
-
- statFields = strings.Fields(statLines[2])
- if len(statFields) != 5 {
- return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
- }
-
- stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
- stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
- if err != nil {
- return IPVSStats{}, err
- }
-
- return stats, nil
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
-func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return []IPVSBackendStatus{}, err
- }
-
- return fs.NewIPVSBackendStatus()
-}
-
-// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
-func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
- file, err := os.Open(fs.Path("net/ip_vs"))
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return parseIPVSBackendStatus(file)
-}
-
-func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
- var (
- status []IPVSBackendStatus
- scanner = bufio.NewScanner(file)
- proto string
- localAddress net.IP
- localPort uint16
- err error
- )
-
- for scanner.Scan() {
- fields := strings.Fields(string(scanner.Text()))
- if len(fields) == 0 {
- continue
- }
- switch {
- case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
- continue
- case fields[0] == "TCP" || fields[0] == "UDP":
- if len(fields) < 2 {
- continue
- }
- proto = fields[0]
- localAddress, localPort, err = parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- case fields[0] == "->":
- if len(fields) < 6 {
- continue
- }
- remoteAddress, remotePort, err := parseIPPort(fields[1])
- if err != nil {
- return nil, err
- }
- weight, err := strconv.ParseUint(fields[3], 10, 64)
- if err != nil {
- return nil, err
- }
- activeConn, err := strconv.ParseUint(fields[4], 10, 64)
- if err != nil {
- return nil, err
- }
- inactConn, err := strconv.ParseUint(fields[5], 10, 64)
- if err != nil {
- return nil, err
- }
- status = append(status, IPVSBackendStatus{
- LocalAddress: localAddress,
- LocalPort: localPort,
- RemoteAddress: remoteAddress,
- RemotePort: remotePort,
- Proto: proto,
- Weight: weight,
- ActiveConn: activeConn,
- InactConn: inactConn,
- })
- }
- }
- return status, nil
-}
-
-func parseIPPort(s string) (net.IP, uint16, error) {
- tmp := strings.SplitN(s, ":", 2)
-
- if len(tmp) != 2 {
- return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
- }
-
- if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
- return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
- }
-
- ip, err := hex.DecodeString(tmp[0])
- if err != nil {
- return nil, 0, err
- }
-
- port, err := strconv.ParseUint(tmp[1], 16, 16)
- if err != nil {
- return nil, 0, err
- }
-
- return ip, uint16(port), nil
-}
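
`/proc/net/ip_vs` encodes addresses as hexadecimal `IP:Port` pairs, which is what the helper deleted above decoded. A self-contained sketch of the same decoding logic follows; the sample value is assumed for illustration only.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
	"strings"
)

// decodeIPPort mirrors the removed parseIPPort helper: addresses are hex-encoded,
// e.g. "C0A80001:0050" is 192.168.0.1:80.
func decodeIPPort(s string) (net.IP, uint16, error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
	}
	ip, err := hex.DecodeString(parts[0])
	if err != nil {
		return nil, 0, err
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	if err != nil {
		return nil, 0, err
	}
	return ip, uint16(port), nil
}

func main() {
	ip, port, err := decodeIPPort("C0A80001:0050")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d\n", ip, port) // 192.168.0.1:80
}
```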
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
deleted file mode 100644
index d7a248c0..00000000
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
- buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
-)
-
-// MDStat holds info parsed from /proc/mdstat.
-type MDStat struct {
- // Name of the device.
- Name string
- // activity-state of the device.
- ActivityState string
- // Number of active disks.
- DisksActive int64
- // Total number of disks the device consists of.
- DisksTotal int64
- // Number of blocks the device holds.
- BlocksTotal int64
- // Number of blocks on the device that are in sync.
- BlocksSynced int64
-}
-
-// ParseMDStat parses an mdstat file and returns a struct with the relevant info.
-func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
- mdStatusFilePath := fs.Path("mdstat")
- content, err := ioutil.ReadFile(mdStatusFilePath)
- if err != nil {
- return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
- }
-
- mdStates := []MDStat{}
- lines := strings.Split(string(content), "\n")
- for i, l := range lines {
- if l == "" {
- continue
- }
- if l[0] == ' ' {
- continue
- }
- if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
- continue
- }
-
- mainLine := strings.Split(l, " ")
- if len(mainLine) < 3 {
- return mdStates, fmt.Errorf("error parsing mdline: %s", l)
- }
- mdName := mainLine[0]
- activityState := mainLine[2]
-
- if len(lines) <= i+3 {
- return mdStates, fmt.Errorf(
- "error parsing %s: too few lines for md device %s",
- mdStatusFilePath,
- mdName,
- )
- }
-
- active, total, size, err := evalStatusline(lines[i+1])
- if err != nil {
- return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
- }
-
- // j is the line number of the syncing-line.
- j := i + 2
- if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
- j = i + 3
- }
-
- // If device is syncing at the moment, get the number of currently
- // synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
- if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
- syncedBlocks, err = evalBuildline(lines[j])
- if err != nil {
- return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
- }
- }
-
- mdStates = append(mdStates, MDStat{
- Name: mdName,
- ActivityState: activityState,
- DisksActive: active,
- DisksTotal: total,
- BlocksTotal: size,
- BlocksSynced: syncedBlocks,
- })
- }
-
- return mdStates, nil
-}
-
-func evalStatusline(statusline string) (active, total, size int64, err error) {
- matches := statuslineRE.FindStringSubmatch(statusline)
- if len(matches) != 4 {
- return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
- }
-
- size, err = strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
- }
-
- total, err = strconv.ParseInt(matches[2], 10, 64)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
- }
-
- active, err = strconv.ParseInt(matches[3], 10, 64)
- if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
- }
-
- return active, total, size, nil
-}
-
-func evalBuildline(buildline string) (syncedBlocks int64, err error) {
- matches := buildlineRE.FindStringSubmatch(buildline)
- if len(matches) != 2 {
- return 0, fmt.Errorf("unexpected buildline: %s", buildline)
- }
-
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
- }
-
- return syncedBlocks, nil
-}
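
The status-line regexp is the core of the mdstat parsing removed above. A standalone sketch against an illustrative `/proc/mdstat` status line (the sample line is assumed, not taken from this repository):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Same pattern as the removed evalStatusline helper.
var statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)

func main() {
	// Illustrative second line of an md device entry in /proc/mdstat.
	line := "5853468288 blocks super 1.2 level 5, 64k chunk, algorithm 2 [4/4] [UUUU]"
	m := statuslineRE.FindStringSubmatch(line)
	if len(m) != 4 {
		panic("unexpected statusline")
	}
	size, _ := strconv.ParseInt(m[1], 10, 64)
	total, _ := strconv.ParseInt(m[2], 10, 64)
	active, _ := strconv.ParseInt(m[3], 10, 64)
	fmt.Printf("blocks=%d disks active/total=%d/%d\n", size, active, total)
}
```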
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
deleted file mode 100644
index 0d0a6a90..00000000
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "strings"
-)
-
-// Proc provides information about a running process.
-type Proc struct {
- // The process ID.
- PID int
-
- fs FS
-}
-
-// Procs represents a list of Proc structs.
-type Procs []Proc
-
-func (p Procs) Len() int { return len(p) }
-func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
-
-// Self returns a process for the current process read via /proc/self.
-func Self() (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.Self()
-}
-
-// NewProc returns a process for the given pid under /proc.
-func NewProc(pid int) (Proc, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Proc{}, err
- }
- return fs.NewProc(pid)
-}
-
-// AllProcs returns a list of all currently available processes under /proc.
-func AllProcs() (Procs, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Procs{}, err
- }
- return fs.AllProcs()
-}
-
-// Self returns a process for the current process.
-func (fs FS) Self() (Proc, error) {
- p, err := os.Readlink(fs.Path("self"))
- if err != nil {
- return Proc{}, err
- }
- pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
- if err != nil {
- return Proc{}, err
- }
- return fs.NewProc(pid)
-}
-
-// NewProc returns a process for the given pid.
-func (fs FS) NewProc(pid int) (Proc, error) {
- if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
- return Proc{}, err
- }
- return Proc{PID: pid, fs: fs}, nil
-}
-
-// AllProcs returns a list of all currently available processes.
-func (fs FS) AllProcs() (Procs, error) {
- d, err := os.Open(fs.Path())
- if err != nil {
- return Procs{}, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
- }
-
- p := Procs{}
- for _, n := range names {
- pid, err := strconv.ParseInt(n, 10, 64)
- if err != nil {
- continue
- }
- p = append(p, Proc{PID: int(pid), fs: fs})
- }
-
- return p, nil
-}
-
-// CmdLine returns the command line of a process.
-func (p Proc) CmdLine() ([]string, error) {
- f, err := os.Open(p.path("cmdline"))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- if len(data) < 1 {
- return []string{}, nil
- }
-
- return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
-}
-
-// Comm returns the command name of a process.
-func (p Proc) Comm() (string, error) {
- f, err := os.Open(p.path("comm"))
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return "", err
- }
-
- return strings.TrimSpace(string(data)), nil
-}
-
-// Executable returns the absolute path of the executable command of a process.
-func (p Proc) Executable() (string, error) {
- exe, err := os.Readlink(p.path("exe"))
- if os.IsNotExist(err) {
- return "", nil
- }
-
- return exe, err
-}
-
-// FileDescriptors returns the currently open file descriptors of a process.
-func (p Proc) FileDescriptors() ([]uintptr, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- fds := make([]uintptr, len(names))
- for i, n := range names {
- fd, err := strconv.ParseInt(n, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
- }
- fds[i] = uintptr(fd)
- }
-
- return fds, nil
-}
-
-// FileDescriptorTargets returns the targets of all file descriptors of a process.
-// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
-func (p Proc) FileDescriptorTargets() ([]string, error) {
- names, err := p.fileDescriptors()
- if err != nil {
- return nil, err
- }
-
- targets := make([]string, len(names))
-
- for i, name := range names {
- target, err := os.Readlink(p.path("fd", name))
- if err == nil {
- targets[i] = target
- }
- }
-
- return targets, nil
-}
-
-// FileDescriptorsLen returns the number of currently open file descriptors of
-// a process.
-func (p Proc) FileDescriptorsLen() (int, error) {
- fds, err := p.fileDescriptors()
- if err != nil {
- return 0, err
- }
-
- return len(fds), nil
-}
-
-func (p Proc) fileDescriptors() ([]string, error) {
- d, err := os.Open(p.path("fd"))
- if err != nil {
- return nil, err
- }
- defer d.Close()
-
- names, err := d.Readdirnames(-1)
- if err != nil {
- return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
- }
-
- return names, nil
-}
-
-func (p Proc) path(pa ...string) string {
- return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
-}
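
A small usage sketch for the `Proc` helpers removed above, again written against the vendored API version (newer upstream procfs releases changed these signatures):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	procs, err := procfs.AllProcs()
	if err != nil {
		log.Fatalf("could not list processes: %s", err)
	}
	for _, p := range procs {
		comm, err := p.Comm()
		if err != nil {
			continue // the process may have exited in the meantime
		}
		fmt.Printf("%d\t%s\n", p.PID, comm)
	}
}
```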
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
deleted file mode 100644
index b4e31d7b..00000000
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package procfs
-
-import (
- "fmt"
- "io/ioutil"
- "os"
-)
-
-// ProcIO models the content of /proc/<pid>/io.
-type ProcIO struct {
- // Chars read.
- RChar uint64
- // Chars written.
- WChar uint64
- // Read syscalls.
- SyscR uint64
- // Write syscalls.
- SyscW uint64
- // Bytes read.
- ReadBytes uint64
- // Bytes written.
- WriteBytes uint64
- // Bytes written, but taking into account truncation. See
- // Documentation/filesystems/proc.txt in the kernel sources for
- // detailed explanation.
- CancelledWriteBytes int64
-}
-
-// NewIO creates a new ProcIO instance from a given Proc instance.
-func (p Proc) NewIO() (ProcIO, error) {
- pio := ProcIO{}
-
- f, err := os.Open(p.path("io"))
- if err != nil {
- return pio, err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return pio, err
- }
-
- ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
- "read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
-
- _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
- &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
- if err != nil {
- return pio, err
- }
-
- return pio, nil
-}
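
The `ProcIO` parser above relies on `fmt.Sscanf` with a fixed format string. A self-contained sketch with assumed sample data:

```go
package main

import "fmt"

func main() {
	// Assumed sample in the shape of /proc/<pid>/io that the removed parser consumed.
	data := "rchar: 750339\nwchar: 818609\nsyscr: 7405\nsyscw: 5245\n" +
		"read_bytes: 1024\nwrite_bytes: 2048\ncancelled_write_bytes: 0\n"
	format := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
		"read_bytes: %d\nwrite_bytes: %d\ncancelled_write_bytes: %d\n"

	var rchar, wchar, syscr, syscw, readBytes, writeBytes uint64
	var cancelled int64
	if _, err := fmt.Sscanf(data, format, &rchar, &wchar, &syscr, &syscw,
		&readBytes, &writeBytes, &cancelled); err != nil {
		panic(err)
	}
	fmt.Println("read:", readBytes, "written:", writeBytes, "cancelled:", cancelled)
}
```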
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
deleted file mode 100644
index 2df997ce..00000000
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "regexp"
- "strconv"
-)
-
-// ProcLimits represents the soft limits for each of the process's resource
-// limits. For more information see getrlimit(2):
-// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
-type ProcLimits struct {
- // CPU time limit in seconds.
- CPUTime int
- // Maximum size of files that the process may create.
- FileSize int
- // Maximum size of the process's data segment (initialized data,
- // uninitialized data, and heap).
- DataSize int
- // Maximum size of the process stack in bytes.
- StackSize int
- // Maximum size of a core file.
- CoreFileSize int
- // Limit of the process's resident set in pages.
- ResidentSet int
- // Maximum number of processes that can be created for the real user ID of
- // the calling process.
- Processes int
- // Value one greater than the maximum file descriptor number that can be
- // opened by this process.
- OpenFiles int
- // Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory int
- // Maximum size of the process's virtual memory address space in bytes.
- AddressSpace int
- // Limit on the combined number of flock(2) locks and fcntl(2) leases that
- // this process may establish.
- FileLocks int
- // Limit of signals that may be queued for the real user ID of the calling
- // process.
- PendingSignals int
- // Limit on the number of bytes that can be allocated for POSIX message
- // queues for the real user ID of the calling process.
- MsqqueueSize int
- // Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority int
- // Limit of the real-time priority set using sched_setscheduler(2) or
- // sched_setparam(2).
- RealtimePriority int
- // Limit (in microseconds) on the amount of CPU time that a process
- // scheduled under a real-time scheduling policy may consume without making
- // a blocking system call.
- RealtimeTimeout int
-}
-
-const (
- limitsFields = 3
- limitsUnlimited = "unlimited"
-)
-
-var (
- limitsDelimiter = regexp.MustCompile("  +")
-)
-
-// NewLimits returns the current soft limits of the process.
-func (p Proc) NewLimits() (ProcLimits, error) {
- f, err := os.Open(p.path("limits"))
- if err != nil {
- return ProcLimits{}, err
- }
- defer f.Close()
-
- var (
- l = ProcLimits{}
- s = bufio.NewScanner(f)
- )
- for s.Scan() {
- fields := limitsDelimiter.Split(s.Text(), limitsFields)
- if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf(
- "couldn't parse %s line %s", f.Name(), s.Text())
- }
-
- switch fields[0] {
- case "Max cpu time":
- l.CPUTime, err = parseInt(fields[1])
- case "Max file size":
- l.FileSize, err = parseInt(fields[1])
- case "Max data size":
- l.DataSize, err = parseInt(fields[1])
- case "Max stack size":
- l.StackSize, err = parseInt(fields[1])
- case "Max core file size":
- l.CoreFileSize, err = parseInt(fields[1])
- case "Max resident set":
- l.ResidentSet, err = parseInt(fields[1])
- case "Max processes":
- l.Processes, err = parseInt(fields[1])
- case "Max open files":
- l.OpenFiles, err = parseInt(fields[1])
- case "Max locked memory":
- l.LockedMemory, err = parseInt(fields[1])
- case "Max address space":
- l.AddressSpace, err = parseInt(fields[1])
- case "Max file locks":
- l.FileLocks, err = parseInt(fields[1])
- case "Max pending signals":
- l.PendingSignals, err = parseInt(fields[1])
- case "Max msgqueue size":
- l.MsqqueueSize, err = parseInt(fields[1])
- case "Max nice priority":
- l.NicePriority, err = parseInt(fields[1])
- case "Max realtime priority":
- l.RealtimePriority, err = parseInt(fields[1])
- case "Max realtime timeout":
- l.RealtimeTimeout, err = parseInt(fields[1])
- }
- if err != nil {
- return ProcLimits{}, err
- }
- }
-
- return l, s.Err()
-}
-
-func parseInt(s string) (int, error) {
- if s == limitsUnlimited {
- return -1, nil
- }
- i, err := strconv.ParseInt(s, 10, 32)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
- }
- return int(i), nil
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
deleted file mode 100644
index 724e271b..00000000
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package procfs
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "os"
-)
-
-// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
-// which required cgo. However, that caused a lot of problems regarding
-// cross-compilation. Alternatives such as running a binary to determine the
-// value, or trying to derive it in some other way were all problematic. After
-// much research it was determined that USER_HZ is actually hardcoded to 100 on
-// all Go-supported platforms as of the time of this writing. This is why we
-// decided to hardcode it here as well. It is not impossible that there could
-// be systems with exceptions, but they should be very exotic edge cases, and
-// in that case, the worst outcome will be two misreported metrics.
-//
-// See also the following discussions:
-//
-// - https://github.com/prometheus/node_exporter/issues/52
-// - https://github.com/prometheus/procfs/pull/2
-// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
-const userHZ = 100
-
-// ProcStat provides status information about the process,
-// read from /proc/[pid]/stat.
-type ProcStat struct {
- // The process ID.
- PID int
- // The filename of the executable.
- Comm string
- // The process state.
- State string
- // The PID of the parent of this process.
- PPID int
- // The process group ID of the process.
- PGRP int
- // The session ID of the process.
- Session int
- // The controlling terminal of the process.
- TTY int
- // The ID of the foreground process group of the controlling terminal of
- // the process.
- TPGID int
- // The kernel flags word of the process.
- Flags uint
- // The number of minor faults the process has made which have not required
- // loading a memory page from disk.
- MinFlt uint
- // The number of minor faults that the process's waited-for children have
- // made.
- CMinFlt uint
- // The number of major faults the process has made which have required
- // loading a memory page from disk.
- MajFlt uint
- // The number of major faults that the process's waited-for children have
- // made.
- CMajFlt uint
- // Amount of time that this process has been scheduled in user mode,
- // measured in clock ticks.
- UTime uint
- // Amount of time that this process has been scheduled in kernel mode,
- // measured in clock ticks.
- STime uint
- // Amount of time that this process's waited-for children have been
- // scheduled in user mode, measured in clock ticks.
- CUTime uint
- // Amount of time that this process's waited-for children have been
- // scheduled in kernel mode, measured in clock ticks.
- CSTime uint
- // For processes running a real-time scheduling policy, this is the negated
- // scheduling priority, minus one.
- Priority int
- // The nice value, a value in the range 19 (low priority) to -20 (high
- // priority).
- Nice int
- // Number of threads in this process.
- NumThreads int
- // The time the process started after system boot, the value is expressed
- // in clock ticks.
- Starttime uint64
- // Virtual memory size in bytes.
- VSize int
- // Resident set size in pages.
- RSS int
-
- fs FS
-}
-
-// NewStat returns the current status information of the process.
-func (p Proc) NewStat() (ProcStat, error) {
- f, err := os.Open(p.path("stat"))
- if err != nil {
- return ProcStat{}, err
- }
- defer f.Close()
-
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return ProcStat{}, err
- }
-
- var (
- ignore int
-
- s = ProcStat{PID: p.PID, fs: p.fs}
- l = bytes.Index(data, []byte("("))
- r = bytes.LastIndex(data, []byte(")"))
- )
-
- if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf(
- "unexpected format, couldn't extract comm: %s",
- data,
- )
- }
-
- s.Comm = string(data[l+1 : r])
- _, err = fmt.Fscan(
- bytes.NewBuffer(data[r+2:]),
- &s.State,
- &s.PPID,
- &s.PGRP,
- &s.Session,
- &s.TTY,
- &s.TPGID,
- &s.Flags,
- &s.MinFlt,
- &s.CMinFlt,
- &s.MajFlt,
- &s.CMajFlt,
- &s.UTime,
- &s.STime,
- &s.CUTime,
- &s.CSTime,
- &s.Priority,
- &s.Nice,
- &s.NumThreads,
- &ignore,
- &s.Starttime,
- &s.VSize,
- &s.RSS,
- )
- if err != nil {
- return ProcStat{}, err
- }
-
- return s, nil
-}
-
-// VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() int {
- return s.VSize
-}
-
-// ResidentMemory returns the resident memory size in bytes.
-func (s ProcStat) ResidentMemory() int {
- return s.RSS * os.Getpagesize()
-}
-
-// StartTime returns the unix timestamp of the process in seconds.
-func (s ProcStat) StartTime() (float64, error) {
- stat, err := s.fs.NewStat()
- if err != nil {
- return 0, err
- }
- return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
-}
-
-// CPUTime returns the total CPU user and system time in seconds.
-func (s ProcStat) CPUTime() float64 {
- return float64(s.UTime+s.STime) / userHZ
-}
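
One detail worth noting in the removed `NewStat`: the comm field is taken from the first `(` to the *last* `)`, so executable names that themselves contain `)` still parse. A tiny sketch with a made-up stat line:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Made-up, abbreviated /proc/<pid>/stat content.
	data := []byte("1234 (weird) name) R 1 1234 1234 0 -1 4194560")
	l := bytes.Index(data, []byte("("))
	r := bytes.LastIndex(data, []byte(")"))
	if l < 0 || r < 0 {
		panic("unexpected format")
	}
	fmt.Println(string(data[l+1 : r])) // weird) name
}
```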
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
deleted file mode 100644
index 1ca217e8..00000000
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// Stat represents kernel/system statistics.
-type Stat struct {
- // Boot time in seconds since the Epoch.
- BootTime int64
-}
-
-// NewStat returns kernel/system statistics read from /proc/stat.
-func NewStat() (Stat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return Stat{}, err
- }
-
- return fs.NewStat()
-}
-
-// NewStat returns information about the current kernel/system statistics.
-func (fs FS) NewStat() (Stat, error) {
- f, err := os.Open(fs.Path("stat"))
- if err != nil {
- return Stat{}, err
- }
- defer f.Close()
-
- s := bufio.NewScanner(f)
- for s.Scan() {
- line := s.Text()
- if !strings.HasPrefix(line, "btime") {
- continue
- }
- fields := strings.Fields(line)
- if len(fields) != 2 {
- return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
- }
- i, err := strconv.ParseInt(fields[1], 10, 32)
- if err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
- }
- return Stat{BootTime: i}, nil
- }
- if err := s.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
- }
-
- return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
-}
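
The removed kernel `Stat` parser only cares about the `btime` line of `/proc/stat`. A self-contained sketch with abbreviated, assumed sample content:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Abbreviated /proc/stat content; only the btime line is used.
	content := "cpu  2255 34 2290 22625563\nbtime 1585727400\nprocesses 26442\n"
	for _, line := range strings.Split(content, "\n") {
		if !strings.HasPrefix(line, "btime") {
			continue
		}
		fields := strings.Fields(line)
		bootTime, err := strconv.ParseInt(fields[1], 10, 64)
		if err != nil {
			panic(err)
		}
		fmt.Println("boot time (unix seconds):", bootTime)
	}
}
```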
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING b/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING
deleted file mode 100644
index 2993ec08..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2014 Alec Thomas
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/README.md b/vendor/gopkg.in/alecthomas/kingpin.v2/README.md
deleted file mode 100644
index 498704c8..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/README.md
+++ /dev/null
@@ -1,674 +0,0 @@
-# Kingpin - A Go (golang) command line and flag parser
-[](http://godoc.org/github.com/alecthomas/kingpin) [](https://travis-ci.org/alecthomas/kingpin) [](https://gitter.im/alecthomas/Lobby)
-
-
-
-
-
-- [Overview](#overview)
-- [Features](#features)
-- [User-visible changes between v1 and v2](#user-visible-changes-between-v1-and-v2)
- - [Flags can be used at any point after their definition.](#flags-can-be-used-at-any-point-after-their-definition)
- - [Short flags can be combined with their parameters](#short-flags-can-be-combined-with-their-parameters)
-- [API changes between v1 and v2](#api-changes-between-v1-and-v2)
-- [Versions](#versions)
- - [V2 is the current stable version](#v2-is-the-current-stable-version)
- - [V1 is the OLD stable version](#v1-is-the-old-stable-version)
-- [Change History](#change-history)
-- [Examples](#examples)
- - [Simple Example](#simple-example)
- - [Complex Example](#complex-example)
-- [Reference Documentation](#reference-documentation)
- - [Displaying errors and usage information](#displaying-errors-and-usage-information)
- - [Sub-commands](#sub-commands)
- - [Custom Parsers](#custom-parsers)
- - [Repeatable flags](#repeatable-flags)
- - [Boolean Values](#boolean-values)
- - [Default Values](#default-values)
- - [Place-holders in Help](#place-holders-in-help)
- - [Consuming all remaining arguments](#consuming-all-remaining-arguments)
- - [Bash/ZSH Shell Completion](#bashzsh-shell-completion)
- - [Supporting -h for help](#supporting--h-for-help)
- - [Custom help](#custom-help)
-
-
-
-## Overview
-
-Kingpin is a [fluent-style](http://en.wikipedia.org/wiki/Fluent_interface),
-type-safe command-line parser. It supports flags, nested commands, and
-positional arguments.
-
-Install it with:
-
- $ go get gopkg.in/alecthomas/kingpin.v2
-
-It looks like this:
-
-```go
-var (
- verbose = kingpin.Flag("verbose", "Verbose mode.").Short('v').Bool()
- name = kingpin.Arg("name", "Name of user.").Required().String()
-)
-
-func main() {
- kingpin.Parse()
- fmt.Printf("%v, %s\n", *verbose, *name)
-}
-```
-
-More [examples](https://github.com/alecthomas/kingpin/tree/master/_examples) are available.
-
-Second to parsing, providing the user with useful help is probably the most
-important thing a command-line parser does. Kingpin tries to provide detailed
-contextual help if `--help` is encountered at any point in the command line
-(excluding after `--`).
-
-## Features
-
-- Help output that isn't as ugly as sin.
-- Fully [customisable help](#custom-help), via Go templates.
-- Parsed, type-safe flags (`kingpin.Flag("f", "help").Int()`)
-- Parsed, type-safe positional arguments (`kingpin.Arg("a", "help").Int()`).
-- Parsed, type-safe, arbitrarily deep commands (`kingpin.Command("c", "help")`).
-- Support for required flags and required positional arguments (`kingpin.Flag("f", "").Required().Int()`).
-- Support for arbitrarily nested default commands (`command.Default()`).
-- Callbacks per command, flag and argument (`kingpin.Command("c", "").Action(myAction)`).
-- POSIX-style short flag combining (`-a -b` -> `-ab`).
-- Short-flag+parameter combining (`-a parm` -> `-aparm`).
-- Read command-line from files (`@<file>`).
-- Automatically generate man pages (`--help-man`).
-
-## User-visible changes between v1 and v2
-
-### Flags can be used at any point after their definition.
-
-Flags can be specified at any point after their definition, not just
-*immediately after their associated command*. From the chat example below, the
-following used to be required:
-
-```
-$ chat --server=chat.server.com:8080 post --image=~/Downloads/owls.jpg pics
-```
-
-But the following will now work:
-
-```
-$ chat post --server=chat.server.com:8080 --image=~/Downloads/owls.jpg pics
-```
-
-### Short flags can be combined with their parameters
-
-Previously, if a short flag was used, any argument to that flag would have to
-be separated by a space. That is no longer the case.
-
-## API changes between v1 and v2
-
-- `ParseWithFileExpansion()` is gone. The new parser directly supports expanding `@<file>`.
-- Added `FatalUsage()` and `FatalUsageContext()` for displaying an error + usage and terminating.
-- `Dispatch()` renamed to `Action()`.
-- Added `ParseContext()` for parsing a command line into its intermediate context form without executing.
-- Added `Terminate()` function to override the termination function.
-- Added `UsageForContextWithTemplate()` for printing usage via a custom template.
-- Added `UsageTemplate()` for overriding the default template to use. Two templates are included:
- 1. `DefaultUsageTemplate` - default template.
- 2. `CompactUsageTemplate` - compact command template for larger applications.
-
-## Versions
-
-Kingpin uses [gopkg.in](https://gopkg.in/alecthomas/kingpin) for versioning.
-
-The current stable version is [gopkg.in/alecthomas/kingpin.v2](https://gopkg.in/alecthomas/kingpin.v2). The previous version, [gopkg.in/alecthomas/kingpin.v1](https://gopkg.in/alecthomas/kingpin.v1), is deprecated and in maintenance mode.
-
-### [V2](https://gopkg.in/alecthomas/kingpin.v2) is the current stable version
-
-Installation:
-
-```sh
-$ go get gopkg.in/alecthomas/kingpin.v2
-```
-
-### [V1](https://gopkg.in/alecthomas/kingpin.v1) is the OLD stable version
-
-Installation:
-
-```sh
-$ go get gopkg.in/alecthomas/kingpin.v1
-```
-
-## Change History
-
-- *2015-09-19* -- Stable v2.1.0 release.
- - Added `command.Default()` to specify a default command to use if no other
- command matches. This allows for convenient user shortcuts.
- - Exposed `HelpFlag` and `VersionFlag` for further customisation.
- - `Action()` and `PreAction()` added and both now support an arbitrary
- number of callbacks.
- - `kingpin.SeparateOptionalFlagsUsageTemplate`.
- - `--help-long` and `--help-man` (hidden by default) flags.
- - Flags are "interspersed" by default, but can be disabled with `app.Interspersed(false)`.
- - Added flags for all simple builtin types (int8, uint16, etc.) and slice variants.
- - Use `app.Writer(os.Writer)` to specify the default writer for all output functions.
- - Dropped `os.Writer` prefix from all printf-like functions.
-
-- *2015-05-22* -- Stable v2.0.0 release.
- - Initial stable release of v2.0.0.
- - Fully supports interspersed flags, commands and arguments.
- - Flags can be present at any point after their logical definition.
- - Application.Parse() terminates if commands are present and a command is not parsed.
- - Dispatch() -> Action().
- - Actions are dispatched after all values are populated.
- - Override termination function (defaults to os.Exit).
- - Override output stream (defaults to os.Stderr).
- - Templatised usage help, with default and compact templates.
- - Make error/usage functions more consistent.
- - Support argument expansion from files by default (with @).
- - Fully public data model is available via .Model().
- - Parser has been completely refactored.
- - Parsing and execution has been split into distinct stages.
- - Use `go generate` to generate repeated flags.
- - Support combined short-flag+argument: -fARG.
-
-- *2015-01-23* -- Stable v1.3.4 release.
- - Support "--" for separating flags from positional arguments.
- - Support loading flags from files (ParseWithFileExpansion()). Use @FILE as an argument.
- - Add post-app and post-cmd validation hooks. This allows arbitrary validation to be added.
- - A bunch of improvements to help usage and formatting.
- - Support arbitrarily nested sub-commands.
-
-- *2014-07-08* -- Stable v1.2.0 release.
- - Pass any value through to `Strings()` when final argument.
- Allows for values that look like flags to be processed.
- - Allow `--help` to be used with commands.
- - Support `Hidden()` flags.
- - Parser for [units.Base2Bytes](https://github.com/alecthomas/units)
- type. Allows for flags like `--ram=512MB` or `--ram=1GB`.
- - Add an `Enum()` value, allowing only one of a set of values
- to be selected. eg. `Flag(...).Enum("debug", "info", "warning")`.
-
-- *2014-06-27* -- Stable v1.1.0 release.
- - Bug fixes.
- - Always return an error (rather than panicing) when misconfigured.
- - `OpenFile(flag, perm)` value type added, for finer control over opening files.
- - Significantly improved usage formatting.
-
-- *2014-06-19* -- Stable v1.0.0 release.
- - Support [cumulative positional](#consuming-all-remaining-arguments) arguments.
- - Return error rather than panic when there are fatal errors not caught by
- the type system. eg. when a default value is invalid.
- - Use gopkg.in.
-
-- *2014-06-10* -- Place-holder streamlining.
- - Renamed `MetaVar` to `PlaceHolder`.
- - Removed `MetaVarFromDefault`. Kingpin now uses [heuristics](#place-holders-in-help)
- to determine what to display.
-
-## Examples
-
-### Simple Example
-
-Kingpin can be used for simple flag+arg applications like so:
-
-```
-$ ping --help
-usage: ping [<flags>] <ip> [<count>]
-
-Flags:
- --debug Enable debug mode.
- --help Show help.
- -t, --timeout=5s Timeout waiting for ping.
-
-Args:
- <ip>  IP address to ping.
- [<count>]  Number of packets to send
-$ ping 1.2.3.4 5
-Would ping: 1.2.3.4 with timeout 5s and count 5
-```
-
-From the following source:
-
-```go
-package main
-
-import (
- "fmt"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- debug = kingpin.Flag("debug", "Enable debug mode.").Bool()
- timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").Default("5s").OverrideDefaultFromEnvar("PING_TIMEOUT").Short('t').Duration()
- ip = kingpin.Arg("ip", "IP address to ping.").Required().IP()
- count = kingpin.Arg("count", "Number of packets to send").Int()
-)
-
-func main() {
- kingpin.Version("0.0.1")
- kingpin.Parse()
- fmt.Printf("Would ping: %s with timeout %s and count %d\n", *ip, *timeout, *count)
-}
-```
-
-### Complex Example
-
-Kingpin can also produce complex command-line applications with global flags,
-subcommands, and per-subcommand flags, like this:
-
-```
-$ chat --help
-usage: chat [<flags>] <command> [<args> ...]
-
-A command-line chat application.
-
-Flags:
- --help Show help.
- --debug Enable debug mode.
- --server=127.0.0.1 Server address.
-
-Commands:
- help [<command>...]
- Show help for a command.
-
- register <nick> <name>
- Register a new user.
-
- post [<flags>] <channel> [<text>]
- Post a message to a channel.
-
-$ chat help post
-usage: chat [<flags>] post [<flags>] <channel> [<text>]
-
-Post a message to a channel.
-
-Flags:
- --image=IMAGE Image to post.
-
-Args:
- <channel>  Channel to post to.
- [<text>]   Text to post.
-
-$ chat post --image=~/Downloads/owls.jpg pics
-...
-```
-
-From this code:
-
-```go
-package main
-
-import (
- "os"
- "strings"
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- app = kingpin.New("chat", "A command-line chat application.")
- debug = app.Flag("debug", "Enable debug mode.").Bool()
- serverIP = app.Flag("server", "Server address.").Default("127.0.0.1").IP()
-
- register = app.Command("register", "Register a new user.")
- registerNick = register.Arg("nick", "Nickname for user.").Required().String()
- registerName = register.Arg("name", "Name of user.").Required().String()
-
- post = app.Command("post", "Post a message to a channel.")
- postImage = post.Flag("image", "Image to post.").File()
- postChannel = post.Arg("channel", "Channel to post to.").Required().String()
- postText = post.Arg("text", "Text to post.").Strings()
-)
-
-func main() {
- switch kingpin.MustParse(app.Parse(os.Args[1:])) {
- // Register user
- case register.FullCommand():
- println(*registerNick)
-
- // Post message
- case post.FullCommand():
- if *postImage != nil {
- }
- text := strings.Join(*postText, " ")
- println("Post:", text)
- }
-}
-```
-
-## Reference Documentation
-
-### Displaying errors and usage information
-
-Kingpin exports a set of functions to provide consistent errors and usage
-information to the user.
-
-Error messages look something like this:
-
- <appname>: error: <message>
-
-The functions on `Application` are:
-
-Function | Purpose
----------|--------------
-`Errorf(format, args)` | Display a printf formatted error to the user.
-`Fatalf(format, args)` | As with Errorf, but also call the termination handler.
-`FatalUsage(format, args)` | As with Fatalf, but also print contextual usage information.
-`FatalUsageContext(context, format, args)` | As with Fatalf, but also print contextual usage information from a `ParseContext`.
-`FatalIfError(err, format, args)` | Conditionally print an error prefixed with format+args, then call the termination handler
-
-There are equivalent global functions in the kingpin namespace for the default
-`kingpin.CommandLine` instance.
-
-### Sub-commands
-
-Kingpin supports nested sub-commands, with separate flag and positional
-arguments per sub-command. Note that positional arguments may only occur after
-sub-commands.
-
-For example:
-
-```go
-var (
- deleteCommand = kingpin.Command("delete", "Delete an object.")
- deleteUserCommand = deleteCommand.Command("user", "Delete a user.")
- deleteUserUIDFlag = deleteUserCommand.Flag("uid", "Delete user by UID rather than username.")
- deleteUserUsername = deleteUserCommand.Arg("username", "Username to delete.")
- deletePostCommand = deleteCommand.Command("post", "Delete a post.")
-)
-
-func main() {
- switch kingpin.Parse() {
- case "delete user":
- case "delete post":
- }
-}
-```
-
-### Custom Parsers
-
-Kingpin supports both flag and positional argument parsers for converting to
-Go types. For example, some included parsers are `Int()`, `Float()`,
-`Duration()` and `ExistingFile()` (see [parsers.go](./parsers.go) for a complete list of included parsers).
-
-Parsers conform to Go's [`flag.Value`](http://godoc.org/flag#Value)
-interface, so any existing implementations will work.
-
-For example, a parser for accumulating HTTP header values might look like this:
-
-```go
-type HTTPHeaderValue http.Header
-
-func (h *HTTPHeaderValue) Set(value string) error {
- parts := strings.SplitN(value, ":", 2)
- if len(parts) != 2 {
- return fmt.Errorf("expected HEADER:VALUE got '%s'", value)
- }
- (*http.Header)(h).Add(parts[0], parts[1])
- return nil
-}
-
-func (h *HTTPHeaderValue) String() string {
- return ""
-}
-```
-
-As a convenience, I would recommend something like this:
-
-```go
-func HTTPHeader(s Settings) (target *http.Header) {
- target = &http.Header{}
- s.SetValue((*HTTPHeaderValue)(target))
- return
-}
-```
-
-You would use it like so:
-
-```go
-headers = HTTPHeader(kingpin.Flag("header", "Add a HTTP header to the request.").Short('H'))
-```
-
-### Repeatable flags
-
-Depending on the `Value` they hold, some flags may be repeated. The
-`IsCumulative() bool` function on `Value` tells if it's safe to call `Set()`
-multiple times or if an error should be raised if several values are passed.
-
-The built-in `Value`s returning slices and maps, as well as `Counter` are
-examples of `Value`s that make a flag repeatable.
-
-### Boolean values
-
-Boolean values are uniquely managed by Kingpin. Each boolean flag will have a negative complement:
-`--<name>` and `--no-<name>`.
-
-### Default Values
-
-The default value is the zero value for a type. This can be overridden with
-the `Default(value...)` function on flags and arguments. This function accepts
-one or several strings, which are parsed by the value itself, so they *must*
-be compliant with the format expected.
-
-### Place-holders in Help
-
-The place-holder value for a flag is the value used in the help to describe
-the value of a non-boolean flag.
-
-The value provided to PlaceHolder() is used if provided, then the value
-provided by Default() if provided, then finally the capitalised flag name is
-used.
-
-Here are some examples of flags with various permutations:
-
- --name=NAME // Flag(...).String()
- --name="Harry" // Flag(...).Default("Harry").String()
- --name=FULL-NAME // Flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
-
-### Consuming all remaining arguments
-
-A common command-line idiom is to use all remaining arguments for some
-purpose. eg. The following command accepts an arbitrary number of
-IP addresses as positional arguments:
-
- ./cmd ping 10.1.1.1 192.168.1.1
-
-Such arguments are similar to [repeatable flags](#repeatable-flags), but for
-arguments. Therefore they use the same `IsCumulative() bool` function on the
-underlying `Value`, so the built-in `Value`s for which the `Set()` function
-can be called several times will consume multiple arguments.
-
-To implement the above example with a custom `Value`, we might do something
-like this:
-
-```go
-type ipList []net.IP
-
-func (i *ipList) Set(value string) error {
- if ip := net.ParseIP(value); ip == nil {
- return fmt.Errorf("'%s' is not an IP address", value)
- } else {
- *i = append(*i, ip)
- return nil
- }
-}
-
-func (i *ipList) String() string {
- return ""
-}
-
-func (i *ipList) IsCumulative() bool {
- return true
-}
-
-func IPList(s Settings) (target *[]net.IP) {
- target = new([]net.IP)
- s.SetValue((*ipList)(target))
- return
-}
-```
-
-And use it like so:
-
-```go
-ips := IPList(kingpin.Arg("ips", "IP addresses to ping."))
-```
-
-### Bash/ZSH Shell Completion
-
-By default, all flags and commands/subcommands generate completions
-internally.
-
-Out of the box, CLI tools using kingpin should be able to take advantage
-of completion hinting for flags and commands. By specifying
-`--completion-bash` as the first argument, your CLI tool will show
-possible subcommands. By ending your argv with `--`, hints for flags
-will be shown.
-
-To allow your end users to take advantage you must package a
-`/etc/bash_completion.d` script with your distribution (or the equivalent
-for your target platform/shell). An alternative is to instruct your end
-user to source a script from their `bash_profile` (or equivalent).
-
-Fortunately Kingpin makes it easy to generate or source a script for use
-with end users shells. `./yourtool --completion-script-bash` and
-`./yourtool --completion-script-zsh` will generate these scripts for you.
-
-**Installation by Package**
-
-For the best user experience, you should bundle your pre-created
-completion script with your CLI tool and install it inside
-`/etc/bash_completion.d` (or equivalent). A good suggestion is to add
-this as an automated step to your build pipeline, in the implementation
-is improved for bug fixed.
-
-**Installation by `bash_profile`**
-
-Alternatively, instruct your users to add an additional statement to
-their `bash_profile` (or equivalent):
-
-```
-eval "$(your-cli-tool --completion-script-bash)"
-```
-
-Or for ZSH
-
-```
-eval "$(your-cli-tool --completion-script-zsh)"
-```
-
-#### Additional API
-To provide more flexibility, a completion option API has been
-exposed for flags to allow user defined completion options, to extend
-completions further than just EnumVar/Enum.
-
-
-**Provide Static Options**
-
-When using an `Enum` or `EnumVar`, users are limited to only the options
-given. Maybe we wish to hint possible options to the user, but also
-allow them to provide their own custom option. `HintOptions` gives
-this functionality to flags.
-
-```
-app := kingpin.New("completion", "My application with bash completion.")
-app.Flag("port", "Provide a port to connect to").
- Required().
- HintOptions("80", "443", "8080").
- IntVar(&c.port)
-```
-
-**Provide Dynamic Options**
-Consider the case that you needed to read a local database or a file to
-provide suggestions. You can dynamically generate the options
-
-```
-func listHosts() []string {
- // Provide a dynamic list of hosts from a hosts file or otherwise
- // for bash completion. In this example we simply return static slice.
-
- // You could use this functionality to reach into a hosts file to provide
- // completion for a list of known hosts.
- return []string{"sshhost.example", "webhost.example", "ftphost.example"}
-}
-
-app := kingpin.New("completion", "My application with bash completion.")
-app.Flag("flag-1", "").HintAction(listHosts).String()
-```
-
-**EnumVar/Enum**
-When using `Enum` or `EnumVar`, any provided options will be automatically
-used for bash autocompletion. However, if you wish to provide a subset or
-different options, you can use `HintOptions` or `HintAction` which will override
-the default completion options for `Enum`/`EnumVar`.
-
-
-**Examples**
-You can see an in depth example of the completion API within
-`examples/completion/main.go`
-
-
-### Supporting -h for help
-
-`kingpin.CommandLine.HelpFlag.Short('h')`
-
-### Custom help
-
-Kingpin v2 supports templatised help using the text/template library (actually, [a fork](https://github.com/alecthomas/template)).
-
-You can specify the template to use with the [Application.UsageTemplate()](http://godoc.org/gopkg.in/alecthomas/kingpin.v2#Application.UsageTemplate) function.
-
-There are four included templates: `kingpin.DefaultUsageTemplate` is the default,
-`kingpin.CompactUsageTemplate` provides a more compact representation for more complex command-line structures,
-`kingpin.SeparateOptionalFlagsUsageTemplate` looks like the default template, but splits required
-and optional command flags into separate lists, and `kingpin.ManPageTemplate` is used to generate man pages.
-
-See the above templates for examples of usage, and the [UsageForContextWithTemplate()](https://github.com/alecthomas/kingpin/blob/master/usage.go#L198) function for details on the context.
-
-#### Default help template
-
-```
-$ go run ./examples/curl/curl.go --help
-usage: curl [<flags>] <command> [<args> ...]
-
-An example implementation of curl.
-
-Flags:
- --help Show help.
- -t, --timeout=5s Set connection timeout.
- -H, --headers=HEADER=VALUE
- Add HTTP headers to the request.
-
-Commands:
- help [<command>...]
- Show help.
-
- get url <url>
- Retrieve a URL.
-
- get file <file>
- Retrieve a file.
-
- post [<flags>] <url>
- POST a resource.
-```
-
-#### Compact help template
-
-```
-$ go run ./examples/curl/curl.go --help
-usage: curl [<flags>] <command> [<args> ...]
-
-An example implementation of curl.
-
-Flags:
- --help Show help.
- -t, --timeout=5s Set connection timeout.
- -H, --headers=HEADER=VALUE
- Add HTTP headers to the request.
-
-Commands:
- help [...]
- get []
- url
- file
- post []
-```
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go b/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go
deleted file mode 100644
index 72d6cbd4..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/actions.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package kingpin
-
-// Action callback executed at various stages after all values are populated.
-// The application, commands, arguments and flags all have corresponding
-// actions.
-type Action func(*ParseContext) error
-
-type actionMixin struct {
- actions []Action
- preActions []Action
-}
-
-type actionApplier interface {
- applyActions(*ParseContext) error
- applyPreActions(*ParseContext) error
-}
-
-func (a *actionMixin) addAction(action Action) {
- a.actions = append(a.actions, action)
-}
-
-func (a *actionMixin) addPreAction(action Action) {
- a.preActions = append(a.preActions, action)
-}
-
-func (a *actionMixin) applyActions(context *ParseContext) error {
- for _, action := range a.actions {
- if err := action(context); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *actionMixin) applyPreActions(context *ParseContext) error {
- for _, preAction := range a.preActions {
- if err := preAction(context); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/app.go b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
deleted file mode 100644
index 1a1a5eff..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
+++ /dev/null
@@ -1,688 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "io"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- ErrCommandNotSpecified = fmt.Errorf("command not specified")
-)
-
-var (
- envarTransformRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]+`)
-)
-
-type ApplicationValidator func(*Application) error
-
-// An Application contains the definitions of flags, arguments and commands
-// for an application.
-type Application struct {
- cmdMixin
- initialized bool
-
- Name string
- Help string
-
- author string
- version string
- errorWriter io.Writer // Destination for errors.
- usageWriter io.Writer // Destination for usage
- usageTemplate string
- validator ApplicationValidator
- terminate func(status int) // See Terminate()
- noInterspersed bool // can flags be interspersed with args (or must they come first)
- defaultEnvars bool
- completion bool
-
- // Help flag. Exposed for user customisation.
- HelpFlag *FlagClause
- // Help command. Exposed for user customisation. May be nil.
- HelpCommand *CmdClause
- // Version flag. Exposed for user customisation. May be nil.
- VersionFlag *FlagClause
-}
-
-// New creates a new Kingpin application instance.
-func New(name, help string) *Application {
- a := &Application{
- Name: name,
- Help: help,
- errorWriter: os.Stderr, // Left for backwards compatibility purposes.
- usageWriter: os.Stderr,
- usageTemplate: DefaultUsageTemplate,
- terminate: os.Exit,
- }
- a.flagGroup = newFlagGroup()
- a.argGroup = newArgGroup()
- a.cmdGroup = newCmdGroup(a)
- a.HelpFlag = a.Flag("help", "Show context-sensitive help (also try --help-long and --help-man).")
- a.HelpFlag.Bool()
- a.Flag("help-long", "Generate long help.").Hidden().PreAction(a.generateLongHelp).Bool()
- a.Flag("help-man", "Generate a man page.").Hidden().PreAction(a.generateManPage).Bool()
- a.Flag("completion-bash", "Output possible completions for the given args.").Hidden().BoolVar(&a.completion)
- a.Flag("completion-script-bash", "Generate completion script for bash.").Hidden().PreAction(a.generateBashCompletionScript).Bool()
- a.Flag("completion-script-zsh", "Generate completion script for ZSH.").Hidden().PreAction(a.generateZSHCompletionScript).Bool()
-
- return a
-}
-
-func (a *Application) generateLongHelp(c *ParseContext) error {
- a.Writer(os.Stdout)
- if err := a.UsageForContextWithTemplate(c, 2, LongHelpTemplate); err != nil {
- return err
- }
- a.terminate(0)
- return nil
-}
-
-func (a *Application) generateManPage(c *ParseContext) error {
- a.Writer(os.Stdout)
- if err := a.UsageForContextWithTemplate(c, 2, ManPageTemplate); err != nil {
- return err
- }
- a.terminate(0)
- return nil
-}
-
-func (a *Application) generateBashCompletionScript(c *ParseContext) error {
- a.Writer(os.Stdout)
- if err := a.UsageForContextWithTemplate(c, 2, BashCompletionTemplate); err != nil {
- return err
- }
- a.terminate(0)
- return nil
-}
-
-func (a *Application) generateZSHCompletionScript(c *ParseContext) error {
- a.Writer(os.Stdout)
- if err := a.UsageForContextWithTemplate(c, 2, ZshCompletionTemplate); err != nil {
- return err
- }
- a.terminate(0)
- return nil
-}
-
-// DefaultEnvars configures all flags (that do not already have an associated
-// envar) to use a default environment variable in the form "<app>_<flag>".
-//
-// For example, if the application is named "foo" and a flag is named "bar-
-// waz" the environment variable: "FOO_BAR_WAZ".
-func (a *Application) DefaultEnvars() *Application {
- a.defaultEnvars = true
- return a
-}
-
-// Terminate specifies the termination handler. Defaults to os.Exit(status).
-// If nil is passed, a no-op function will be used.
-func (a *Application) Terminate(terminate func(int)) *Application {
- if terminate == nil {
- terminate = func(int) {}
- }
- a.terminate = terminate
- return a
-}
-
-// Writer specifies the writer to use for usage and errors. Defaults to os.Stderr.
-// DEPRECATED: See ErrorWriter and UsageWriter.
-func (a *Application) Writer(w io.Writer) *Application {
- a.errorWriter = w
- a.usageWriter = w
- return a
-}
-
-// ErrorWriter sets the io.Writer to use for errors.
-func (a *Application) ErrorWriter(w io.Writer) *Application {
- a.errorWriter = w
- return a
-}
-
-// UsageWriter sets the io.Writer to use for usage output.
-func (a *Application) UsageWriter(w io.Writer) *Application {
- a.usageWriter = w
- return a
-}
-
-// UsageTemplate specifies the text template to use when displaying usage
-// information. The default is UsageTemplate.
-func (a *Application) UsageTemplate(template string) *Application {
- a.usageTemplate = template
- return a
-}
-
-// Validate sets a validation function to run when parsing.
-func (a *Application) Validate(validator ApplicationValidator) *Application {
- a.validator = validator
- return a
-}
-
-// ParseContext parses the given command line and returns the fully populated
-// ParseContext.
-func (a *Application) ParseContext(args []string) (*ParseContext, error) {
- return a.parseContext(false, args)
-}
-
-func (a *Application) parseContext(ignoreDefault bool, args []string) (*ParseContext, error) {
- if err := a.init(); err != nil {
- return nil, err
- }
- context := tokenize(args, ignoreDefault)
- err := parse(context, a)
- return context, err
-}
-
-// Parse parses command-line arguments. It returns the selected command and an
-// error. The selected command will be a space-separated subcommand, if
-// subcommands have been configured.
-//
-// This will populate all flag and argument values, call all callbacks, and so
-// on.
-func (a *Application) Parse(args []string) (command string, err error) {
-
- context, parseErr := a.ParseContext(args)
- selected := []string{}
- var setValuesErr error
-
- if context == nil {
- // Since we do not return the error immediately, there could be a case
- // where the returned context is nil. Protect against that.
- return "", parseErr
- }
-
- if err = a.setDefaults(context); err != nil {
- return "", err
- }
-
- selected, setValuesErr = a.setValues(context)
-
- if err = a.applyPreActions(context, !a.completion); err != nil {
- return "", err
- }
-
- if a.completion {
- a.generateBashCompletion(context)
- a.terminate(0)
- } else {
- if parseErr != nil {
- return "", parseErr
- }
-
- a.maybeHelp(context)
- if !context.EOL() {
- return "", fmt.Errorf("unexpected argument '%s'", context.Peek())
- }
-
- if setValuesErr != nil {
- return "", setValuesErr
- }
-
- command, err = a.execute(context, selected)
- if err == ErrCommandNotSpecified {
- a.writeUsage(context, nil)
- }
- }
- return command, err
-}
-
-func (a *Application) writeUsage(context *ParseContext, err error) {
- if err != nil {
- a.Errorf("%s", err)
- }
- if err := a.UsageForContext(context); err != nil {
- panic(err)
- }
- if err != nil {
- a.terminate(1)
- } else {
- a.terminate(0)
- }
-}
-
-func (a *Application) maybeHelp(context *ParseContext) {
- for _, element := range context.Elements {
- if flag, ok := element.Clause.(*FlagClause); ok && flag == a.HelpFlag {
- // Re-parse the command-line ignoring defaults, so that help works correctly.
- context, _ = a.parseContext(true, context.rawArgs)
- a.writeUsage(context, nil)
- }
- }
-}
-
-// Version adds a --version flag for displaying the application version.
-func (a *Application) Version(version string) *Application {
- a.version = version
- a.VersionFlag = a.Flag("version", "Show application version.").PreAction(func(*ParseContext) error {
- fmt.Fprintln(a.usageWriter, version)
- a.terminate(0)
- return nil
- })
- a.VersionFlag.Bool()
- return a
-}
-
-// Author sets the author output by some help templates.
-func (a *Application) Author(author string) *Application {
- a.author = author
- return a
-}
-
-// Action adds a callback to call when all values are populated and parsing is
-// complete, but before any command, flag or argument actions.
-//
-// All Action() callbacks are called in the order they are encountered on the
-// command line.
-func (a *Application) Action(action Action) *Application {
- a.addAction(action)
- return a
-}
-
-// PreAction adds a callback to call after parsing completes but before validation and execution.
-func (a *Application) PreAction(action Action) *Application {
- a.addPreAction(action)
- return a
-}
-
-// Command adds a new top-level command.
-func (a *Application) Command(name, help string) *CmdClause {
- return a.addCommand(name, help)
-}
-
-// Interspersed controls whether flags can be interspersed with positional arguments.
-//
-// true (the default) means that they can, false means that all flags must appear before the first positional argument.
-func (a *Application) Interspersed(interspersed bool) *Application {
- a.noInterspersed = !interspersed
- return a
-}
-
-func (a *Application) defaultEnvarPrefix() string {
- if a.defaultEnvars {
- return a.Name
- }
- return ""
-}
-
-func (a *Application) init() error {
- if a.initialized {
- return nil
- }
- if a.cmdGroup.have() && a.argGroup.have() {
- return fmt.Errorf("can't mix top-level Arg()s with Command()s")
- }
-
- // If we have subcommands, add a help command at the top-level.
- if a.cmdGroup.have() {
- var command []string
- a.HelpCommand = a.Command("help", "Show help.").PreAction(func(context *ParseContext) error {
- a.Usage(command)
- a.terminate(0)
- return nil
- })
- a.HelpCommand.Arg("command", "Show help on command.").StringsVar(&command)
- // Make help the first command.
- l := len(a.commandOrder)
- a.commandOrder = append(a.commandOrder[l-1:l], a.commandOrder[:l-1]...)
- }
-
- if err := a.flagGroup.init(a.defaultEnvarPrefix()); err != nil {
- return err
- }
- if err := a.cmdGroup.init(); err != nil {
- return err
- }
- if err := a.argGroup.init(); err != nil {
- return err
- }
- for _, cmd := range a.commands {
- if err := cmd.init(); err != nil {
- return err
- }
- }
- flagGroups := []*flagGroup{a.flagGroup}
- for _, cmd := range a.commandOrder {
- if err := checkDuplicateFlags(cmd, flagGroups); err != nil {
- return err
- }
- }
- a.initialized = true
- return nil
-}
-
-// Recursively check commands for duplicate flags.
-func checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error {
- // Check for duplicates.
- for _, flags := range flagGroups {
- for _, flag := range current.flagOrder {
- if flag.shorthand != 0 {
- if _, ok := flags.short[string(flag.shorthand)]; ok {
- return fmt.Errorf("duplicate short flag -%c", flag.shorthand)
- }
- }
- if _, ok := flags.long[flag.name]; ok {
- return fmt.Errorf("duplicate long flag --%s", flag.name)
- }
- }
- }
- flagGroups = append(flagGroups, current.flagGroup)
- // Check subcommands.
- for _, subcmd := range current.commandOrder {
- if err := checkDuplicateFlags(subcmd, flagGroups); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *Application) execute(context *ParseContext, selected []string) (string, error) {
- var err error
-
- if err = a.validateRequired(context); err != nil {
- return "", err
- }
-
- if err = a.applyValidators(context); err != nil {
- return "", err
- }
-
- if err = a.applyActions(context); err != nil {
- return "", err
- }
-
- command := strings.Join(selected, " ")
- if command == "" && a.cmdGroup.have() {
- return "", ErrCommandNotSpecified
- }
- return command, err
-}
-
-func (a *Application) setDefaults(context *ParseContext) error {
- flagElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if flag, ok := element.Clause.(*FlagClause); ok {
- if flag.name == "help" {
- return nil
- }
- flagElements[flag.name] = element
- }
- }
-
- argElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if arg, ok := element.Clause.(*ArgClause); ok {
- argElements[arg.name] = element
- }
- }
-
- // Set defaults for any flags that were not specified on the command line.
- for _, flag := range context.flags.long {
- if flagElements[flag.name] == nil {
- if err := flag.setDefault(); err != nil {
- return err
- }
- }
- }
-
- for _, arg := range context.arguments.args {
- if argElements[arg.name] == nil {
- if err := arg.setDefault(); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (a *Application) validateRequired(context *ParseContext) error {
- flagElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if flag, ok := element.Clause.(*FlagClause); ok {
- flagElements[flag.name] = element
- }
- }
-
- argElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if arg, ok := element.Clause.(*ArgClause); ok {
- argElements[arg.name] = element
- }
- }
-
- // Check that all required flags and arguments were provided.
- for _, flag := range context.flags.long {
- if flagElements[flag.name] == nil {
- // Check required flags were provided.
- if flag.needsValue() {
- return fmt.Errorf("required flag --%s not provided", flag.name)
- }
- }
- }
-
- for _, arg := range context.arguments.args {
- if argElements[arg.name] == nil {
- if arg.needsValue() {
- return fmt.Errorf("required argument '%s' not provided", arg.name)
- }
- }
- }
- return nil
-}
-
-func (a *Application) setValues(context *ParseContext) (selected []string, err error) {
- // Set all arg and flag values.
- var (
- lastCmd *CmdClause
- flagSet = map[string]struct{}{}
- )
- for _, element := range context.Elements {
- switch clause := element.Clause.(type) {
- case *FlagClause:
- if _, ok := flagSet[clause.name]; ok {
- if v, ok := clause.value.(repeatableFlag); !ok || !v.IsCumulative() {
- return nil, fmt.Errorf("flag '%s' cannot be repeated", clause.name)
- }
- }
- if err = clause.value.Set(*element.Value); err != nil {
- return
- }
- flagSet[clause.name] = struct{}{}
-
- case *ArgClause:
- if err = clause.value.Set(*element.Value); err != nil {
- return
- }
-
- case *CmdClause:
- if clause.validator != nil {
- if err = clause.validator(clause); err != nil {
- return
- }
- }
- selected = append(selected, clause.name)
- lastCmd = clause
- }
- }
-
- if lastCmd != nil && len(lastCmd.commands) > 0 {
- return nil, fmt.Errorf("must select a subcommand of '%s'", lastCmd.FullCommand())
- }
-
- return
-}
-
-func (a *Application) applyValidators(context *ParseContext) (err error) {
- // Call command validation functions.
- for _, element := range context.Elements {
- if cmd, ok := element.Clause.(*CmdClause); ok && cmd.validator != nil {
- if err = cmd.validator(cmd); err != nil {
- return err
- }
- }
- }
-
- if a.validator != nil {
- err = a.validator(a)
- }
- return err
-}
-
-func (a *Application) applyPreActions(context *ParseContext, dispatch bool) error {
- if err := a.actionMixin.applyPreActions(context); err != nil {
- return err
- }
- // Dispatch to actions.
- if dispatch {
- for _, element := range context.Elements {
- if applier, ok := element.Clause.(actionApplier); ok {
- if err := applier.applyPreActions(context); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (a *Application) applyActions(context *ParseContext) error {
- if err := a.actionMixin.applyActions(context); err != nil {
- return err
- }
- // Dispatch to actions.
- for _, element := range context.Elements {
- if applier, ok := element.Clause.(actionApplier); ok {
- if err := applier.applyActions(context); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Errorf prints an error message to w in the format "<appname>: error: <message>".
-func (a *Application) Errorf(format string, args ...interface{}) {
- fmt.Fprintf(a.errorWriter, a.Name+": error: "+format+"\n", args...)
-}
-
-// Fatalf writes a formatted error to w then terminates with exit status 1.
-func (a *Application) Fatalf(format string, args ...interface{}) {
- a.Errorf(format, args...)
- a.terminate(1)
-}
-
-// FatalUsage prints an error message followed by usage information, then
-// exits with a non-zero status.
-func (a *Application) FatalUsage(format string, args ...interface{}) {
- a.Errorf(format, args...)
- // Force usage to go to error output.
- a.usageWriter = a.errorWriter
- a.Usage([]string{})
- a.terminate(1)
-}
-
-// FatalUsageContext writes a printf formatted error message to w, then usage
-// information for the given ParseContext, before exiting.
-func (a *Application) FatalUsageContext(context *ParseContext, format string, args ...interface{}) {
- a.Errorf(format, args...)
- if err := a.UsageForContext(context); err != nil {
- panic(err)
- }
- a.terminate(1)
-}
-
-// FatalIfError prints an error and exits if err is not nil. The error is printed
-// with the given formatted string, if any.
-func (a *Application) FatalIfError(err error, format string, args ...interface{}) {
- if err != nil {
- prefix := ""
- if format != "" {
- prefix = fmt.Sprintf(format, args...) + ": "
- }
- a.Errorf(prefix+"%s", err)
- a.terminate(1)
- }
-}
-
-func (a *Application) completionOptions(context *ParseContext) []string {
- args := context.rawArgs
-
- var (
- currArg string
- prevArg string
- target cmdMixin
- )
-
- numArgs := len(args)
- if numArgs > 1 {
- args = args[1:]
- currArg = args[len(args)-1]
- }
- if numArgs > 2 {
- prevArg = args[len(args)-2]
- }
-
- target = a.cmdMixin
- if context.SelectedCommand != nil {
- // A subcommand was in use. We will use it as the target
- target = context.SelectedCommand.cmdMixin
- }
-
- if (currArg != "" && strings.HasPrefix(currArg, "--")) || strings.HasPrefix(prevArg, "--") {
- // Perform completion for a flag. The last/current argument started with "--".
- var (
- flagName string // The name of a flag if given (could be half complete)
- flagValue string // The value assigned to a flag (if given) (could be half complete)
- )
-
- if strings.HasPrefix(prevArg, "--") && !strings.HasPrefix(currArg, "--") {
- // Matches: ./myApp --flag value
- // Won't match: ./myApp --flag --
- flagName = prevArg[2:] // Strip the "--"
- flagValue = currArg
- } else if strings.HasPrefix(currArg, "--") {
- // Matches: ./myApp --flag --
- // Matches: ./myApp --flag somevalue --
- // Matches: ./myApp --
- flagName = currArg[2:] // Strip the "--"
- }
-
- options, flagMatched, valueMatched := target.FlagCompletion(flagName, flagValue)
- if valueMatched {
- // Value Matched. Show cmdCompletions
- return target.CmdCompletion(context)
- }
-
- // Add top level flags if we're not at the top level and no match was found.
- if context.SelectedCommand != nil && !flagMatched {
- topOptions, topFlagMatched, topValueMatched := a.FlagCompletion(flagName, flagValue)
- if topValueMatched {
- // Value Matched. Back to cmdCompletions
- return target.CmdCompletion(context)
- }
-
- if topFlagMatched {
- // Top level had a flag which matched the input. Return its options.
- options = topOptions
- } else {
- // Add top level flags
- options = append(options, topOptions...)
- }
- }
- return options
- }
-
- // Perform completion for sub commands and arguments.
- return target.CmdCompletion(context)
-}
-
-func (a *Application) generateBashCompletion(context *ParseContext) {
- options := a.completionOptions(context)
- fmt.Printf("%s", strings.Join(options, "\n"))
-}
-
-func envarTransform(name string) string {
- return strings.ToUpper(envarTransformRegexp.ReplaceAllString(name, "_"))
-}
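
The app.go removed above implements the Application lifecycle that callers drive through New, Flag, Version and Parse/MustParse. For reference, a minimal usage sketch of that public API; the application and flag names below are purely illustrative:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        // Illustrative application exercising New, Version, Flag and MustParse.
        app := kingpin.New("demo", "Sketch of the kingpin Application API.")
        app.Version("0.0.1")
        verbose := app.Flag("verbose", "Enable verbose output.").Short('v').Bool()

        // Parse returns the selected command (empty here, since no commands
        // are defined) and an error; MustParse exits via Fatalf on failure.
        kingpin.MustParse(app.Parse(os.Args[1:]))

        if *verbose {
            fmt.Println("verbose mode enabled")
        }
    }
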
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/args.go b/vendor/gopkg.in/alecthomas/kingpin.v2/args.go
deleted file mode 100644
index 34006947..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/args.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package kingpin
-
-import (
- "fmt"
-)
-
-type argGroup struct {
- args []*ArgClause
-}
-
-func newArgGroup() *argGroup {
- return &argGroup{}
-}
-
-func (a *argGroup) have() bool {
- return len(a.args) > 0
-}
-
-// GetArg gets an argument definition.
-//
-// This allows existing arguments to be modified after definition but before parsing. Useful for
-// modular applications.
-func (a *argGroup) GetArg(name string) *ArgClause {
- for _, arg := range a.args {
- if arg.name == name {
- return arg
- }
- }
- return nil
-}
-
-func (a *argGroup) Arg(name, help string) *ArgClause {
- arg := newArg(name, help)
- a.args = append(a.args, arg)
- return arg
-}
-
-func (a *argGroup) init() error {
- required := 0
- seen := map[string]struct{}{}
- previousArgMustBeLast := false
- for i, arg := range a.args {
- if previousArgMustBeLast {
- return fmt.Errorf("Args() can't be followed by another argument '%s'", arg.name)
- }
- if arg.consumesRemainder() {
- previousArgMustBeLast = true
- }
- if _, ok := seen[arg.name]; ok {
- return fmt.Errorf("duplicate argument '%s'", arg.name)
- }
- seen[arg.name] = struct{}{}
- if arg.required && required != i {
- return fmt.Errorf("required arguments found after non-required")
- }
- if arg.required {
- required++
- }
- if err := arg.init(); err != nil {
- return err
- }
- }
- return nil
-}
-
-type ArgClause struct {
- actionMixin
- parserMixin
- completionsMixin
- envarMixin
- name string
- help string
- defaultValues []string
- required bool
-}
-
-func newArg(name, help string) *ArgClause {
- a := &ArgClause{
- name: name,
- help: help,
- }
- return a
-}
-
-func (a *ArgClause) setDefault() error {
- if a.HasEnvarValue() {
- if v, ok := a.value.(remainderArg); !ok || !v.IsCumulative() {
- // Use the value as-is
- return a.value.Set(a.GetEnvarValue())
- }
- for _, value := range a.GetSplitEnvarValue() {
- if err := a.value.Set(value); err != nil {
- return err
- }
- }
- return nil
- }
-
- if len(a.defaultValues) > 0 {
- for _, defaultValue := range a.defaultValues {
- if err := a.value.Set(defaultValue); err != nil {
- return err
- }
- }
- return nil
- }
-
- return nil
-}
-
-func (a *ArgClause) needsValue() bool {
- haveDefault := len(a.defaultValues) > 0
- return a.required && !(haveDefault || a.HasEnvarValue())
-}
-
-func (a *ArgClause) consumesRemainder() bool {
- if r, ok := a.value.(remainderArg); ok {
- return r.IsCumulative()
- }
- return false
-}
-
-// Required arguments must be input by the user. They cannot have a Default() value provided.
-func (a *ArgClause) Required() *ArgClause {
- a.required = true
- return a
-}
-
-// Default values for this argument. They *must* be parseable by the value of the argument.
-func (a *ArgClause) Default(values ...string) *ArgClause {
- a.defaultValues = values
- return a
-}
-
-// Envar overrides the default value(s) for an argument from an environment variable,
-// if it is set. Several default values can be provided by using new lines to
-// separate them.
-func (a *ArgClause) Envar(name string) *ArgClause {
- a.envar = name
- a.noEnvar = false
- return a
-}
-
-// NoEnvar forces environment variable defaults to be disabled for this argument.
-// Most useful in conjunction with app.DefaultEnvars().
-func (a *ArgClause) NoEnvar() *ArgClause {
- a.envar = ""
- a.noEnvar = true
- return a
-}
-
-func (a *ArgClause) Action(action Action) *ArgClause {
- a.addAction(action)
- return a
-}
-
-func (a *ArgClause) PreAction(action Action) *ArgClause {
- a.addPreAction(action)
- return a
-}
-
-// HintAction registers a HintAction (function) for the arg to provide completions
-func (a *ArgClause) HintAction(action HintAction) *ArgClause {
- a.addHintAction(action)
- return a
-}
-
-// HintOptions registers any number of options for the arg to provide completions.
-func (a *ArgClause) HintOptions(options ...string) *ArgClause {
- a.addHintAction(func() []string {
- return options
- })
- return a
-}
-
-func (a *ArgClause) init() error {
- if a.required && len(a.defaultValues) > 0 {
- return fmt.Errorf("required argument '%s' with unusable default value", a.name)
- }
- if a.value == nil {
- return fmt.Errorf("no parser defined for arg '%s'", a.name)
- }
- return nil
-}
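
The args.go removed above provides positional-argument handling (argGroup and ArgClause). A short sketch of declaring arguments against that API; the program name, arguments and environment variable are invented for illustration:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("copy-lite", "Illustrative positional arguments.")
        // Required args must precede optional ones, as enforced by argGroup.init.
        src := app.Arg("src", "Source path.").Required().String()
        dst := app.Arg("dst", "Destination path.").Default("./out").Envar("COPYLITE_DST").String()

        kingpin.MustParse(app.Parse(os.Args[1:]))
        fmt.Println("copy", *src, "->", *dst)
    }
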
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go
deleted file mode 100644
index 0473b871..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/cmd.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "strings"
-)
-
-type cmdMixin struct {
- *flagGroup
- *argGroup
- *cmdGroup
- actionMixin
-}
-
-// CmdCompletion returns completion options for arguments, if that's where
-// parsing left off, or commands if there aren't any unsatisfied args.
-func (c *cmdMixin) CmdCompletion(context *ParseContext) []string {
- var options []string
-
- // Count args already satisfied - we won't complete those, and add any
- // default commands' alternatives, since they weren't listed explicitly
- // and the user may want to explicitly list something else.
- argsSatisfied := 0
- for _, el := range context.Elements {
- switch clause := el.Clause.(type) {
- case *ArgClause:
- if el.Value != nil && *el.Value != "" {
- argsSatisfied++
- }
- case *CmdClause:
- options = append(options, clause.completionAlts...)
- default:
- }
- }
-
- if argsSatisfied < len(c.argGroup.args) {
- // Since not all args have been satisfied, show options for the current one
- options = append(options, c.argGroup.args[argsSatisfied].resolveCompletions()...)
- } else {
- // If all args are satisfied, then go back to completing commands
- for _, cmd := range c.cmdGroup.commandOrder {
- if !cmd.hidden {
- options = append(options, cmd.name)
- }
- }
- }
-
- return options
-}
-
-func (c *cmdMixin) FlagCompletion(flagName string, flagValue string) (choices []string, flagMatch bool, optionMatch bool) {
- // Check if flagName matches a known flag.
- // If it does, show the options for the flag
- // Otherwise, show all flags
-
- options := []string{}
-
- for _, flag := range c.flagGroup.flagOrder {
- // Loop through each flag and determine if a match exists
- if flag.name == flagName {
- // User typed entire flag. Need to look for flag options.
- options = flag.resolveCompletions()
- if len(options) == 0 {
- // No Options to Choose From, Assume Match.
- return options, true, true
- }
-
- // Loop options to find if the user specified value matches
- isPrefix := false
- matched := false
-
- for _, opt := range options {
- if flagValue == opt {
- matched = true
- } else if strings.HasPrefix(opt, flagValue) {
- isPrefix = true
- }
- }
-
- // Matched Flag Directly
- // Flag Value Not Prefixed, and Matched Directly
- return options, true, !isPrefix && matched
- }
-
- if !flag.hidden {
- options = append(options, "--"+flag.name)
- }
- }
- // No Flag directly matched.
- return options, false, false
-
-}
-
-type cmdGroup struct {
- app *Application
- parent *CmdClause
- commands map[string]*CmdClause
- commandOrder []*CmdClause
-}
-
-func (c *cmdGroup) defaultSubcommand() *CmdClause {
- for _, cmd := range c.commandOrder {
- if cmd.isDefault {
- return cmd
- }
- }
- return nil
-}
-
-func (c *cmdGroup) cmdNames() []string {
- names := make([]string, 0, len(c.commandOrder))
- for _, cmd := range c.commandOrder {
- names = append(names, cmd.name)
- }
- return names
-}
-
-// GetCommand gets a command definition.
-//
-// This allows existing commands to be modified after definition but before parsing. Useful for
-// modular applications.
-func (c *cmdGroup) GetCommand(name string) *CmdClause {
- return c.commands[name]
-}
-
-func newCmdGroup(app *Application) *cmdGroup {
- return &cmdGroup{
- app: app,
- commands: make(map[string]*CmdClause),
- }
-}
-
-func (c *cmdGroup) flattenedCommands() (out []*CmdClause) {
- for _, cmd := range c.commandOrder {
- if len(cmd.commands) == 0 {
- out = append(out, cmd)
- }
- out = append(out, cmd.flattenedCommands()...)
- }
- return
-}
-
-func (c *cmdGroup) addCommand(name, help string) *CmdClause {
- cmd := newCommand(c.app, name, help)
- c.commands[name] = cmd
- c.commandOrder = append(c.commandOrder, cmd)
- return cmd
-}
-
-func (c *cmdGroup) init() error {
- seen := map[string]bool{}
- if c.defaultSubcommand() != nil && !c.have() {
- return fmt.Errorf("default subcommand %q provided but no subcommands defined", c.defaultSubcommand().name)
- }
- defaults := []string{}
- for _, cmd := range c.commandOrder {
- if cmd.isDefault {
- defaults = append(defaults, cmd.name)
- }
- if seen[cmd.name] {
- return fmt.Errorf("duplicate command %q", cmd.name)
- }
- seen[cmd.name] = true
- for _, alias := range cmd.aliases {
- if seen[alias] {
- return fmt.Errorf("alias duplicates existing command %q", alias)
- }
- c.commands[alias] = cmd
- }
- if err := cmd.init(); err != nil {
- return err
- }
- }
- if len(defaults) > 1 {
- return fmt.Errorf("more than one default subcommand exists: %s", strings.Join(defaults, ", "))
- }
- return nil
-}
-
-func (c *cmdGroup) have() bool {
- return len(c.commands) > 0
-}
-
-type CmdClauseValidator func(*CmdClause) error
-
-// A CmdClause is a single top-level command. It encapsulates a set of flags
-// and either subcommands or positional arguments.
-type CmdClause struct {
- cmdMixin
- app *Application
- name string
- aliases []string
- help string
- isDefault bool
- validator CmdClauseValidator
- hidden bool
- completionAlts []string
-}
-
-func newCommand(app *Application, name, help string) *CmdClause {
- c := &CmdClause{
- app: app,
- name: name,
- help: help,
- }
- c.flagGroup = newFlagGroup()
- c.argGroup = newArgGroup()
- c.cmdGroup = newCmdGroup(app)
- return c
-}
-
-// Alias adds an alias for this command.
-func (c *CmdClause) Alias(name string) *CmdClause {
- c.aliases = append(c.aliases, name)
- return c
-}
-
-// Validate sets a validation function to run when parsing.
-func (c *CmdClause) Validate(validator CmdClauseValidator) *CmdClause {
- c.validator = validator
- return c
-}
-
-func (c *CmdClause) FullCommand() string {
- out := []string{c.name}
- for p := c.parent; p != nil; p = p.parent {
- out = append([]string{p.name}, out...)
- }
- return strings.Join(out, " ")
-}
-
-// Command adds a new sub-command.
-func (c *CmdClause) Command(name, help string) *CmdClause {
- cmd := c.addCommand(name, help)
- cmd.parent = c
- return cmd
-}
-
-// Default makes this command the default if commands don't match.
-func (c *CmdClause) Default() *CmdClause {
- c.isDefault = true
- return c
-}
-
-func (c *CmdClause) Action(action Action) *CmdClause {
- c.addAction(action)
- return c
-}
-
-func (c *CmdClause) PreAction(action Action) *CmdClause {
- c.addPreAction(action)
- return c
-}
-
-func (c *CmdClause) init() error {
- if err := c.flagGroup.init(c.app.defaultEnvarPrefix()); err != nil {
- return err
- }
- if c.argGroup.have() && c.cmdGroup.have() {
- return fmt.Errorf("can't mix Arg()s with Command()s")
- }
- if err := c.argGroup.init(); err != nil {
- return err
- }
- if err := c.cmdGroup.init(); err != nil {
- return err
- }
- return nil
-}
-
-func (c *CmdClause) Hidden() *CmdClause {
- c.hidden = true
- return c
-}
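
The cmd.go removed above implements the command tree (CmdClause), including subcommands, aliases and default commands. A sketch of the corresponding user-facing API, with invented command names:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("svc", "Illustrative command tree.")

        serve := app.Command("serve", "Run the server.").Default()
        port := serve.Flag("port", "Listen port.").Default("8080").Int()

        status := app.Command("status", "Show status.").Alias("st")

        // MustParse returns the selected, space-separated command path.
        switch kingpin.MustParse(app.Parse(os.Args[1:])) {
        case serve.FullCommand():
            fmt.Println("serving on port", *port)
        case status.FullCommand():
            fmt.Println("ok")
        }
    }
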
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go b/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go
deleted file mode 100644
index 6e7b409f..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/completions.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package kingpin
-
-// HintAction is a function type that is expected to return a slice of possible
-// command-line arguments.
-type HintAction func() []string
-type completionsMixin struct {
- hintActions []HintAction
- builtinHintActions []HintAction
-}
-
-func (a *completionsMixin) addHintAction(action HintAction) {
- a.hintActions = append(a.hintActions, action)
-}
-
-// Allow adding of HintActions which are registered internally, e.g. by EnumVar.
-func (a *completionsMixin) addHintActionBuiltin(action HintAction) {
- a.builtinHintActions = append(a.builtinHintActions, action)
-}
-
-func (a *completionsMixin) resolveCompletions() []string {
- var hints []string
-
- options := a.builtinHintActions
- if len(a.hintActions) > 0 {
- // User specified their own hintActions. Use those instead.
- options = a.hintActions
- }
-
- for _, hintAction := range options {
- hints = append(hints, hintAction()...)
- }
- return hints
-}
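
The completions.go removed above collects the completion hints that back bash completion. Hints are registered through HintOptions or HintAction on flags and args; a small sketch with placeholder values:

    package main

    import (
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("hints", "Illustrative completion hints.")

        // Static hints offered to --completion-bash users.
        app.Flag("region", "Region to query.").HintOptions("us-east-1", "eu-west-1").String()

        // Dynamic hints: any func() []string can supply candidates.
        app.Flag("profile", "Config profile.").HintAction(func() []string {
            return []string{"default", "staging", "production"}
        }).String()

        kingpin.MustParse(app.Parse(os.Args[1:]))
    }
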
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go b/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
deleted file mode 100644
index cb951a80..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/doc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Package kingpin provides command line interfaces like this:
-//
-// $ chat
-// usage: chat [<flags>] <command> [<args> ...]
-//
-// Flags:
-// --debug enable debug mode
-// --help Show help.
-// --server=127.0.0.1 server address
-//
-// Commands:
-// help [<command>]
-// Show help for a command.
-//
-// post [<flags>] <channel> [<text>]
-// Post a message to a channel.
-//
-// register <nick> <name>
-// Register a new user.
-//
-// $ chat help post
-// usage: chat [<flags>] post [<flags>] <channel> [<text>]
-//
-// Post a message to a channel.
-//
-// Flags:
-// --image=IMAGE image to post
-//
-// Args:
-// <channel> channel to post to
-// [<text>] text to post
-// $ chat post --image=~/Downloads/owls.jpg pics
-//
-// From code like this:
-//
-// package main
-//
-// import "gopkg.in/alecthomas/kingpin.v2"
-//
-// var (
-// debug = kingpin.Flag("debug", "enable debug mode").Default("false").Bool()
-// serverIP = kingpin.Flag("server", "server address").Default("127.0.0.1").IP()
-//
-// register = kingpin.Command("register", "Register a new user.")
-// registerNick = register.Arg("nick", "nickname for user").Required().String()
-// registerName = register.Arg("name", "name of user").Required().String()
-//
-// post = kingpin.Command("post", "Post a message to a channel.")
-// postImage = post.Flag("image", "image to post").ExistingFile()
-// postChannel = post.Arg("channel", "channel to post to").Required().String()
-// postText = post.Arg("text", "text to post").String()
-// )
-//
-// func main() {
-// switch kingpin.Parse() {
-// // Register user
-// case "register":
-// println(*registerNick)
-//
-// // Post message
-// case "post":
-// if *postImage != "" {
-// }
-// if *postText != "" {
-// }
-// }
-// }
-package kingpin
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go b/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go
deleted file mode 100644
index c01a27df..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/envar.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package kingpin
-
-import (
- "os"
- "regexp"
-)
-
-var (
- envVarValuesSeparator = "\r?\n"
- envVarValuesTrimmer = regexp.MustCompile(envVarValuesSeparator + "$")
- envVarValuesSplitter = regexp.MustCompile(envVarValuesSeparator)
-)
-
-type envarMixin struct {
- envar string
- noEnvar bool
-}
-
-func (e *envarMixin) HasEnvarValue() bool {
- return e.GetEnvarValue() != ""
-}
-
-func (e *envarMixin) GetEnvarValue() string {
- if e.noEnvar || e.envar == "" {
- return ""
- }
- return os.Getenv(e.envar)
-}
-
-func (e *envarMixin) GetSplitEnvarValue() []string {
- values := make([]string, 0)
-
- envarValue := e.GetEnvarValue()
- if envarValue == "" {
- return values
- }
-
- // Split by new line to extract multiple values, if any.
- trimmed := envVarValuesTrimmer.ReplaceAllString(envarValue, "")
- for _, value := range envVarValuesSplitter.Split(trimmed, -1) {
- values = append(values, value)
- }
-
- return values
-}
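
The envar.go removed above splits newline-separated environment values so that a single variable can populate a repeatable flag. A sketch of that behaviour; the flag and variable names are invented:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("demo", "Illustrative environment-variable defaults.")

        // A repeatable flag: if DEMO_TAGS holds two newline-separated lines,
        // *tags ends up with two entries (see GetSplitEnvarValue above).
        tags := app.Flag("tag", "May be repeated.").Envar("DEMO_TAGS").Strings()

        kingpin.MustParse(app.Parse(os.Args[1:]))
        fmt.Println(*tags)
    }
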
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go b/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
deleted file mode 100644
index 8f33721f..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/flags.go
+++ /dev/null
@@ -1,308 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "strings"
-)
-
-type flagGroup struct {
- short map[string]*FlagClause
- long map[string]*FlagClause
- flagOrder []*FlagClause
-}
-
-func newFlagGroup() *flagGroup {
- return &flagGroup{
- short: map[string]*FlagClause{},
- long: map[string]*FlagClause{},
- }
-}
-
-// GetFlag gets a flag definition.
-//
-// This allows existing flags to be modified after definition but before parsing. Useful for
-// modular applications.
-func (f *flagGroup) GetFlag(name string) *FlagClause {
- return f.long[name]
-}
-
-// Flag defines a new flag with the given long name and help.
-func (f *flagGroup) Flag(name, help string) *FlagClause {
- flag := newFlag(name, help)
- f.long[name] = flag
- f.flagOrder = append(f.flagOrder, flag)
- return flag
-}
-
-func (f *flagGroup) init(defaultEnvarPrefix string) error {
- if err := f.checkDuplicates(); err != nil {
- return err
- }
- for _, flag := range f.long {
- if defaultEnvarPrefix != "" && !flag.noEnvar && flag.envar == "" {
- flag.envar = envarTransform(defaultEnvarPrefix + "_" + flag.name)
- }
- if err := flag.init(); err != nil {
- return err
- }
- if flag.shorthand != 0 {
- f.short[string(flag.shorthand)] = flag
- }
- }
- return nil
-}
-
-func (f *flagGroup) checkDuplicates() error {
- seenShort := map[rune]bool{}
- seenLong := map[string]bool{}
- for _, flag := range f.flagOrder {
- if flag.shorthand != 0 {
- if _, ok := seenShort[flag.shorthand]; ok {
- return fmt.Errorf("duplicate short flag -%c", flag.shorthand)
- }
- seenShort[flag.shorthand] = true
- }
- if _, ok := seenLong[flag.name]; ok {
- return fmt.Errorf("duplicate long flag --%s", flag.name)
- }
- seenLong[flag.name] = true
- }
- return nil
-}
-
-func (f *flagGroup) parse(context *ParseContext) (*FlagClause, error) {
- var token *Token
-
-loop:
- for {
- token = context.Peek()
- switch token.Type {
- case TokenEOL:
- break loop
-
- case TokenLong, TokenShort:
- flagToken := token
- defaultValue := ""
- var flag *FlagClause
- var ok bool
- invert := false
-
- name := token.Value
- if token.Type == TokenLong {
- flag, ok = f.long[name]
- if !ok {
- if strings.HasPrefix(name, "no-") {
- name = name[3:]
- invert = true
- }
- flag, ok = f.long[name]
- }
- if !ok {
- return nil, fmt.Errorf("unknown long flag '%s'", flagToken)
- }
- } else {
- flag, ok = f.short[name]
- if !ok {
- return nil, fmt.Errorf("unknown short flag '%s'", flagToken)
- }
- }
-
- context.Next()
-
- fb, ok := flag.value.(boolFlag)
- if ok && fb.IsBoolFlag() {
- if invert {
- defaultValue = "false"
- } else {
- defaultValue = "true"
- }
- } else {
- if invert {
- context.Push(token)
- return nil, fmt.Errorf("unknown long flag '%s'", flagToken)
- }
- token = context.Peek()
- if token.Type != TokenArg {
- context.Push(token)
- return nil, fmt.Errorf("expected argument for flag '%s'", flagToken)
- }
- context.Next()
- defaultValue = token.Value
- }
-
- context.matchedFlag(flag, defaultValue)
- return flag, nil
-
- default:
- break loop
- }
- }
- return nil, nil
-}
-
-// FlagClause is a fluid interface used to build flags.
-type FlagClause struct {
- parserMixin
- actionMixin
- completionsMixin
- envarMixin
- name string
- shorthand rune
- help string
- defaultValues []string
- placeholder string
- hidden bool
-}
-
-func newFlag(name, help string) *FlagClause {
- f := &FlagClause{
- name: name,
- help: help,
- }
- return f
-}
-
-func (f *FlagClause) setDefault() error {
- if f.HasEnvarValue() {
- if v, ok := f.value.(repeatableFlag); !ok || !v.IsCumulative() {
- // Use the value as-is
- return f.value.Set(f.GetEnvarValue())
- } else {
- for _, value := range f.GetSplitEnvarValue() {
- if err := f.value.Set(value); err != nil {
- return err
- }
- }
- return nil
- }
- }
-
- if len(f.defaultValues) > 0 {
- for _, defaultValue := range f.defaultValues {
- if err := f.value.Set(defaultValue); err != nil {
- return err
- }
- }
- return nil
- }
-
- return nil
-}
-
-func (f *FlagClause) needsValue() bool {
- haveDefault := len(f.defaultValues) > 0
- return f.required && !(haveDefault || f.HasEnvarValue())
-}
-
-func (f *FlagClause) init() error {
- if f.required && len(f.defaultValues) > 0 {
- return fmt.Errorf("required flag '--%s' with default value that will never be used", f.name)
- }
- if f.value == nil {
- return fmt.Errorf("no type defined for --%s (eg. .String())", f.name)
- }
- if v, ok := f.value.(repeatableFlag); (!ok || !v.IsCumulative()) && len(f.defaultValues) > 1 {
- return fmt.Errorf("invalid default for '--%s', expecting single value", f.name)
- }
- return nil
-}
-
-// Dispatch to the given function after the flag is parsed and validated.
-func (f *FlagClause) Action(action Action) *FlagClause {
- f.addAction(action)
- return f
-}
-
-func (f *FlagClause) PreAction(action Action) *FlagClause {
- f.addPreAction(action)
- return f
-}
-
-// HintAction registers a HintAction (function) for the flag to provide completions
-func (a *FlagClause) HintAction(action HintAction) *FlagClause {
- a.addHintAction(action)
- return a
-}
-
-// HintOptions registers any number of options for the flag to provide completions
-func (a *FlagClause) HintOptions(options ...string) *FlagClause {
- a.addHintAction(func() []string {
- return options
- })
- return a
-}
-
-func (a *FlagClause) EnumVar(target *string, options ...string) {
- a.parserMixin.EnumVar(target, options...)
- a.addHintActionBuiltin(func() []string {
- return options
- })
-}
-
-func (a *FlagClause) Enum(options ...string) (target *string) {
- a.addHintActionBuiltin(func() []string {
- return options
- })
- return a.parserMixin.Enum(options...)
-}
-
-// Default values for this flag. They *must* be parseable by the value of the flag.
-func (f *FlagClause) Default(values ...string) *FlagClause {
- f.defaultValues = values
- return f
-}
-
-// DEPRECATED: Use Envar(name) instead.
-func (f *FlagClause) OverrideDefaultFromEnvar(envar string) *FlagClause {
- return f.Envar(envar)
-}
-
-// Envar overrides the default value(s) for a flag from an environment variable,
-// if it is set. Several default values can be provided by using new lines to
-// separate them.
-func (f *FlagClause) Envar(name string) *FlagClause {
- f.envar = name
- f.noEnvar = false
- return f
-}
-
-// NoEnvar forces environment variable defaults to be disabled for this flag.
-// Most useful in conjunction with app.DefaultEnvars().
-func (f *FlagClause) NoEnvar() *FlagClause {
- f.envar = ""
- f.noEnvar = true
- return f
-}
-
-// PlaceHolder sets the place-holder string used for flag values in the help. The
-// default behaviour is to use the value provided by Default() if provided,
-// then fall back on the capitalized flag name.
-func (f *FlagClause) PlaceHolder(placeholder string) *FlagClause {
- f.placeholder = placeholder
- return f
-}
-
-// Hidden hides a flag from usage but still allows it to be used.
-func (f *FlagClause) Hidden() *FlagClause {
- f.hidden = true
- return f
-}
-
-// Required makes the flag required. You can not provide a Default() value to a Required() flag.
-func (f *FlagClause) Required() *FlagClause {
- f.required = true
- return f
-}
-
-// Short sets the short flag name.
-func (f *FlagClause) Short(name rune) *FlagClause {
- f.shorthand = name
- return f
-}
-
-// Bool makes this flag a boolean flag.
-func (f *FlagClause) Bool() (target *bool) {
- target = new(bool)
- f.SetValue(newBoolValue(target))
- return
-}
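
The flags.go removed above implements FlagClause, including boolean negation (--no-NAME), environment-variable defaults, enums and hidden flags. A sketch of those declarations, with invented flag names and defaults:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("flagdemo", "Illustrative flag declarations.")

        level := app.Flag("log.level", "Log level.").Default("info").
            Enum("debug", "info", "warn", "error")
        addr := app.Flag("listen-address", "Address to listen on.").
            Default(":8080").Envar("FLAGDEMO_LISTEN").String()
        // Bool flags get an implicit --no-NAME form, handled by flagGroup.parse.
        color := app.Flag("color", "Colorize output.").Default("true").Bool()
        app.Flag("trace", "Internal tracing.").Hidden().Bool()

        kingpin.MustParse(app.Parse(os.Args[1:]))
        fmt.Println(*level, *addr, *color)
    }
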
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/global.go b/vendor/gopkg.in/alecthomas/kingpin.v2/global.go
deleted file mode 100644
index 10a29137..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/global.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package kingpin
-
-import (
- "os"
- "path/filepath"
-)
-
-var (
- // CommandLine is the default Kingpin parser.
- CommandLine = New(filepath.Base(os.Args[0]), "")
- // Global help flag. Exposed for user customisation.
- HelpFlag = CommandLine.HelpFlag
- // Top-level help command. Exposed for user customisation. May be nil.
- HelpCommand = CommandLine.HelpCommand
- // Global version flag. Exposed for user customisation. May be nil.
- VersionFlag = CommandLine.VersionFlag
-)
-
-// Command adds a new command to the default parser.
-func Command(name, help string) *CmdClause {
- return CommandLine.Command(name, help)
-}
-
-// Flag adds a new flag to the default parser.
-func Flag(name, help string) *FlagClause {
- return CommandLine.Flag(name, help)
-}
-
-// Arg adds a new argument to the top-level of the default parser.
-func Arg(name, help string) *ArgClause {
- return CommandLine.Arg(name, help)
-}
-
-// Parse and return the selected command. Will call the termination handler if
-// an error is encountered.
-func Parse() string {
- selected := MustParse(CommandLine.Parse(os.Args[1:]))
- if selected == "" && CommandLine.cmdGroup.have() {
- Usage()
- CommandLine.terminate(0)
- }
- return selected
-}
-
-// Errorf prints an error message to stderr.
-func Errorf(format string, args ...interface{}) {
- CommandLine.Errorf(format, args...)
-}
-
-// Fatalf prints an error message to stderr and exits.
-func Fatalf(format string, args ...interface{}) {
- CommandLine.Fatalf(format, args...)
-}
-
-// FatalIfError prints an error and exits if err is not nil. The error is printed
-// with the given prefix.
-func FatalIfError(err error, format string, args ...interface{}) {
- CommandLine.FatalIfError(err, format, args...)
-}
-
-// FatalUsage prints an error message followed by usage information, then
-// exits with a non-zero status.
-func FatalUsage(format string, args ...interface{}) {
- CommandLine.FatalUsage(format, args...)
-}
-
-// FatalUsageContext writes a printf formatted error message to stderr, then
-// usage information for the given ParseContext, before exiting.
-func FatalUsageContext(context *ParseContext, format string, args ...interface{}) {
- CommandLine.FatalUsageContext(context, format, args...)
-}
-
-// Usage prints usage to stderr.
-func Usage() {
- CommandLine.Usage(os.Args[1:])
-}
-
-// Set global usage template to use (defaults to DefaultUsageTemplate).
-func UsageTemplate(template string) *Application {
- return CommandLine.UsageTemplate(template)
-}
-
-// MustParse can be used with app.Parse(args) to exit with an error if parsing fails.
-func MustParse(command string, err error) string {
- if err != nil {
- Fatalf("%s, try --help", err)
- }
- return command
-}
-
-// Version adds a flag for displaying the application version number.
-func Version(version string) *Application {
- return CommandLine.Version(version)
-}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go
deleted file mode 100644
index a269531c..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd
-
-package kingpin
-
-import "io"
-
-func guessWidth(w io.Writer) int {
- return 80
-}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go b/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go
deleted file mode 100644
index ad8163f5..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build !appengine,linux freebsd darwin dragonfly netbsd openbsd
-
-package kingpin
-
-import (
- "io"
- "os"
- "strconv"
- "syscall"
- "unsafe"
-)
-
-func guessWidth(w io.Writer) int {
- // check if COLUMNS env is set to comply with
- // http://pubs.opengroup.org/onlinepubs/009604499/basedefs/xbd_chap08.html
- colsStr := os.Getenv("COLUMNS")
- if colsStr != "" {
- if cols, err := strconv.Atoi(colsStr); err == nil {
- return cols
- }
- }
-
- if t, ok := w.(*os.File); ok {
- fd := t.Fd()
- var dimensions [4]uint16
-
- if _, _, err := syscall.Syscall6(
- syscall.SYS_IOCTL,
- uintptr(fd),
- uintptr(syscall.TIOCGWINSZ),
- uintptr(unsafe.Pointer(&dimensions)),
- 0, 0, 0,
- ); err == 0 {
- return int(dimensions[1])
- }
- }
- return 80
-}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/model.go b/vendor/gopkg.in/alecthomas/kingpin.v2/model.go
deleted file mode 100644
index a4ee83b4..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/model.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// Data model for Kingpin command-line structure.
-
-type FlagGroupModel struct {
- Flags []*FlagModel
-}
-
-func (f *FlagGroupModel) FlagSummary() string {
- out := []string{}
- count := 0
- for _, flag := range f.Flags {
- if flag.Name != "help" {
- count++
- }
- if flag.Required {
- if flag.IsBoolFlag() {
- out = append(out, fmt.Sprintf("--[no-]%s", flag.Name))
- } else {
- out = append(out, fmt.Sprintf("--%s=%s", flag.Name, flag.FormatPlaceHolder()))
- }
- }
- }
- if count != len(out) {
- out = append(out, "[]")
- }
- return strings.Join(out, " ")
-}
-
-type FlagModel struct {
- Name string
- Help string
- Short rune
- Default []string
- Envar string
- PlaceHolder string
- Required bool
- Hidden bool
- Value Value
-}
-
-func (f *FlagModel) String() string {
- return f.Value.String()
-}
-
-func (f *FlagModel) IsBoolFlag() bool {
- if fl, ok := f.Value.(boolFlag); ok {
- return fl.IsBoolFlag()
- }
- return false
-}
-
-func (f *FlagModel) FormatPlaceHolder() string {
- if f.PlaceHolder != "" {
- return f.PlaceHolder
- }
- if len(f.Default) > 0 {
- ellipsis := ""
- if len(f.Default) > 1 {
- ellipsis = "..."
- }
- if _, ok := f.Value.(*stringValue); ok {
- return strconv.Quote(f.Default[0]) + ellipsis
- }
- return f.Default[0] + ellipsis
- }
- return strings.ToUpper(f.Name)
-}
-
-type ArgGroupModel struct {
- Args []*ArgModel
-}
-
-func (a *ArgGroupModel) ArgSummary() string {
- depth := 0
- out := []string{}
- for _, arg := range a.Args {
- h := "<" + arg.Name + ">"
- if !arg.Required {
- h = "[" + h
- depth++
- }
- out = append(out, h)
- }
- out[len(out)-1] = out[len(out)-1] + strings.Repeat("]", depth)
- return strings.Join(out, " ")
-}
-
-type ArgModel struct {
- Name string
- Help string
- Default []string
- Envar string
- Required bool
- Value Value
-}
-
-func (a *ArgModel) String() string {
- return a.Value.String()
-}
-
-type CmdGroupModel struct {
- Commands []*CmdModel
-}
-
-func (c *CmdGroupModel) FlattenedCommands() (out []*CmdModel) {
- for _, cmd := range c.Commands {
- if len(cmd.Commands) == 0 {
- out = append(out, cmd)
- }
- out = append(out, cmd.FlattenedCommands()...)
- }
- return
-}
-
-type CmdModel struct {
- Name string
- Aliases []string
- Help string
- FullCommand string
- Depth int
- Hidden bool
- Default bool
- *FlagGroupModel
- *ArgGroupModel
- *CmdGroupModel
-}
-
-func (c *CmdModel) String() string {
- return c.FullCommand
-}
-
-type ApplicationModel struct {
- Name string
- Help string
- Version string
- Author string
- *ArgGroupModel
- *CmdGroupModel
- *FlagGroupModel
-}
-
-func (a *Application) Model() *ApplicationModel {
- return &ApplicationModel{
- Name: a.Name,
- Help: a.Help,
- Version: a.version,
- Author: a.author,
- FlagGroupModel: a.flagGroup.Model(),
- ArgGroupModel: a.argGroup.Model(),
- CmdGroupModel: a.cmdGroup.Model(),
- }
-}
-
-func (a *argGroup) Model() *ArgGroupModel {
- m := &ArgGroupModel{}
- for _, arg := range a.args {
- m.Args = append(m.Args, arg.Model())
- }
- return m
-}
-
-func (a *ArgClause) Model() *ArgModel {
- return &ArgModel{
- Name: a.name,
- Help: a.help,
- Default: a.defaultValues,
- Envar: a.envar,
- Required: a.required,
- Value: a.value,
- }
-}
-
-func (f *flagGroup) Model() *FlagGroupModel {
- m := &FlagGroupModel{}
- for _, fl := range f.flagOrder {
- m.Flags = append(m.Flags, fl.Model())
- }
- return m
-}
-
-func (f *FlagClause) Model() *FlagModel {
- return &FlagModel{
- Name: f.name,
- Help: f.help,
- Short: rune(f.shorthand),
- Default: f.defaultValues,
- Envar: f.envar,
- PlaceHolder: f.placeholder,
- Required: f.required,
- Hidden: f.hidden,
- Value: f.value,
- }
-}
-
-func (c *cmdGroup) Model() *CmdGroupModel {
- m := &CmdGroupModel{}
- for _, cm := range c.commandOrder {
- m.Commands = append(m.Commands, cm.Model())
- }
- return m
-}
-
-func (c *CmdClause) Model() *CmdModel {
- depth := 0
- for i := c; i != nil; i = i.parent {
- depth++
- }
- return &CmdModel{
- Name: c.name,
- Aliases: c.aliases,
- Help: c.help,
- Depth: depth,
- Hidden: c.hidden,
- Default: c.isDefault,
- FullCommand: c.FullCommand(),
- FlagGroupModel: c.flagGroup.Model(),
- ArgGroupModel: c.argGroup.Model(),
- CmdGroupModel: c.cmdGroup.Model(),
- }
-}
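
The model.go removed above exposes a read-only model of the declared flags, arguments and commands, which the usage templates render. The same model can be used for introspection; a sketch with invented names:

    package main

    import (
        "fmt"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("demo", "Illustrative model introspection.")
        app.Flag("verbose", "Enable verbose output.").Bool()
        app.Command("run", "Run the thing.")

        m := app.Model()
        for _, f := range m.Flags {
            fmt.Printf("--%s\t%s\n", f.Name, f.Help)
        }
        for _, c := range m.Commands {
            fmt.Println(c.FullCommand, "-", c.Help)
        }
    }
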
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go
deleted file mode 100644
index 2a183519..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go
+++ /dev/null
@@ -1,396 +0,0 @@
-package kingpin
-
-import (
- "bufio"
- "fmt"
- "os"
- "strings"
- "unicode/utf8"
-)
-
-type TokenType int
-
-// Token types.
-const (
- TokenShort TokenType = iota
- TokenLong
- TokenArg
- TokenError
- TokenEOL
-)
-
-func (t TokenType) String() string {
- switch t {
- case TokenShort:
- return "short flag"
- case TokenLong:
- return "long flag"
- case TokenArg:
- return "argument"
- case TokenError:
- return "error"
- case TokenEOL:
- return ""
- }
- return "?"
-}
-
-var (
- TokenEOLMarker = Token{-1, TokenEOL, ""}
-)
-
-type Token struct {
- Index int
- Type TokenType
- Value string
-}
-
-func (t *Token) Equal(o *Token) bool {
- return t.Index == o.Index
-}
-
-func (t *Token) IsFlag() bool {
- return t.Type == TokenShort || t.Type == TokenLong
-}
-
-func (t *Token) IsEOF() bool {
- return t.Type == TokenEOL
-}
-
-func (t *Token) String() string {
- switch t.Type {
- case TokenShort:
- return "-" + t.Value
- case TokenLong:
- return "--" + t.Value
- case TokenArg:
- return t.Value
- case TokenError:
- return "error: " + t.Value
- case TokenEOL:
- return ""
- default:
- panic("unhandled type")
- }
-}
-
-// A union of possible elements in a parse stack.
-type ParseElement struct {
- // Clause is either *CmdClause, *ArgClause or *FlagClause.
- Clause interface{}
- // Value is corresponding value for an ArgClause or FlagClause (if any).
- Value *string
-}
-
-// ParseContext holds the current context of the parser. When passed to
-// Action() callbacks Elements will be fully populated with *FlagClause,
-// *ArgClause and *CmdClause values and their corresponding arguments (if
-// any).
-type ParseContext struct {
- SelectedCommand *CmdClause
- ignoreDefault bool
- argsOnly bool
- peek []*Token
- argi int // Index of current command-line arg we're processing.
- args []string
- rawArgs []string
- flags *flagGroup
- arguments *argGroup
- argumenti int // Cursor into arguments
- // Flags, arguments and commands encountered and collected during parse.
- Elements []*ParseElement
-}
-
-func (p *ParseContext) nextArg() *ArgClause {
- if p.argumenti >= len(p.arguments.args) {
- return nil
- }
- arg := p.arguments.args[p.argumenti]
- if !arg.consumesRemainder() {
- p.argumenti++
- }
- return arg
-}
-
-func (p *ParseContext) next() {
- p.argi++
- p.args = p.args[1:]
-}
-
-// HasTrailingArgs returns true if there are unparsed command-line arguments.
-// This can occur if the parser can not match remaining arguments.
-func (p *ParseContext) HasTrailingArgs() bool {
- return len(p.args) > 0
-}
-
-func tokenize(args []string, ignoreDefault bool) *ParseContext {
- return &ParseContext{
- ignoreDefault: ignoreDefault,
- args: args,
- rawArgs: args,
- flags: newFlagGroup(),
- arguments: newArgGroup(),
- }
-}
-
-func (p *ParseContext) mergeFlags(flags *flagGroup) {
- for _, flag := range flags.flagOrder {
- if flag.shorthand != 0 {
- p.flags.short[string(flag.shorthand)] = flag
- }
- p.flags.long[flag.name] = flag
- p.flags.flagOrder = append(p.flags.flagOrder, flag)
- }
-}
-
-func (p *ParseContext) mergeArgs(args *argGroup) {
- for _, arg := range args.args {
- p.arguments.args = append(p.arguments.args, arg)
- }
-}
-
-func (p *ParseContext) EOL() bool {
- return p.Peek().Type == TokenEOL
-}
-
-func (p *ParseContext) Error() bool {
- return p.Peek().Type == TokenError
-}
-
-// Next token in the parse context.
-func (p *ParseContext) Next() *Token {
- if len(p.peek) > 0 {
- return p.pop()
- }
-
- // End of tokens.
- if len(p.args) == 0 {
- return &Token{Index: p.argi, Type: TokenEOL}
- }
-
- arg := p.args[0]
- p.next()
-
- if p.argsOnly {
- return &Token{p.argi, TokenArg, arg}
- }
-
- // All remaining args are passed directly.
- if arg == "--" {
- p.argsOnly = true
- return p.Next()
- }
-
- if strings.HasPrefix(arg, "--") {
- parts := strings.SplitN(arg[2:], "=", 2)
- token := &Token{p.argi, TokenLong, parts[0]}
- if len(parts) == 2 {
- p.Push(&Token{p.argi, TokenArg, parts[1]})
- }
- return token
- }
-
- if strings.HasPrefix(arg, "-") {
- if len(arg) == 1 {
- return &Token{Index: p.argi, Type: TokenShort}
- }
- shortRune, size := utf8.DecodeRuneInString(arg[1:])
- short := string(shortRune)
- flag, ok := p.flags.short[short]
- // Not a known short flag, we'll just return it anyway.
- if !ok {
- } else if fb, ok := flag.value.(boolFlag); ok && fb.IsBoolFlag() {
- // Bool short flag.
- } else {
- // Short flag with combined argument: -fARG
- token := &Token{p.argi, TokenShort, short}
- if len(arg) > size+1 {
- p.Push(&Token{p.argi, TokenArg, arg[size+1:]})
- }
- return token
- }
-
- if len(arg) > size+1 {
- p.args = append([]string{"-" + arg[size+1:]}, p.args...)
- }
- return &Token{p.argi, TokenShort, short}
- } else if strings.HasPrefix(arg, "@") {
- expanded, err := ExpandArgsFromFile(arg[1:])
- if err != nil {
- return &Token{p.argi, TokenError, err.Error()}
- }
- if len(p.args) == 0 {
- p.args = expanded
- } else {
- p.args = append(expanded, p.args...)
- }
- return p.Next()
- }
-
- return &Token{p.argi, TokenArg, arg}
-}
-
-func (p *ParseContext) Peek() *Token {
- if len(p.peek) == 0 {
- return p.Push(p.Next())
- }
- return p.peek[len(p.peek)-1]
-}
-
-func (p *ParseContext) Push(token *Token) *Token {
- p.peek = append(p.peek, token)
- return token
-}
-
-func (p *ParseContext) pop() *Token {
- end := len(p.peek) - 1
- token := p.peek[end]
- p.peek = p.peek[0:end]
- return token
-}
-
-func (p *ParseContext) String() string {
- return p.SelectedCommand.FullCommand()
-}
-
-func (p *ParseContext) matchedFlag(flag *FlagClause, value string) {
- p.Elements = append(p.Elements, &ParseElement{Clause: flag, Value: &value})
-}
-
-func (p *ParseContext) matchedArg(arg *ArgClause, value string) {
- p.Elements = append(p.Elements, &ParseElement{Clause: arg, Value: &value})
-}
-
-func (p *ParseContext) matchedCmd(cmd *CmdClause) {
- p.Elements = append(p.Elements, &ParseElement{Clause: cmd})
- p.mergeFlags(cmd.flagGroup)
- p.mergeArgs(cmd.argGroup)
- p.SelectedCommand = cmd
-}
-
-// Expand arguments from a file. Lines starting with # will be treated as comments.
-func ExpandArgsFromFile(filename string) (out []string, err error) {
- if filename == "" {
- return nil, fmt.Errorf("expected @ file to expand arguments from")
- }
- r, err := os.Open(filename)
- if err != nil {
- return nil, fmt.Errorf("failed to open arguments file %q: %s", filename, err)
- }
- defer r.Close()
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- line := scanner.Text()
- if strings.HasPrefix(line, "#") {
- continue
- }
- out = append(out, line)
- }
- err = scanner.Err()
- if err != nil {
- return nil, fmt.Errorf("failed to read arguments from %q: %s", filename, err)
- }
- return
-}
-
-func parse(context *ParseContext, app *Application) (err error) {
- context.mergeFlags(app.flagGroup)
- context.mergeArgs(app.argGroup)
-
- cmds := app.cmdGroup
- ignoreDefault := context.ignoreDefault
-
-loop:
- for !context.EOL() && !context.Error() {
- token := context.Peek()
-
- switch token.Type {
- case TokenLong, TokenShort:
- if flag, err := context.flags.parse(context); err != nil {
- if !ignoreDefault {
- if cmd := cmds.defaultSubcommand(); cmd != nil {
- cmd.completionAlts = cmds.cmdNames()
- context.matchedCmd(cmd)
- cmds = cmd.cmdGroup
- break
- }
- }
- return err
- } else if flag == HelpFlag {
- ignoreDefault = true
- }
-
- case TokenArg:
- if cmds.have() {
- selectedDefault := false
- cmd, ok := cmds.commands[token.String()]
- if !ok {
- if !ignoreDefault {
- if cmd = cmds.defaultSubcommand(); cmd != nil {
- cmd.completionAlts = cmds.cmdNames()
- selectedDefault = true
- }
- }
- if cmd == nil {
- return fmt.Errorf("expected command but got %q", token)
- }
- }
- if cmd == HelpCommand {
- ignoreDefault = true
- }
- cmd.completionAlts = nil
- context.matchedCmd(cmd)
- cmds = cmd.cmdGroup
- if !selectedDefault {
- context.Next()
- }
- } else if context.arguments.have() {
- if app.noInterspersed {
- // no more flags
- context.argsOnly = true
- }
- arg := context.nextArg()
- if arg == nil {
- break loop
- }
- context.matchedArg(arg, token.String())
- context.Next()
- } else {
- break loop
- }
-
- case TokenEOL:
- break loop
- }
- }
-
- // Move to innermost default command.
- for !ignoreDefault {
- if cmd := cmds.defaultSubcommand(); cmd != nil {
- cmd.completionAlts = cmds.cmdNames()
- context.matchedCmd(cmd)
- cmds = cmd.cmdGroup
- } else {
- break
- }
- }
-
- if context.Error() {
- return fmt.Errorf("%s", context.Peek().Value)
- }
-
- if !context.EOL() {
- return fmt.Errorf("unexpected %s", context.Peek())
- }
-
- // Set defaults for all remaining args.
- for arg := context.nextArg(); arg != nil && !arg.consumesRemainder(); arg = context.nextArg() {
- for _, defaultValue := range arg.defaultValues {
- if err := arg.value.Set(defaultValue); err != nil {
- return fmt.Errorf("invalid default value '%s' for argument '%s'", defaultValue, arg.name)
- }
- }
- }
-
- return
-}
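
The parser.go removed above tokenises the command line and, through ExpandArgsFromFile, lets extra arguments be read from a file referenced as @FILE (lines starting with # are skipped). A sketch calling that exported helper directly; the file name is invented:

    package main

    import (
        "fmt"
        "os"

        "gopkg.in/alecthomas/kingpin.v2"
    )

    func main() {
        app := kingpin.New("demo", "Illustrative @file argument expansion.")
        verbose := app.Flag("verbose", "Enable verbose output.").Bool()

        // Roughly what passing "@args.txt" on the command line does:
        // each non-comment line of args.txt becomes one argument.
        args, err := kingpin.ExpandArgsFromFile("args.txt")
        app.FatalIfError(err, "expanding @file arguments")

        kingpin.MustParse(app.Parse(append(args, os.Args[1:]...)))
        fmt.Println(*verbose)
    }
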
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go
deleted file mode 100644
index d9ad57e5..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/parsers.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package kingpin
-
-import (
- "net"
- "net/url"
- "os"
- "time"
-
- "github.com/alecthomas/units"
-)
-
-type Settings interface {
- SetValue(value Value)
-}
-
-type parserMixin struct {
- value Value
- required bool
-}
-
-func (p *parserMixin) SetValue(value Value) {
- p.value = value
-}
-
-// StringMap provides key=value parsing into a map.
-func (p *parserMixin) StringMap() (target *map[string]string) {
- target = &(map[string]string{})
- p.StringMapVar(target)
- return
-}
-
-// Duration sets the parser to a time.Duration parser.
-func (p *parserMixin) Duration() (target *time.Duration) {
- target = new(time.Duration)
- p.DurationVar(target)
- return
-}
-
-// Bytes parses numeric byte units, e.g. 1.5KB.
-func (p *parserMixin) Bytes() (target *units.Base2Bytes) {
- target = new(units.Base2Bytes)
- p.BytesVar(target)
- return
-}
-
-// IP sets the parser to a net.IP parser.
-func (p *parserMixin) IP() (target *net.IP) {
- target = new(net.IP)
- p.IPVar(target)
- return
-}
-
-// TCP (host:port) address.
-func (p *parserMixin) TCP() (target **net.TCPAddr) {
- target = new(*net.TCPAddr)
- p.TCPVar(target)
- return
-}
-
-// TCPVar (host:port) address.
-func (p *parserMixin) TCPVar(target **net.TCPAddr) {
- p.SetValue(newTCPAddrValue(target))
-}
-
-// ExistingFile sets the parser to one that requires and returns an existing file.
-func (p *parserMixin) ExistingFile() (target *string) {
- target = new(string)
- p.ExistingFileVar(target)
- return
-}
-
-// ExistingDir sets the parser to one that requires and returns an existing directory.
-func (p *parserMixin) ExistingDir() (target *string) {
- target = new(string)
- p.ExistingDirVar(target)
- return
-}
-
-// ExistingFileOrDir sets the parser to one that requires and returns an existing file OR directory.
-func (p *parserMixin) ExistingFileOrDir() (target *string) {
- target = new(string)
- p.ExistingFileOrDirVar(target)
- return
-}
-
-// File returns an os.File against an existing file.
-func (p *parserMixin) File() (target **os.File) {
- target = new(*os.File)
- p.FileVar(target)
- return
-}
-
-// File attempts to open a File with os.OpenFile(flag, perm).
-func (p *parserMixin) OpenFile(flag int, perm os.FileMode) (target **os.File) {
- target = new(*os.File)
- p.OpenFileVar(target, flag, perm)
- return
-}
-
-// URL provides a valid, parsed url.URL.
-func (p *parserMixin) URL() (target **url.URL) {
- target = new(*url.URL)
- p.URLVar(target)
- return
-}
-
-// StringMap provides key=value parsing into a map.
-func (p *parserMixin) StringMapVar(target *map[string]string) {
- p.SetValue(newStringMapValue(target))
-}
-
-// Float sets the parser to a float64 parser.
-func (p *parserMixin) Float() (target *float64) {
- return p.Float64()
-}
-
-// Float sets the parser to a float64 parser.
-func (p *parserMixin) FloatVar(target *float64) {
- p.Float64Var(target)
-}
-
-// Duration sets the parser to a time.Duration parser.
-func (p *parserMixin) DurationVar(target *time.Duration) {
- p.SetValue(newDurationValue(target))
-}
-
-// BytesVar parses numeric byte units. eg. 1.5KB
-func (p *parserMixin) BytesVar(target *units.Base2Bytes) {
- p.SetValue(newBytesValue(target))
-}
-
-// IP sets the parser to a net.IP parser.
-func (p *parserMixin) IPVar(target *net.IP) {
- p.SetValue(newIPValue(target))
-}
-
-// ExistingFile sets the parser to one that requires and returns an existing file.
-func (p *parserMixin) ExistingFileVar(target *string) {
- p.SetValue(newExistingFileValue(target))
-}
-
-// ExistingDir sets the parser to one that requires and returns an existing directory.
-func (p *parserMixin) ExistingDirVar(target *string) {
- p.SetValue(newExistingDirValue(target))
-}
-
-// ExistingDir sets the parser to one that requires and returns an existing directory.
-func (p *parserMixin) ExistingFileOrDirVar(target *string) {
- p.SetValue(newExistingFileOrDirValue(target))
-}
-
-// FileVar opens an existing file.
-func (p *parserMixin) FileVar(target **os.File) {
- p.SetValue(newFileValue(target, os.O_RDONLY, 0))
-}
-
-// OpenFileVar calls os.OpenFile(flag, perm)
-func (p *parserMixin) OpenFileVar(target **os.File, flag int, perm os.FileMode) {
- p.SetValue(newFileValue(target, flag, perm))
-}
-
-// URL provides a valid, parsed url.URL.
-func (p *parserMixin) URLVar(target **url.URL) {
- p.SetValue(newURLValue(target))
-}
-
-// URLList provides a parsed list of url.URL values.
-func (p *parserMixin) URLList() (target *[]*url.URL) {
- target = new([]*url.URL)
- p.URLListVar(target)
- return
-}
-
-// URLListVar provides a parsed list of url.URL values.
-func (p *parserMixin) URLListVar(target *[]*url.URL) {
- p.SetValue(newURLListValue(target))
-}
-
-// Enum allows a value from a set of options.
-func (p *parserMixin) Enum(options ...string) (target *string) {
- target = new(string)
- p.EnumVar(target, options...)
- return
-}
-
-// EnumVar allows a value from a set of options.
-func (p *parserMixin) EnumVar(target *string, options ...string) {
- p.SetValue(newEnumFlag(target, options...))
-}
-
-// Enums allows a set of values from a set of options.
-func (p *parserMixin) Enums(options ...string) (target *[]string) {
- target = new([]string)
- p.EnumsVar(target, options...)
- return
-}
-
-// EnumVar allows a value from a set of options.
-func (p *parserMixin) EnumsVar(target *[]string, options ...string) {
- p.SetValue(newEnumsFlag(target, options...))
-}
-
-// A Counter increments a number each time it is encountered.
-func (p *parserMixin) Counter() (target *int) {
- target = new(int)
- p.CounterVar(target)
- return
-}
-
-func (p *parserMixin) CounterVar(target *int) {
- p.SetValue(newCounterValue(target))
-}
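
[Editor's note] parsers.go above only moves: the same builder helpers (Duration, Bytes, Enum, Counter, ...) are now pulled in through the gopkg.in/alecthomas/kingpin.v2 requirement in go.mod rather than this vendored copy. A minimal, hypothetical sketch of how exporter-style code keeps using them unchanged (flag names are illustrative, not taken from this diff):

package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

var (
	// Duration() and Enum() are the typed parsers defined in the file deleted above.
	timeout  = kingpin.Flag("es.timeout", "Hypothetical request timeout.").Default("5s").Duration()
	logLevel = kingpin.Flag("log.level", "Hypothetical log level.").Default("info").Enum("debug", "info", "warn", "error")
)

func main() {
	kingpin.Parse()
	fmt.Println(*timeout, *logLevel)
}
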
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go b/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go
deleted file mode 100644
index 97b5c9fc..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/templates.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package kingpin
-
-// Default usage template.
-var DefaultUsageTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
- {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
-{{.Help|Wrap 4}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-
-{{if .Context.SelectedCommand}}\
-usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
-{{else}}\
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{end}}\
-{{if .Context.Flags}}\
-Flags:
-{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.SelectedCommand}}\
-{{if len .Context.SelectedCommand.Commands}}\
-Subcommands:
-{{template "FormatCommands" .Context.SelectedCommand}}
-{{end}}\
-{{else if .App.Commands}}\
-Commands:
-{{template "FormatCommands" .App}}
-{{end}}\
-`
-
-// Usage template where command's optional flags are listed separately
-var SeparateOptionalFlagsUsageTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
- {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
-{{.Help|Wrap 4}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-{{if .Context.SelectedCommand}}\
-usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
-{{else}}\
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{end}}\
-
-{{if .Context.Flags|RequiredFlags}}\
-Required flags:
-{{.Context.Flags|RequiredFlags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Flags|OptionalFlags}}\
-Optional flags:
-{{.Context.Flags|OptionalFlags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.SelectedCommand}}\
-Subcommands:
-{{if .Context.SelectedCommand.Commands}}\
-{{template "FormatCommands" .Context.SelectedCommand}}
-{{end}}\
-{{else if .App.Commands}}\
-Commands:
-{{template "FormatCommands" .App}}
-{{end}}\
-`
-
-// Usage template with compactly formatted commands.
-var CompactUsageTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommandList"}}\
-{{range .}}\
-{{if not .Hidden}}\
-{{.Depth|Indent}}{{.Name}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
-{{end}}\
-{{template "FormatCommandList" .Commands}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-
-{{if .Context.SelectedCommand}}\
-usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
-{{else}}\
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{end}}\
-{{if .Context.Flags}}\
-Flags:
-{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.SelectedCommand}}\
-{{if .Context.SelectedCommand.Commands}}\
-Commands:
- {{.Context.SelectedCommand}}
-{{template "FormatCommandList" .Context.SelectedCommand.Commands}}
-{{end}}\
-{{else if .App.Commands}}\
-Commands:
-{{template "FormatCommandList" .App.Commands}}
-{{end}}\
-`
-
-var ManPageTemplate = `{{define "FormatFlags"}}\
-{{range .Flags}}\
-{{if not .Hidden}}\
-.TP
-\fB{{if .Short}}-{{.Short|Char}}, {{end}}--{{.Name}}{{if not .IsBoolFlag}}={{.FormatPlaceHolder}}{{end}}\\fR
-{{.Help}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}{{if .Default}}*{{end}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
-.SS
-\fB{{.FullCommand}}{{template "FormatCommand" .}}\\fR
-.PP
-{{.Help}}
-{{template "FormatFlags" .}}\
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}\\fR
-{{end}}\
-
-.TH {{.App.Name}} 1 {{.App.Version}} "{{.App.Author}}"
-.SH "NAME"
-{{.App.Name}}
-.SH "SYNOPSIS"
-.TP
-\fB{{.App.Name}}{{template "FormatUsage" .App}}
-.SH "DESCRIPTION"
-{{.App.Help}}
-.SH "OPTIONS"
-{{template "FormatFlags" .App}}\
-{{if .App.Commands}}\
-.SH "COMMANDS"
-{{template "FormatCommands" .App}}\
-{{end}}\
-`
-
-// Default usage template.
-var LongHelpTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
- {{.FullCommand}}{{template "FormatCommand" .}}
-{{.Help|Wrap 4}}
-{{with .Flags|FlagsToTwoColumns}}{{FormatTwoColumnsWithIndent . 4 2}}{{end}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{if .Context.Flags}}\
-Flags:
-{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .App.Commands}}\
-Commands:
-{{template "FormatCommands" .App}}
-{{end}}\
-`
-
-var BashCompletionTemplate = `
-_{{.App.Name}}_bash_autocomplete() {
- local cur prev opts base
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- opts=$( ${COMP_WORDS[0]} --completion-bash ${COMP_WORDS[@]:1:$COMP_CWORD} )
- COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
- return 0
-}
-complete -F _{{.App.Name}}_bash_autocomplete {{.App.Name}}
-
-`
-
-var ZshCompletionTemplate = `
-#compdef {{.App.Name}}
-autoload -U compinit && compinit
-autoload -U bashcompinit && bashcompinit
-
-_{{.App.Name}}_bash_autocomplete() {
- local cur prev opts base
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- opts=$( ${COMP_WORDS[0]} --completion-bash ${COMP_WORDS[@]:1:$COMP_CWORD} )
- COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
- return 0
-}
-complete -F _{{.App.Name}}_bash_autocomplete {{.App.Name}}
-`
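
[Editor's note] The usage templates removed above (DefaultUsageTemplate, SeparateOptionalFlagsUsageTemplate, CompactUsageTemplate, ManPageTemplate, ...) remain exported identifiers of the kingpin module. A hedged sketch of opting into the compact template, not part of this change:

package main

import "gopkg.in/alecthomas/kingpin.v2"

func main() {
	app := kingpin.New("example", "Illustrative application.")
	// CompactUsageTemplate is the template string declared in the deleted templates.go.
	app.UsageTemplate(kingpin.CompactUsageTemplate)
	app.Flag("verbose", "Verbose output.").Bool()
	kingpin.MustParse(app.Parse(nil))
}
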
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go b/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go
deleted file mode 100644
index 44af6f65..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/usage.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package kingpin
-
-import (
- "bytes"
- "fmt"
- "go/doc"
- "io"
- "strings"
-
- "github.com/alecthomas/template"
-)
-
-var (
- preIndent = " "
-)
-
-func formatTwoColumns(w io.Writer, indent, padding, width int, rows [][2]string) {
- // Find size of first column.
- s := 0
- for _, row := range rows {
- if c := len(row[0]); c > s && c < 30 {
- s = c
- }
- }
-
- indentStr := strings.Repeat(" ", indent)
- offsetStr := strings.Repeat(" ", s+padding)
-
- for _, row := range rows {
- buf := bytes.NewBuffer(nil)
- doc.ToText(buf, row[1], "", preIndent, width-s-padding-indent)
- lines := strings.Split(strings.TrimRight(buf.String(), "\n"), "\n")
- fmt.Fprintf(w, "%s%-*s%*s", indentStr, s, row[0], padding, "")
- if len(row[0]) >= 30 {
- fmt.Fprintf(w, "\n%s%s", indentStr, offsetStr)
- }
- fmt.Fprintf(w, "%s\n", lines[0])
- for _, line := range lines[1:] {
- fmt.Fprintf(w, "%s%s%s\n", indentStr, offsetStr, line)
- }
- }
-}
-
-// Usage writes application usage to w. It parses args to determine
-// appropriate help context, such as which command to show help for.
-func (a *Application) Usage(args []string) {
- context, err := a.parseContext(true, args)
- a.FatalIfError(err, "")
- if err := a.UsageForContextWithTemplate(context, 2, a.usageTemplate); err != nil {
- panic(err)
- }
-}
-
-func formatAppUsage(app *ApplicationModel) string {
- s := []string{app.Name}
- if len(app.Flags) > 0 {
- s = append(s, app.FlagSummary())
- }
- if len(app.Args) > 0 {
- s = append(s, app.ArgSummary())
- }
- return strings.Join(s, " ")
-}
-
-func formatCmdUsage(app *ApplicationModel, cmd *CmdModel) string {
- s := []string{app.Name, cmd.String()}
- if len(app.Flags) > 0 {
- s = append(s, app.FlagSummary())
- }
- if len(app.Args) > 0 {
- s = append(s, app.ArgSummary())
- }
- return strings.Join(s, " ")
-}
-
-func formatFlag(haveShort bool, flag *FlagModel) string {
- flagString := ""
- if flag.Short != 0 {
- flagString += fmt.Sprintf("-%c, --%s", flag.Short, flag.Name)
- } else {
- if haveShort {
- flagString += fmt.Sprintf(" --%s", flag.Name)
- } else {
- flagString += fmt.Sprintf("--%s", flag.Name)
- }
- }
- if !flag.IsBoolFlag() {
- flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder())
- }
- if v, ok := flag.Value.(repeatableFlag); ok && v.IsCumulative() {
- flagString += " ..."
- }
- return flagString
-}
-
-type templateParseContext struct {
- SelectedCommand *CmdModel
- *FlagGroupModel
- *ArgGroupModel
-}
-
-type templateContext struct {
- App *ApplicationModel
- Width int
- Context *templateParseContext
-}
-
-// UsageForContext displays usage information from a ParseContext (obtained from
-// Application.ParseContext() or Action(f) callbacks).
-func (a *Application) UsageForContext(context *ParseContext) error {
- return a.UsageForContextWithTemplate(context, 2, a.usageTemplate)
-}
-
-// UsageForContextWithTemplate is the base usage function. You generally don't need to use this.
-func (a *Application) UsageForContextWithTemplate(context *ParseContext, indent int, tmpl string) error {
- width := guessWidth(a.usageWriter)
- funcs := template.FuncMap{
- "Indent": func(level int) string {
- return strings.Repeat(" ", level*indent)
- },
- "Wrap": func(indent int, s string) string {
- buf := bytes.NewBuffer(nil)
- indentText := strings.Repeat(" ", indent)
- doc.ToText(buf, s, indentText, " "+indentText, width-indent)
- return buf.String()
- },
- "FormatFlag": formatFlag,
- "FlagsToTwoColumns": func(f []*FlagModel) [][2]string {
- rows := [][2]string{}
- haveShort := false
- for _, flag := range f {
- if flag.Short != 0 {
- haveShort = true
- break
- }
- }
- for _, flag := range f {
- if !flag.Hidden {
- rows = append(rows, [2]string{formatFlag(haveShort, flag), flag.Help})
- }
- }
- return rows
- },
- "RequiredFlags": func(f []*FlagModel) []*FlagModel {
- requiredFlags := []*FlagModel{}
- for _, flag := range f {
- if flag.Required {
- requiredFlags = append(requiredFlags, flag)
- }
- }
- return requiredFlags
- },
- "OptionalFlags": func(f []*FlagModel) []*FlagModel {
- optionalFlags := []*FlagModel{}
- for _, flag := range f {
- if !flag.Required {
- optionalFlags = append(optionalFlags, flag)
- }
- }
- return optionalFlags
- },
- "ArgsToTwoColumns": func(a []*ArgModel) [][2]string {
- rows := [][2]string{}
- for _, arg := range a {
- s := "<" + arg.Name + ">"
- if !arg.Required {
- s = "[" + s + "]"
- }
- rows = append(rows, [2]string{s, arg.Help})
- }
- return rows
- },
- "FormatTwoColumns": func(rows [][2]string) string {
- buf := bytes.NewBuffer(nil)
- formatTwoColumns(buf, indent, indent, width, rows)
- return buf.String()
- },
- "FormatTwoColumnsWithIndent": func(rows [][2]string, indent, padding int) string {
- buf := bytes.NewBuffer(nil)
- formatTwoColumns(buf, indent, padding, width, rows)
- return buf.String()
- },
- "FormatAppUsage": formatAppUsage,
- "FormatCommandUsage": formatCmdUsage,
- "IsCumulative": func(value Value) bool {
- r, ok := value.(remainderArg)
- return ok && r.IsCumulative()
- },
- "Char": func(c rune) string {
- return string(c)
- },
- }
- t, err := template.New("usage").Funcs(funcs).Parse(tmpl)
- if err != nil {
- return err
- }
- var selectedCommand *CmdModel
- if context.SelectedCommand != nil {
- selectedCommand = context.SelectedCommand.Model()
- }
- ctx := templateContext{
- App: a.Model(),
- Width: width,
- Context: &templateParseContext{
- SelectedCommand: selectedCommand,
- FlagGroupModel: context.flags.Model(),
- ArgGroupModel: context.arguments.Model(),
- },
- }
- return t.Execute(a.usageWriter, ctx)
-}
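
[Editor's note] Usage rendering moves to the module as well; Application.Usage and UsageForContext keep the signatures shown in the deleted file. A small illustrative sketch (flag name assumed, not from this diff):

package main

import (
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example", "Illustrative application.")
	app.Flag("listen", "Hypothetical listen address.").Default(":9108").String()
	// Usage parses args to pick the right help context, then renders the
	// usage template via UsageForContextWithTemplate (see the file deleted above).
	app.Usage(os.Args[1:])
}
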
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values.go
deleted file mode 100644
index 7ee9a3b3..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/values.go
+++ /dev/null
@@ -1,470 +0,0 @@
-package kingpin
-
-//go:generate go run ./cmd/genvalues/main.go
-
-import (
- "fmt"
- "net"
- "net/url"
- "os"
- "reflect"
- "regexp"
- "strings"
- "time"
-
- "github.com/alecthomas/units"
-)
-
-// NOTE: Most of the base type values were lifted from:
-// http://golang.org/src/pkg/flag/flag.go?s=20146:20222
-
-// Value is the interface to the dynamic value stored in a flag.
-// (The default value is represented as a string.)
-//
-// If a Value has an IsBoolFlag() bool method returning true, the command-line
-// parser makes --name equivalent to -name=true rather than using the next
-// command-line argument, and adds a --no-name counterpart for negating the
-// flag.
-type Value interface {
- String() string
- Set(string) error
-}
-
-// Getter is an interface that allows the contents of a Value to be retrieved.
-// It wraps the Value interface, rather than being part of it, because it
-// appeared after Go 1 and its compatibility rules. All Value types provided
-// by this package satisfy the Getter interface.
-type Getter interface {
- Value
- Get() interface{}
-}
-
-// Optional interface to indicate boolean flags that don't accept a value, and
-// implicitly have a --no- negation counterpart.
-type boolFlag interface {
- Value
- IsBoolFlag() bool
-}
-
-// Optional interface for arguments that cumulatively consume all remaining
-// input.
-type remainderArg interface {
- Value
- IsCumulative() bool
-}
-
-// Optional interface for flags that can be repeated.
-type repeatableFlag interface {
- Value
- IsCumulative() bool
-}
-
-type accumulator struct {
- element func(value interface{}) Value
- typ reflect.Type
- slice reflect.Value
-}
-
-// Use reflection to accumulate values into a slice.
-//
-// target := []string{}
-// newAccumulator(&target, func (value interface{}) Value {
-// return newStringValue(value.(*string))
-// })
-func newAccumulator(slice interface{}, element func(value interface{}) Value) *accumulator {
- typ := reflect.TypeOf(slice)
- if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Slice {
- panic("expected a pointer to a slice")
- }
- return &accumulator{
- element: element,
- typ: typ.Elem().Elem(),
- slice: reflect.ValueOf(slice),
- }
-}
-
-func (a *accumulator) String() string {
- out := []string{}
- s := a.slice.Elem()
- for i := 0; i < s.Len(); i++ {
- out = append(out, a.element(s.Index(i).Addr().Interface()).String())
- }
- return strings.Join(out, ",")
-}
-
-func (a *accumulator) Set(value string) error {
- e := reflect.New(a.typ)
- if err := a.element(e.Interface()).Set(value); err != nil {
- return err
- }
- slice := reflect.Append(a.slice.Elem(), e.Elem())
- a.slice.Elem().Set(slice)
- return nil
-}
-
-func (a *accumulator) Get() interface{} {
- return a.slice.Interface()
-}
-
-func (a *accumulator) IsCumulative() bool {
- return true
-}
-
-func (b *boolValue) IsBoolFlag() bool { return true }
-
-// -- time.Duration Value
-type durationValue time.Duration
-
-func newDurationValue(p *time.Duration) *durationValue {
- return (*durationValue)(p)
-}
-
-func (d *durationValue) Set(s string) error {
- v, err := time.ParseDuration(s)
- *d = durationValue(v)
- return err
-}
-
-func (d *durationValue) Get() interface{} { return time.Duration(*d) }
-
-func (d *durationValue) String() string { return (*time.Duration)(d).String() }
-
-// -- map[string]string Value
-type stringMapValue map[string]string
-
-func newStringMapValue(p *map[string]string) *stringMapValue {
- return (*stringMapValue)(p)
-}
-
-var stringMapRegex = regexp.MustCompile("[:=]")
-
-func (s *stringMapValue) Set(value string) error {
- parts := stringMapRegex.Split(value, 2)
- if len(parts) != 2 {
- return fmt.Errorf("expected KEY=VALUE got '%s'", value)
- }
- (*s)[parts[0]] = parts[1]
- return nil
-}
-
-func (s *stringMapValue) Get() interface{} {
- return (map[string]string)(*s)
-}
-
-func (s *stringMapValue) String() string {
- return fmt.Sprintf("%s", map[string]string(*s))
-}
-
-func (s *stringMapValue) IsCumulative() bool {
- return true
-}
-
-// -- net.IP Value
-type ipValue net.IP
-
-func newIPValue(p *net.IP) *ipValue {
- return (*ipValue)(p)
-}
-
-func (i *ipValue) Set(value string) error {
- if ip := net.ParseIP(value); ip == nil {
- return fmt.Errorf("'%s' is not an IP address", value)
- } else {
- *i = *(*ipValue)(&ip)
- return nil
- }
-}
-
-func (i *ipValue) Get() interface{} {
- return (net.IP)(*i)
-}
-
-func (i *ipValue) String() string {
- return (*net.IP)(i).String()
-}
-
-// -- *net.TCPAddr Value
-type tcpAddrValue struct {
- addr **net.TCPAddr
-}
-
-func newTCPAddrValue(p **net.TCPAddr) *tcpAddrValue {
- return &tcpAddrValue{p}
-}
-
-func (i *tcpAddrValue) Set(value string) error {
- if addr, err := net.ResolveTCPAddr("tcp", value); err != nil {
- return fmt.Errorf("'%s' is not a valid TCP address: %s", value, err)
- } else {
- *i.addr = addr
- return nil
- }
-}
-
-func (t *tcpAddrValue) Get() interface{} {
- return (*net.TCPAddr)(*t.addr)
-}
-
-func (i *tcpAddrValue) String() string {
- return (*i.addr).String()
-}
-
-// -- existingFile Value
-
-type fileStatValue struct {
- path *string
- predicate func(os.FileInfo) error
-}
-
-func newFileStatValue(p *string, predicate func(os.FileInfo) error) *fileStatValue {
- return &fileStatValue{
- path: p,
- predicate: predicate,
- }
-}
-
-func (e *fileStatValue) Set(value string) error {
- if s, err := os.Stat(value); os.IsNotExist(err) {
- return fmt.Errorf("path '%s' does not exist", value)
- } else if err != nil {
- return err
- } else if err := e.predicate(s); err != nil {
- return err
- }
- *e.path = value
- return nil
-}
-
-func (f *fileStatValue) Get() interface{} {
- return (string)(*f.path)
-}
-
-func (e *fileStatValue) String() string {
- return *e.path
-}
-
-// -- os.File value
-
-type fileValue struct {
- f **os.File
- flag int
- perm os.FileMode
-}
-
-func newFileValue(p **os.File, flag int, perm os.FileMode) *fileValue {
- return &fileValue{p, flag, perm}
-}
-
-func (f *fileValue) Set(value string) error {
- if fd, err := os.OpenFile(value, f.flag, f.perm); err != nil {
- return err
- } else {
- *f.f = fd
- return nil
- }
-}
-
-func (f *fileValue) Get() interface{} {
- return (*os.File)(*f.f)
-}
-
-func (f *fileValue) String() string {
- if *f.f == nil {
- return ""
- }
- return (*f.f).Name()
-}
-
-// -- url.URL Value
-type urlValue struct {
- u **url.URL
-}
-
-func newURLValue(p **url.URL) *urlValue {
- return &urlValue{p}
-}
-
-func (u *urlValue) Set(value string) error {
- if url, err := url.Parse(value); err != nil {
- return fmt.Errorf("invalid URL: %s", err)
- } else {
- *u.u = url
- return nil
- }
-}
-
-func (u *urlValue) Get() interface{} {
- return (*url.URL)(*u.u)
-}
-
-func (u *urlValue) String() string {
- if *u.u == nil {
- return ""
- }
- return (*u.u).String()
-}
-
-// -- []*url.URL Value
-type urlListValue []*url.URL
-
-func newURLListValue(p *[]*url.URL) *urlListValue {
- return (*urlListValue)(p)
-}
-
-func (u *urlListValue) Set(value string) error {
- if url, err := url.Parse(value); err != nil {
- return fmt.Errorf("invalid URL: %s", err)
- } else {
- *u = append(*u, url)
- return nil
- }
-}
-
-func (u *urlListValue) Get() interface{} {
- return ([]*url.URL)(*u)
-}
-
-func (u *urlListValue) String() string {
- out := []string{}
- for _, url := range *u {
- out = append(out, url.String())
- }
- return strings.Join(out, ",")
-}
-
-func (u *urlListValue) IsCumulative() bool {
- return true
-}
-
-// A flag whose value must be in a set of options.
-type enumValue struct {
- value *string
- options []string
-}
-
-func newEnumFlag(target *string, options ...string) *enumValue {
- return &enumValue{
- value: target,
- options: options,
- }
-}
-
-func (a *enumValue) String() string {
- return *a.value
-}
-
-func (a *enumValue) Set(value string) error {
- for _, v := range a.options {
- if v == value {
- *a.value = value
- return nil
- }
- }
- return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(a.options, ","), value)
-}
-
-func (e *enumValue) Get() interface{} {
- return (string)(*e.value)
-}
-
-// -- []string Enum Value
-type enumsValue struct {
- value *[]string
- options []string
-}
-
-func newEnumsFlag(target *[]string, options ...string) *enumsValue {
- return &enumsValue{
- value: target,
- options: options,
- }
-}
-
-func (s *enumsValue) Set(value string) error {
- for _, v := range s.options {
- if v == value {
- *s.value = append(*s.value, value)
- return nil
- }
- }
- return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(s.options, ","), value)
-}
-
-func (e *enumsValue) Get() interface{} {
- return ([]string)(*e.value)
-}
-
-func (s *enumsValue) String() string {
- return strings.Join(*s.value, ",")
-}
-
-func (s *enumsValue) IsCumulative() bool {
- return true
-}
-
-// -- units.Base2Bytes Value
-type bytesValue units.Base2Bytes
-
-func newBytesValue(p *units.Base2Bytes) *bytesValue {
- return (*bytesValue)(p)
-}
-
-func (d *bytesValue) Set(s string) error {
- v, err := units.ParseBase2Bytes(s)
- *d = bytesValue(v)
- return err
-}
-
-func (d *bytesValue) Get() interface{} { return units.Base2Bytes(*d) }
-
-func (d *bytesValue) String() string { return (*units.Base2Bytes)(d).String() }
-
-func newExistingFileValue(target *string) *fileStatValue {
- return newFileStatValue(target, func(s os.FileInfo) error {
- if s.IsDir() {
- return fmt.Errorf("'%s' is a directory", s.Name())
- }
- return nil
- })
-}
-
-func newExistingDirValue(target *string) *fileStatValue {
- return newFileStatValue(target, func(s os.FileInfo) error {
- if !s.IsDir() {
- return fmt.Errorf("'%s' is a file", s.Name())
- }
- return nil
- })
-}
-
-func newExistingFileOrDirValue(target *string) *fileStatValue {
- return newFileStatValue(target, func(s os.FileInfo) error { return nil })
-}
-
-type counterValue int
-
-func newCounterValue(n *int) *counterValue {
- return (*counterValue)(n)
-}
-
-func (c *counterValue) Set(s string) error {
- *c++
- return nil
-}
-
-func (c *counterValue) Get() interface{} { return (int)(*c) }
-func (c *counterValue) IsBoolFlag() bool { return true }
-func (c *counterValue) String() string { return fmt.Sprintf("%d", *c) }
-func (c *counterValue) IsCumulative() bool { return true }
-
-func resolveHost(value string) (net.IP, error) {
- if ip := net.ParseIP(value); ip != nil {
- return ip, nil
- } else {
- if addr, err := net.ResolveIPAddr("ip", value); err != nil {
- return nil, err
- } else {
- return addr.IP, nil
- }
- }
-}
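
[Editor's note] values.go defines the Value and Getter interfaces plus the reflection-based accumulator; both stay available from the module. A hedged sketch of a custom Value hooked in through SetValue (the Settings interface from parsers.go); the type and flag name are hypothetical:

package main

import (
	"fmt"
	"strings"

	"gopkg.in/alecthomas/kingpin.v2"
)

// upperValue implements kingpin.Value (Set/String, as declared in the deleted values.go)
// and upper-cases whatever it receives.
type upperValue struct{ s *string }

func (u *upperValue) Set(v string) error { *u.s = strings.ToUpper(v); return nil }
func (u *upperValue) String() string     { return *u.s }

func main() {
	target := new(string)
	kingpin.Flag("mode", "Hypothetical flag backed by a custom Value.").SetValue(&upperValue{target})
	kingpin.Parse()
	fmt.Println(*target)
}
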
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values.json b/vendor/gopkg.in/alecthomas/kingpin.v2/values.json
deleted file mode 100644
index 23c67448..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/values.json
+++ /dev/null
@@ -1,25 +0,0 @@
-[
- {"type": "bool", "parser": "strconv.ParseBool(s)"},
- {"type": "string", "parser": "s, error(nil)", "format": "string(*f.v)", "plural": "Strings"},
- {"type": "uint", "parser": "strconv.ParseUint(s, 0, 64)", "plural": "Uints"},
- {"type": "uint8", "parser": "strconv.ParseUint(s, 0, 8)"},
- {"type": "uint16", "parser": "strconv.ParseUint(s, 0, 16)"},
- {"type": "uint32", "parser": "strconv.ParseUint(s, 0, 32)"},
- {"type": "uint64", "parser": "strconv.ParseUint(s, 0, 64)"},
- {"type": "int", "parser": "strconv.ParseFloat(s, 64)", "plural": "Ints"},
- {"type": "int8", "parser": "strconv.ParseInt(s, 0, 8)"},
- {"type": "int16", "parser": "strconv.ParseInt(s, 0, 16)"},
- {"type": "int32", "parser": "strconv.ParseInt(s, 0, 32)"},
- {"type": "int64", "parser": "strconv.ParseInt(s, 0, 64)"},
- {"type": "float64", "parser": "strconv.ParseFloat(s, 64)"},
- {"type": "float32", "parser": "strconv.ParseFloat(s, 32)"},
- {"name": "Duration", "type": "time.Duration", "no_value_parser": true},
- {"name": "IP", "type": "net.IP", "no_value_parser": true},
- {"name": "TCPAddr", "Type": "*net.TCPAddr", "plural": "TCPList", "no_value_parser": true},
- {"name": "ExistingFile", "Type": "string", "plural": "ExistingFiles", "no_value_parser": true},
- {"name": "ExistingDir", "Type": "string", "plural": "ExistingDirs", "no_value_parser": true},
- {"name": "ExistingFileOrDir", "Type": "string", "plural": "ExistingFilesOrDirs", "no_value_parser": true},
- {"name": "Regexp", "Type": "*regexp.Regexp", "parser": "regexp.Compile(s)"},
- {"name": "ResolvedIP", "Type": "net.IP", "parser": "resolveHost(s)", "help": "Resolve a hostname or IP to an IP."},
- {"name": "HexBytes", "Type": "[]byte", "parser": "hex.DecodeString(s)", "help": "Bytes as a hex string."}
-]
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go
deleted file mode 100644
index 8d492bf9..00000000
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/values_generated.go
+++ /dev/null
@@ -1,821 +0,0 @@
-package kingpin
-
-import (
- "encoding/hex"
- "fmt"
- "net"
- "regexp"
- "strconv"
- "time"
-)
-
-// This file is autogenerated by "go generate .". Do not modify.
-
-// -- bool Value
-type boolValue struct{ v *bool }
-
-func newBoolValue(p *bool) *boolValue {
- return &boolValue{p}
-}
-
-func (f *boolValue) Set(s string) error {
- v, err := strconv.ParseBool(s)
- if err == nil {
- *f.v = (bool)(v)
- }
- return err
-}
-
-func (f *boolValue) Get() interface{} { return (bool)(*f.v) }
-
-func (f *boolValue) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Bool parses the next command-line value as bool.
-func (p *parserMixin) Bool() (target *bool) {
- target = new(bool)
- p.BoolVar(target)
- return
-}
-
-func (p *parserMixin) BoolVar(target *bool) {
- p.SetValue(newBoolValue(target))
-}
-
-// BoolList accumulates bool values into a slice.
-func (p *parserMixin) BoolList() (target *[]bool) {
- target = new([]bool)
- p.BoolListVar(target)
- return
-}
-
-func (p *parserMixin) BoolListVar(target *[]bool) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newBoolValue(v.(*bool))
- }))
-}
-
-// -- string Value
-type stringValue struct{ v *string }
-
-func newStringValue(p *string) *stringValue {
- return &stringValue{p}
-}
-
-func (f *stringValue) Set(s string) error {
- v, err := s, error(nil)
- if err == nil {
- *f.v = (string)(v)
- }
- return err
-}
-
-func (f *stringValue) Get() interface{} { return (string)(*f.v) }
-
-func (f *stringValue) String() string { return string(*f.v) }
-
-// String parses the next command-line value as string.
-func (p *parserMixin) String() (target *string) {
- target = new(string)
- p.StringVar(target)
- return
-}
-
-func (p *parserMixin) StringVar(target *string) {
- p.SetValue(newStringValue(target))
-}
-
-// Strings accumulates string values into a slice.
-func (p *parserMixin) Strings() (target *[]string) {
- target = new([]string)
- p.StringsVar(target)
- return
-}
-
-func (p *parserMixin) StringsVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newStringValue(v.(*string))
- }))
-}
-
-// -- uint Value
-type uintValue struct{ v *uint }
-
-func newUintValue(p *uint) *uintValue {
- return &uintValue{p}
-}
-
-func (f *uintValue) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 64)
- if err == nil {
- *f.v = (uint)(v)
- }
- return err
-}
-
-func (f *uintValue) Get() interface{} { return (uint)(*f.v) }
-
-func (f *uintValue) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Uint parses the next command-line value as uint.
-func (p *parserMixin) Uint() (target *uint) {
- target = new(uint)
- p.UintVar(target)
- return
-}
-
-func (p *parserMixin) UintVar(target *uint) {
- p.SetValue(newUintValue(target))
-}
-
-// Uints accumulates uint values into a slice.
-func (p *parserMixin) Uints() (target *[]uint) {
- target = new([]uint)
- p.UintsVar(target)
- return
-}
-
-func (p *parserMixin) UintsVar(target *[]uint) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUintValue(v.(*uint))
- }))
-}
-
-// -- uint8 Value
-type uint8Value struct{ v *uint8 }
-
-func newUint8Value(p *uint8) *uint8Value {
- return &uint8Value{p}
-}
-
-func (f *uint8Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 8)
- if err == nil {
- *f.v = (uint8)(v)
- }
- return err
-}
-
-func (f *uint8Value) Get() interface{} { return (uint8)(*f.v) }
-
-func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Uint8 parses the next command-line value as uint8.
-func (p *parserMixin) Uint8() (target *uint8) {
- target = new(uint8)
- p.Uint8Var(target)
- return
-}
-
-func (p *parserMixin) Uint8Var(target *uint8) {
- p.SetValue(newUint8Value(target))
-}
-
-// Uint8List accumulates uint8 values into a slice.
-func (p *parserMixin) Uint8List() (target *[]uint8) {
- target = new([]uint8)
- p.Uint8ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint8ListVar(target *[]uint8) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint8Value(v.(*uint8))
- }))
-}
-
-// -- uint16 Value
-type uint16Value struct{ v *uint16 }
-
-func newUint16Value(p *uint16) *uint16Value {
- return &uint16Value{p}
-}
-
-func (f *uint16Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 16)
- if err == nil {
- *f.v = (uint16)(v)
- }
- return err
-}
-
-func (f *uint16Value) Get() interface{} { return (uint16)(*f.v) }
-
-func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Uint16 parses the next command-line value as uint16.
-func (p *parserMixin) Uint16() (target *uint16) {
- target = new(uint16)
- p.Uint16Var(target)
- return
-}
-
-func (p *parserMixin) Uint16Var(target *uint16) {
- p.SetValue(newUint16Value(target))
-}
-
-// Uint16List accumulates uint16 values into a slice.
-func (p *parserMixin) Uint16List() (target *[]uint16) {
- target = new([]uint16)
- p.Uint16ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint16ListVar(target *[]uint16) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint16Value(v.(*uint16))
- }))
-}
-
-// -- uint32 Value
-type uint32Value struct{ v *uint32 }
-
-func newUint32Value(p *uint32) *uint32Value {
- return &uint32Value{p}
-}
-
-func (f *uint32Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 32)
- if err == nil {
- *f.v = (uint32)(v)
- }
- return err
-}
-
-func (f *uint32Value) Get() interface{} { return (uint32)(*f.v) }
-
-func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Uint32 parses the next command-line value as uint32.
-func (p *parserMixin) Uint32() (target *uint32) {
- target = new(uint32)
- p.Uint32Var(target)
- return
-}
-
-func (p *parserMixin) Uint32Var(target *uint32) {
- p.SetValue(newUint32Value(target))
-}
-
-// Uint32List accumulates uint32 values into a slice.
-func (p *parserMixin) Uint32List() (target *[]uint32) {
- target = new([]uint32)
- p.Uint32ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint32ListVar(target *[]uint32) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint32Value(v.(*uint32))
- }))
-}
-
-// -- uint64 Value
-type uint64Value struct{ v *uint64 }
-
-func newUint64Value(p *uint64) *uint64Value {
- return &uint64Value{p}
-}
-
-func (f *uint64Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 64)
- if err == nil {
- *f.v = (uint64)(v)
- }
- return err
-}
-
-func (f *uint64Value) Get() interface{} { return (uint64)(*f.v) }
-
-func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Uint64 parses the next command-line value as uint64.
-func (p *parserMixin) Uint64() (target *uint64) {
- target = new(uint64)
- p.Uint64Var(target)
- return
-}
-
-func (p *parserMixin) Uint64Var(target *uint64) {
- p.SetValue(newUint64Value(target))
-}
-
-// Uint64List accumulates uint64 values into a slice.
-func (p *parserMixin) Uint64List() (target *[]uint64) {
- target = new([]uint64)
- p.Uint64ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint64ListVar(target *[]uint64) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint64Value(v.(*uint64))
- }))
-}
-
-// -- int Value
-type intValue struct{ v *int }
-
-func newIntValue(p *int) *intValue {
- return &intValue{p}
-}
-
-func (f *intValue) Set(s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err == nil {
- *f.v = (int)(v)
- }
- return err
-}
-
-func (f *intValue) Get() interface{} { return (int)(*f.v) }
-
-func (f *intValue) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Int parses the next command-line value as int.
-func (p *parserMixin) Int() (target *int) {
- target = new(int)
- p.IntVar(target)
- return
-}
-
-func (p *parserMixin) IntVar(target *int) {
- p.SetValue(newIntValue(target))
-}
-
-// Ints accumulates int values into a slice.
-func (p *parserMixin) Ints() (target *[]int) {
- target = new([]int)
- p.IntsVar(target)
- return
-}
-
-func (p *parserMixin) IntsVar(target *[]int) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newIntValue(v.(*int))
- }))
-}
-
-// -- int8 Value
-type int8Value struct{ v *int8 }
-
-func newInt8Value(p *int8) *int8Value {
- return &int8Value{p}
-}
-
-func (f *int8Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 8)
- if err == nil {
- *f.v = (int8)(v)
- }
- return err
-}
-
-func (f *int8Value) Get() interface{} { return (int8)(*f.v) }
-
-func (f *int8Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Int8 parses the next command-line value as int8.
-func (p *parserMixin) Int8() (target *int8) {
- target = new(int8)
- p.Int8Var(target)
- return
-}
-
-func (p *parserMixin) Int8Var(target *int8) {
- p.SetValue(newInt8Value(target))
-}
-
-// Int8List accumulates int8 values into a slice.
-func (p *parserMixin) Int8List() (target *[]int8) {
- target = new([]int8)
- p.Int8ListVar(target)
- return
-}
-
-func (p *parserMixin) Int8ListVar(target *[]int8) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt8Value(v.(*int8))
- }))
-}
-
-// -- int16 Value
-type int16Value struct{ v *int16 }
-
-func newInt16Value(p *int16) *int16Value {
- return &int16Value{p}
-}
-
-func (f *int16Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 16)
- if err == nil {
- *f.v = (int16)(v)
- }
- return err
-}
-
-func (f *int16Value) Get() interface{} { return (int16)(*f.v) }
-
-func (f *int16Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Int16 parses the next command-line value as int16.
-func (p *parserMixin) Int16() (target *int16) {
- target = new(int16)
- p.Int16Var(target)
- return
-}
-
-func (p *parserMixin) Int16Var(target *int16) {
- p.SetValue(newInt16Value(target))
-}
-
-// Int16List accumulates int16 values into a slice.
-func (p *parserMixin) Int16List() (target *[]int16) {
- target = new([]int16)
- p.Int16ListVar(target)
- return
-}
-
-func (p *parserMixin) Int16ListVar(target *[]int16) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt16Value(v.(*int16))
- }))
-}
-
-// -- int32 Value
-type int32Value struct{ v *int32 }
-
-func newInt32Value(p *int32) *int32Value {
- return &int32Value{p}
-}
-
-func (f *int32Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 32)
- if err == nil {
- *f.v = (int32)(v)
- }
- return err
-}
-
-func (f *int32Value) Get() interface{} { return (int32)(*f.v) }
-
-func (f *int32Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Int32 parses the next command-line value as int32.
-func (p *parserMixin) Int32() (target *int32) {
- target = new(int32)
- p.Int32Var(target)
- return
-}
-
-func (p *parserMixin) Int32Var(target *int32) {
- p.SetValue(newInt32Value(target))
-}
-
-// Int32List accumulates int32 values into a slice.
-func (p *parserMixin) Int32List() (target *[]int32) {
- target = new([]int32)
- p.Int32ListVar(target)
- return
-}
-
-func (p *parserMixin) Int32ListVar(target *[]int32) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt32Value(v.(*int32))
- }))
-}
-
-// -- int64 Value
-type int64Value struct{ v *int64 }
-
-func newInt64Value(p *int64) *int64Value {
- return &int64Value{p}
-}
-
-func (f *int64Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 64)
- if err == nil {
- *f.v = (int64)(v)
- }
- return err
-}
-
-func (f *int64Value) Get() interface{} { return (int64)(*f.v) }
-
-func (f *int64Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Int64 parses the next command-line value as int64.
-func (p *parserMixin) Int64() (target *int64) {
- target = new(int64)
- p.Int64Var(target)
- return
-}
-
-func (p *parserMixin) Int64Var(target *int64) {
- p.SetValue(newInt64Value(target))
-}
-
-// Int64List accumulates int64 values into a slice.
-func (p *parserMixin) Int64List() (target *[]int64) {
- target = new([]int64)
- p.Int64ListVar(target)
- return
-}
-
-func (p *parserMixin) Int64ListVar(target *[]int64) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt64Value(v.(*int64))
- }))
-}
-
-// -- float64 Value
-type float64Value struct{ v *float64 }
-
-func newFloat64Value(p *float64) *float64Value {
- return &float64Value{p}
-}
-
-func (f *float64Value) Set(s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err == nil {
- *f.v = (float64)(v)
- }
- return err
-}
-
-func (f *float64Value) Get() interface{} { return (float64)(*f.v) }
-
-func (f *float64Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Float64 parses the next command-line value as float64.
-func (p *parserMixin) Float64() (target *float64) {
- target = new(float64)
- p.Float64Var(target)
- return
-}
-
-func (p *parserMixin) Float64Var(target *float64) {
- p.SetValue(newFloat64Value(target))
-}
-
-// Float64List accumulates float64 values into a slice.
-func (p *parserMixin) Float64List() (target *[]float64) {
- target = new([]float64)
- p.Float64ListVar(target)
- return
-}
-
-func (p *parserMixin) Float64ListVar(target *[]float64) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newFloat64Value(v.(*float64))
- }))
-}
-
-// -- float32 Value
-type float32Value struct{ v *float32 }
-
-func newFloat32Value(p *float32) *float32Value {
- return &float32Value{p}
-}
-
-func (f *float32Value) Set(s string) error {
- v, err := strconv.ParseFloat(s, 32)
- if err == nil {
- *f.v = (float32)(v)
- }
- return err
-}
-
-func (f *float32Value) Get() interface{} { return (float32)(*f.v) }
-
-func (f *float32Value) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Float32 parses the next command-line value as float32.
-func (p *parserMixin) Float32() (target *float32) {
- target = new(float32)
- p.Float32Var(target)
- return
-}
-
-func (p *parserMixin) Float32Var(target *float32) {
- p.SetValue(newFloat32Value(target))
-}
-
-// Float32List accumulates float32 values into a slice.
-func (p *parserMixin) Float32List() (target *[]float32) {
- target = new([]float32)
- p.Float32ListVar(target)
- return
-}
-
-func (p *parserMixin) Float32ListVar(target *[]float32) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newFloat32Value(v.(*float32))
- }))
-}
-
-// DurationList accumulates time.Duration values into a slice.
-func (p *parserMixin) DurationList() (target *[]time.Duration) {
- target = new([]time.Duration)
- p.DurationListVar(target)
- return
-}
-
-func (p *parserMixin) DurationListVar(target *[]time.Duration) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newDurationValue(v.(*time.Duration))
- }))
-}
-
-// IPList accumulates net.IP values into a slice.
-func (p *parserMixin) IPList() (target *[]net.IP) {
- target = new([]net.IP)
- p.IPListVar(target)
- return
-}
-
-func (p *parserMixin) IPListVar(target *[]net.IP) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newIPValue(v.(*net.IP))
- }))
-}
-
-// TCPList accumulates *net.TCPAddr values into a slice.
-func (p *parserMixin) TCPList() (target *[]*net.TCPAddr) {
- target = new([]*net.TCPAddr)
- p.TCPListVar(target)
- return
-}
-
-func (p *parserMixin) TCPListVar(target *[]*net.TCPAddr) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newTCPAddrValue(v.(**net.TCPAddr))
- }))
-}
-
-// ExistingFiles accumulates string values into a slice.
-func (p *parserMixin) ExistingFiles() (target *[]string) {
- target = new([]string)
- p.ExistingFilesVar(target)
- return
-}
-
-func (p *parserMixin) ExistingFilesVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newExistingFileValue(v.(*string))
- }))
-}
-
-// ExistingDirs accumulates string values into a slice.
-func (p *parserMixin) ExistingDirs() (target *[]string) {
- target = new([]string)
- p.ExistingDirsVar(target)
- return
-}
-
-func (p *parserMixin) ExistingDirsVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newExistingDirValue(v.(*string))
- }))
-}
-
-// ExistingFilesOrDirs accumulates string values into a slice.
-func (p *parserMixin) ExistingFilesOrDirs() (target *[]string) {
- target = new([]string)
- p.ExistingFilesOrDirsVar(target)
- return
-}
-
-func (p *parserMixin) ExistingFilesOrDirsVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newExistingFileOrDirValue(v.(*string))
- }))
-}
-
-// -- *regexp.Regexp Value
-type regexpValue struct{ v **regexp.Regexp }
-
-func newRegexpValue(p **regexp.Regexp) *regexpValue {
-	return &regexpValue{p}
-}
-
-func (f *regexpValue) Set(s string) error {
- v, err := regexp.Compile(s)
- if err == nil {
- *f.v = (*regexp.Regexp)(v)
- }
- return err
-}
-
-func (f *regexpValue) Get() interface{} { return (*regexp.Regexp)(*f.v) }
-
-func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Regexp parses the next command-line value as *regexp.Regexp.
-func (p *parserMixin) Regexp() (target **regexp.Regexp) {
- target = new(*regexp.Regexp)
- p.RegexpVar(target)
- return
-}
-
-func (p *parserMixin) RegexpVar(target **regexp.Regexp) {
- p.SetValue(newRegexpValue(target))
-}
-
-// RegexpList accumulates *regexp.Regexp values into a slice.
-func (p *parserMixin) RegexpList() (target *[]*regexp.Regexp) {
- target = new([]*regexp.Regexp)
- p.RegexpListVar(target)
- return
-}
-
-func (p *parserMixin) RegexpListVar(target *[]*regexp.Regexp) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newRegexpValue(v.(**regexp.Regexp))
- }))
-}
-
-// -- net.IP Value
-type resolvedIPValue struct{ v *net.IP }
-
-func newResolvedIPValue(p *net.IP) *resolvedIPValue {
- return &resolvedIPValue{p}
-}
-
-func (f *resolvedIPValue) Set(s string) error {
- v, err := resolveHost(s)
- if err == nil {
- *f.v = (net.IP)(v)
- }
- return err
-}
-
-func (f *resolvedIPValue) Get() interface{} { return (net.IP)(*f.v) }
-
-func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Resolve a hostname or IP to an IP.
-func (p *parserMixin) ResolvedIP() (target *net.IP) {
- target = new(net.IP)
- p.ResolvedIPVar(target)
- return
-}
-
-func (p *parserMixin) ResolvedIPVar(target *net.IP) {
- p.SetValue(newResolvedIPValue(target))
-}
-
-// ResolvedIPList accumulates net.IP values into a slice.
-func (p *parserMixin) ResolvedIPList() (target *[]net.IP) {
- target = new([]net.IP)
- p.ResolvedIPListVar(target)
- return
-}
-
-func (p *parserMixin) ResolvedIPListVar(target *[]net.IP) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newResolvedIPValue(v.(*net.IP))
- }))
-}
-
-// -- []byte Value
-type hexBytesValue struct{ v *[]byte }
-
-func newHexBytesValue(p *[]byte) *hexBytesValue {
- return &hexBytesValue{p}
-}
-
-func (f *hexBytesValue) Set(s string) error {
- v, err := hex.DecodeString(s)
- if err == nil {
- *f.v = ([]byte)(v)
- }
- return err
-}
-
-func (f *hexBytesValue) Get() interface{} { return ([]byte)(*f.v) }
-
-func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f.v) }
-
-// Bytes as a hex string.
-func (p *parserMixin) HexBytes() (target *[]byte) {
- target = new([]byte)
- p.HexBytesVar(target)
- return
-}
-
-func (p *parserMixin) HexBytesVar(target *[]byte) {
- p.SetValue(newHexBytesValue(target))
-}
-
-// HexBytesList accumulates []byte values into a slice.
-func (p *parserMixin) HexBytesList() (target *[][]byte) {
- target = new([][]byte)
- p.HexBytesListVar(target)
- return
-}
-
-func (p *parserMixin) HexBytesListVar(target *[][]byte) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newHexBytesValue(v.(*[]byte))
- }))
-}
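
[Editor's note] values_generated.go is the `go generate` output driven by values.json above; it provides the per-type accessors (Bool, Int, Strings, HexBytes, ...) and their repeatable list variants built on the accumulator. A short hypothetical sketch of the repeatable form:

package main

import (
	"fmt"

	"gopkg.in/alecthomas/kingpin.v2"
)

func main() {
	app := kingpin.New("example", "Illustrative application.")
	// Strings() wires in the generated accumulator, so the flag may be repeated.
	tags := app.Flag("tag", "Hypothetical repeatable flag.").Strings()
	if _, err := app.Parse([]string{"--tag=a", "--tag=b"}); err != nil {
		panic(err)
	}
	fmt.Println(*tags) // [a b]
}
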
diff --git a/vendor/vendor.json b/vendor/vendor.json
deleted file mode 100644
index bc6c9abd..00000000
--- a/vendor/vendor.json
+++ /dev/null
@@ -1,133 +0,0 @@
-{
- "comment": "",
- "ignore": "test",
- "package": [
- {
- "checksumSHA1": "KmjnydoAbofMieIWm+it5OWERaM=",
- "path": "github.com/alecthomas/template",
- "revision": "a0175ee3bccc567396460bf5acd36800cb10c49c",
- "revisionTime": "2016-04-05T07:15:01Z"
- },
- {
- "checksumSHA1": "3wt0pTXXeS+S93unwhGoLIyGX/Q=",
- "path": "github.com/alecthomas/template/parse",
- "revision": "a0175ee3bccc567396460bf5acd36800cb10c49c",
- "revisionTime": "2016-04-05T07:15:01Z"
- },
- {
- "checksumSHA1": "fCc3grA7vIxfBru7R3SqjcW+oLI=",
- "path": "github.com/alecthomas/units",
- "revision": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a",
- "revisionTime": "2015-10-22T06:55:26Z"
- },
- {
- "checksumSHA1": "4QnLdmB1kG3N+KlDd1N+G9TWAGQ=",
- "path": "github.com/beorn7/perks/quantile",
- "revision": "3ac7bf7a47d159a033b107610db8a1b6575507a4",
- "revisionTime": "2016-02-29T21:34:45Z"
- },
- {
- "checksumSHA1": "f423Rwr9K0TMRFwPgJgjojAmZd8=",
- "path": "github.com/blang/semver",
- "revision": "3c1074078d32d767e08ab2c8564867292da86926",
- "revisionTime": "2018-07-15T01:52:53Z"
- },
- {
- "checksumSHA1": "bzCBeQrZ+LxbYyJbp7uDILX6t7c=",
- "path": "github.com/go-kit/kit/log",
- "revision": "0c52d4024a04395dabac42b65e5825d08d3335df",
- "revisionTime": "2017-06-20T08:06:29Z"
- },
- {
- "checksumSHA1": "t7aTpDH0h4BZcGU0KkUr14QQG2w=",
- "path": "github.com/go-kit/kit/log/level",
- "revision": "0c52d4024a04395dabac42b65e5825d08d3335df",
- "revisionTime": "2017-06-20T08:06:29Z"
- },
- {
- "checksumSHA1": "KxX/Drph+byPXBFIXaCZaCOAnrU=",
- "path": "github.com/go-logfmt/logfmt",
- "revision": "390ab7935ee28ec6b286364bba9b4dd6410cb3d5",
- "revisionTime": "2016-11-15T14:25:13Z"
- },
- {
- "checksumSHA1": "VcLNrvCcnZuSnebTxMRyNtzY3ZY=",
- "path": "github.com/go-stack/stack",
- "revision": "7a2f19628aabfe68f0766b59e74d6315f8347d22",
- "revisionTime": "2017-05-04T03:43:18Z"
- },
- {
- "checksumSHA1": "FczzogSoZcKU3h21tCUyHzMsnBY=",
- "path": "github.com/golang/protobuf/proto",
- "revision": "7cc19b78d562895b13596ddce7aafb59dd789318",
- "revisionTime": "2016-04-25T21:53:00Z"
- },
- {
- "checksumSHA1": "yw6trQTxmryPs2hvnI5LHNw3xjc=",
- "path": "github.com/imdario/mergo",
- "revision": "ca3dcc1022bae9b5510f3c83705b72db1b1a96f9",
- "revisionTime": "2018-11-07T19:11:38Z"
- },
- {
- "checksumSHA1": "abKzFXAn0KDr5U+JON1ZgJ2lUtU=",
- "path": "github.com/kr/logfmt",
- "revision": "b84e30acd515aadc4b783ad4ff83aff3299bdfe0",
- "revisionTime": "2014-02-26T03:06:59Z"
- },
- {
- "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=",
- "path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
- "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
- "revisionTime": "2016-04-24T11:30:07Z"
- },
- {
- "checksumSHA1": "E9hLErovYB3phLHwM/OoyL0d+hs=",
- "path": "github.com/prometheus/client_golang/prometheus",
- "revision": "28be15864ef9ba05d74fa6fd13b928fd250e8f01",
- "revisionTime": "2016-07-11T22:22:38Z"
- },
- {
- "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
- "path": "github.com/prometheus/client_model/go",
- "revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
- "revisionTime": "2015-02-12T10:17:44Z"
- },
- {
- "checksumSHA1": "DOkbYZAGVR8Xv/dqg5PZbsS6Aeg=",
- "path": "github.com/prometheus/common/expfmt",
- "revision": "ba1c9185b796b0ca48e40c09edf7951b1a18f349",
- "revisionTime": "2016-05-17T13:00:36Z"
- },
- {
- "checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
- "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
- "revision": "ba1c9185b796b0ca48e40c09edf7951b1a18f349",
- "revisionTime": "2016-05-17T13:00:36Z"
- },
- {
- "checksumSHA1": "0LL9u9tfv1KPBjNEiMDP6q7lpog=",
- "path": "github.com/prometheus/common/model",
- "revision": "3233b24a36715a2fe36d162260457857a56d672d",
- "revisionTime": "2017-06-15T07:20:16Z"
- },
- {
- "checksumSHA1": "91KYK0SpvkaMJJA2+BcxbVnyRO0=",
- "path": "github.com/prometheus/common/version",
- "revision": "bc8b88226a1210b016e9993b1d75f858c9c8f778",
- "revisionTime": "2017-08-30T19:05:55Z"
- },
- {
- "checksumSHA1": "W218eJZPXJG783fUr/z6IaAZyes=",
- "path": "github.com/prometheus/procfs",
- "revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5",
- "revisionTime": "2016-04-11T19:08:41Z"
- },
- {
- "checksumSHA1": "sToCp8GThnMnsBzsHv+L/tBYQrQ=",
- "path": "gopkg.in/alecthomas/kingpin.v2",
- "revision": "947dcec5ba9c011838740e680966fd7087a71d0d",
- "revisionTime": "2017-12-17T18:08:21Z"
- }
- ],
- "rootPath": "github.com/justwatchcom/elasticsearch_exporter"
-}