diff --git a/.github/env b/.github/env
index 5c85627..623c884 100644
--- a/.github/env
+++ b/.github/env
@@ -1 +1 @@
-golang-version=1.19.x
+golang-version=1.23.x
diff --git a/Dockerfile b/Dockerfile
index ae8dd6a..0136370 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM --platform=$BUILDPLATFORM golang:1.19-alpine3.16 as builder
+FROM --platform=$BUILDPLATFORM golang:1.23-alpine3.21 as builder
 
 RUN apk add --update --no-cache ca-certificates tzdata git make bash && update-ca-certificates
 
diff --git a/Makefile b/Makefile
index 4f9433b..531a608 100644
--- a/Makefile
+++ b/Makefile
@@ -2,6 +2,7 @@ include .bingo/Variables.mk
 SHELL=/usr/bin/env bash -o pipefail
 
 TMP_DIR := $(shell pwd)/tmp
+TOOLS_BIN_DIR ?= $(shell pwd)/tmp/tools/bin
 BIN_DIR ?= $(TMP_DIR)/bin
 LIB_DIR ?= $(TMP_DIR)/lib
 FIRST_GOPATH := $(firstword $(subst :, ,$(shell go env GOPATH)))
@@ -18,7 +19,7 @@ DOCKER_REPO ?= quay.io/observatorium/token-refresher
 
 THANOS ?= $(BIN_DIR)/thanos
 THANOS_VERSION ?= 0.13.0
-OBSERVATORIUM ?= $(BIN_DIR)/observatorium
+API ?= $(BIN_DIR)/api
 UP ?= $(BIN_DIR)/up
 HYDRA ?= $(BIN_DIR)/hydra
 GOLANGCILINT ?= $(FIRST_GOPATH)/bin/golangci-lint
@@ -27,6 +28,8 @@ EMBEDMD ?= $(BIN_DIR)/embedmd
 SHELLCHECK ?= $(BIN_DIR)/shellcheck
 MEMCACHED ?= $(BIN_DIR)/memcached
 
+TOOLING=$(API) $(UP) $(EMBEDMD)
+
 default: token-refresher
 all: clean lint test token-refresher
 
@@ -72,7 +75,7 @@ test-unit:
 
 .PHONY: test-integration
 test-integration: build integration-test-dependencies
-	PATH=$(BIN_DIR):$(FIRST_GOPATH)/bin:$$PATH LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(LIB_DIR) ./test/integration.sh
+	PATH=$(TOOLS_BIN_DIR):$(BIN_DIR):$(FIRST_GOPATH)/bin:$$PATH LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(LIB_DIR) ./test/integration.sh
 
 .PHONY: clean
 clean:
@@ -154,7 +157,7 @@ container-release-build-push: container-build-push
 	.
 
 .PHONY: integration-test-dependencies
-integration-test-dependencies: $(THANOS) $(UP) $(HYDRA) $(OBSERVATORIUM)
+integration-test-dependencies: $(THANOS) $(UP) $(HYDRA) $(API)
 
 $(BIN_DIR):
 	mkdir -p $(BIN_DIR)
@@ -166,18 +169,10 @@ $(THANOS): | $(BIN_DIR)
 	@echo "Downloading Thanos"
 	curl -L "https://github.com/thanos-io/thanos/releases/download/v$(THANOS_VERSION)/thanos-$(THANOS_VERSION).$$(go env GOOS)-$$(go env GOARCH).tar.gz" | tar --strip-components=1 -xzf - -C $(BIN_DIR)
 
-$(OBSERVATORIUM): | vendor $(BIN_DIR)
-	go build -mod=vendor -o $@ github.com/observatorium/observatorium
-
-$(UP): | vendor $(BIN_DIR)
-	go build -mod=vendor -o $@ github.com/observatorium/up/cmd/up
-
 $(HYDRA): | vendor $(BIN_DIR)
 	@echo "Downloading Hydra"
 	curl -L "https://github.com/ory/hydra/releases/download/v1.7.4/hydra_1.7.4_linux_64-bit.tar.gz" | tar -xzf - -C $(BIN_DIR) hydra
 
-$(EMBEDMD): | vendor $(BIN_DIR)
-	go build -mod=vendor -o $@ github.com/campoy/embedmd
 
 $(GOLANGCILINT):
 	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCILINT_VERSION)/install.sh \
@@ -186,3 +181,10 @@ $(GOLANGCILINT):
 
 $(SHELLCHECK): | $(BIN_DIR)
 	curl -sNL "https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.$(OS).$(ARCH).tar.xz" | tar --strip-components=1 -xJf - -C $(BIN_DIR)
+
+$(TOOLS_BIN_DIR):
+	mkdir -p $(TOOLS_BIN_DIR)
+
+$(TOOLING): $(TOOLS_BIN_DIR)
+	@echo Installing tools from tools/tools.go
+	@cat tools/tools.go | grep _ | awk -F'"' '{print $$2}' | GOBIN=$(TOOLS_BIN_DIR) xargs -tI % go install -mod=readonly -modfile=tools/go.mod %
\ No newline at end of file
diff --git a/go.mod b/go.mod
index a6bef9a..0582ac8 100644
--- a/go.mod
+++ b/go.mod
@@ -1,139 +1,34 @@
 module github.com/observatorium/token-refresher
 
-go 1.19
+go 1.23.3
+
+toolchain go1.23.9
 
 require (
-	github.com/campoy/embedmd v1.0.0
 	github.com/coreos/go-oidc v2.2.1+incompatible
-	github.com/go-kit/kit v0.10.0
-	github.com/metalmatze/signal v0.0.0-20201002154727-d0c16e42a3cf
-	github.com/observatorium/observatorium v0.1.2-0.20201001162514-585e9d38dea7
-	github.com/observatorium/up v0.0.0-20200603110215-8a20b4e48ac0
+	github.com/go-kit/kit v0.13.0
+	github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a
 	github.com/oklog/run v1.1.0
-	github.com/prometheus/client_golang v1.5.1
+	github.com/prometheus/client_golang v1.21.1
 	github.com/spf13/pflag v1.0.5
-	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
+	golang.org/x/oauth2 v0.28.0
 )
 
 require (
-	cloud.google.com/go v0.46.3 // indirect
-	github.com/BurntSushi/toml v0.3.1 // indirect
-	github.com/OneOfOne/xxhash v1.2.7 // indirect
-	github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
-	github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
-	github.com/beevik/etree v1.1.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/brancz/gojsontoyaml v0.0.0-20191212081931-bf2969bbd742 // indirect
-	github.com/brancz/kube-rbac-proxy v0.5.0 // indirect
-	github.com/cespare/xxhash v1.1.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.1 // indirect
-	github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 // indirect
-	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
-	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
-	github.com/dexidp/dex v0.0.0-20200512115545-709d4169d646 // indirect
-	github.com/fatih/color v1.9.0 // indirect
-	github.com/felixge/httpsnoop v1.0.1 // indirect
-
github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-chi/chi v4.0.4+incompatible // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect - github.com/go-pluto/styx v0.0.0-20200109161911-78a77eb717b4 // indirect - github.com/go-sql-driver/mysql v1.5.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/gogo/protobuf v1.3.1 // indirect - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - github.com/golang/protobuf v1.4.0 // indirect - github.com/golang/snappy v0.0.1 // indirect - github.com/google/go-jsonnet v0.15.1-0.20200310221949-724650d358b6 // indirect - github.com/google/uuid v1.1.1 // indirect - github.com/googleapis/gax-go/v2 v2.0.5 // indirect - github.com/gorilla/handlers v1.4.2 // indirect - github.com/gorilla/mux v1.7.3 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.14.3 // indirect - github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/hashicorp/go-multierror v1.0.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/instrumenta/kubeval v0.0.0-20200515185822-7721cbec724c // indirect - github.com/jonboulle/clockwork v0.1.0 // indirect - github.com/json-iterator/go v1.1.9 // indirect - github.com/jsonnet-bundler/jsonnet-bundler v0.3.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/lib/pq v1.3.0 // indirect - github.com/magiconair/properties v1.8.1 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/mattn/go-runewidth v0.0.4 // indirect - github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.3.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/olekukonko/tablewriter v0.0.1 // indirect - github.com/onsi/gomega v1.7.1 // indirect - github.com/open-policy-agent/opa v0.23.2 // indirect - github.com/opencontainers/runc v1.0.0-rc9 // indirect - github.com/pelletier/go-toml v1.8.0 // indirect - github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.9.1 // indirect - github.com/prometheus/procfs v0.0.11 // indirect - github.com/prometheus/prometheus v1.8.2-0.20200305080338-7164b58945bb // indirect - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect - github.com/russellhaering/goxmldsig v0.0.0-20180430223755-7acd5e4a6ef7 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect - github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect - github.com/sirupsen/logrus v1.6.0 // indirect - github.com/spf13/afero v1.2.2 // indirect - github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/cobra v1.0.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - 
github.com/spf13/viper v1.7.0 // indirect - github.com/subosito/gotenv v1.2.0 // indirect - github.com/urfave/cli v1.22.2 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v0.0.0-20180816142147-da425ebb7609 // indirect - github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect - go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 // indirect - go.opencensus.io v0.22.2 // indirect - go.uber.org/atomic v1.5.1 // indirect - go.uber.org/automaxprocs v1.2.0 // indirect - go.uber.org/multierr v1.3.0 // indirect - go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee // indirect - go.uber.org/zap v1.13.0 // indirect - golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 // indirect - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/mod v0.2.0 // indirect - golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect - golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 // indirect - golang.org/x/text v0.3.2 // indirect - golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 // indirect - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect - google.golang.org/api v0.17.0 // indirect - google.golang.org/appengine v1.6.5 // indirect - google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 // indirect - google.golang.org/grpc v1.27.1 // indirect - google.golang.org/protobuf v1.21.0 // indirect - gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect - gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect - gopkg.in/fsnotify.v1 v1.4.7 // indirect - gopkg.in/ini.v1 v1.57.0 // indirect - gopkg.in/ldap.v2 v2.5.1 // indirect - gopkg.in/square/go-jose.v2 v2.5.0 // indirect - gopkg.in/yaml.v2 v2.3.0 // indirect - honnef.co/go/tools v0.0.1-2019.2.3 // indirect - k8s.io/apimachinery v0.18.0 // indirect - k8s.io/component-base v0.18.0 // indirect - k8s.io/klog v1.0.0 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/sys v0.32.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect ) diff --git a/go.sum b/go.sum index 19af4ed..2c4cef5 100644 --- a/go.sum +++ b/go.sum @@ -1,1124 +1,136 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod 
h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v39.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.5/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= -github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio 
v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/OneOfOne/xxhash v1.2.7 h1:fzrmmkskv067ZQbd9wERNGuxckWw67dyzoMG62p7LMo= -github.com/OneOfOne/xxhash v1.2.7/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod 
h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.29.4/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= -github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/brancz/gojsontoyaml v0.0.0-20191212081931-bf2969bbd742 h1:PdvQdwUXiFnSmWsOJcBXLpyH3mJfP2FMPTT3J0i7+8o= -github.com/brancz/gojsontoyaml v0.0.0-20191212081931-bf2969bbd742/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= -github.com/brancz/kube-rbac-proxy v0.5.0 h1:RdMeazKvTXwH66i9DeVtEV/o0XAqyhSaiWkZ8gj54x4= -github.com/brancz/kube-rbac-proxy v0.5.0/go.mod h1:cL2VjiIFGS90Cjh5ZZ8+It6tMcBt8rwvuw2J6Mamnl0= -github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= -github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test 
v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= -github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= -github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= -github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 h1:kIFnQBO7rQ0XkMe6xEwbybYHBEaWmh/f++laI6Emt7M= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dexidp/dex v0.0.0-20200512115545-709d4169d646 h1:H43NOKZO38H3Ob2U9xZyo3Tmq353+EtLgtzgHToMUIw= -github.com/dexidp/dex v0.0.0-20200512115545-709d4169d646/go.mod h1:7ic4FXokPpBxUHngMrp5MZQv4j6ixPoqF+1zC6QRlhw= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661 h1:ZuxGvIvF01nfc/G9RJ5Q7Va1zQE2WJyG18Zv3DqCEf4= -github.com/docker/docker v0.7.3-0.20190506211059-b20a14b54661/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units 
v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-chi/chi v4.0.2+incompatible/go.mod 
h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-chi/chi v4.0.4+incompatible h1:7fVnpr0gAXG15uDbtH+LwSeMztvIvlHrBNRkTzgphS0= -github.com/go-chi/chi v4.0.4+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-pluto/styx v0.0.0-20200109161911-78a77eb717b4 h1:RxvOqfwCmWV+aJrvn1auXRNTOFbwPAoJsUoRQm4M/Eo= -github.com/go-pluto/styx v0.0.0-20200109161911-78a77eb717b4/go.mod h1:QIi7pCRNlknwBqmStvcUU/3kQkvcIsVWZV9+eveU6mo= -github.com/go-redis/redis v6.15.6+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod 
h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v0.0.0-20181025225059-d3de96c4c28e/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-jsonnet v0.15.1-0.20200310221949-724650d358b6 h1:OuONH3JuUOrOWvAauOJALiciem0duSrjiTXGGBgPcLU= -github.com/google/go-jsonnet v0.15.1-0.20200310221949-724650d358b6/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.8.0/go.mod h1:Kc/QKr9thLKruO/dG0szY8kRIYS+iENz0ziI0hJf76A= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= -github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v0.0.0-20181024020800-521ea7b17d02/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2/go.mod 
h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= -github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 h1:7xsUJsB2NrdcttQPa7JLEaGzvdbk7KvfrjgHZXOQRo0= -github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= 
-github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/instrumenta/kubeval v0.0.0-20200515185822-7721cbec724c h1:tF3B96upB2wECZMXZxrAMLiVUgT22sNNxhuOhrcg28s= -github.com/instrumenta/kubeval v0.0.0-20200515185822-7721cbec724c/go.mod h1:cD+P/oZrBwOnaIHXrqvKPuN353KPxGomnsXSXf8pFJs= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= -github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod 
h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jsonnet-bundler/jsonnet-bundler v0.3.1 h1:KmNzitX12fFoyqjhU8cRifEB5D8x1NT1UAcK7FQ0zpY= -github.com/jsonnet-bundler/jsonnet-bundler v0.3.1/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= -github.com/kylelemons/godebug 
v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= -github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/metalmatze/signal v0.0.0-20200616171423-be84551ba3ce/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= -github.com/metalmatze/signal v0.0.0-20201002154727-d0c16e42a3cf h1:g30K3kg3GSpKUn4ShNPrCpL4s9sePqmyu5e0K2O4nxA= -github.com/metalmatze/signal v0.0.0-20201002154727-d0c16e42a3cf/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180715050151-f15292f7a699/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQovz3p+BiQ81Jy845xSMu2CWKuXsXuUM= +github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= -github.com/observatorium/observatorium v0.1.2-0.20201001162514-585e9d38dea7 h1:DscvtKuRTfOpTxqpTsCq3SAo1yznMjAKwvApSbNUF3Q= -github.com/observatorium/observatorium v0.1.2-0.20201001162514-585e9d38dea7/go.mod h1:YV2UOq/lbGqpfVpgS5B6k+6Frr65ivCoaObnYVVpSVs= -github.com/observatorium/up v0.0.0-20200603110215-8a20b4e48ac0 h1:tbkKJiQkVbGfz7hEm3L+8HLEl7ZFzPuM9CNOVodQsmo= -github.com/observatorium/up v0.0.0-20200603110215-8a20b4e48ac0/go.mod h1:ysto9FMxd/1rHh9N9l1JfHLjT3X7ix4oWjMfnyrpqsY= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-policy-agent/opa v0.23.2 h1:co9fPjnLPwnvaEThBJjCb5E2iAyvW95Qq2PvSOEIwGE= -github.com/open-policy-agent/opa v0.23.2/go.mod h1:rrwxoT/b011T0cyj+gg2VvxqTtn6N3gp/jzmr3fjW44= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v0.0.0-20180724185102-c2dbbc24a979/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw= -github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod 
h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d h1:zapSxdmZYY6vJWXFKLQ+MkI+agc+HQyfrCGowDSHiKs= -github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU= -github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= -github.com/prometheus/client_golang v0.0.0-20181025174421-f30f42803563/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= 
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/prometheus v1.8.2-0.20200305080338-7164b58945bb h1:pq+E7+eub6NcumRLSxoTXFUQiNQ/6RH6Jolw3S9xzKU= -github.com/prometheus/prometheus v1.8.2-0.20200305080338-7164b58945bb/go.mod h1:/feD9J3z9H+U+gA6A8c4nZ+hB6kJuyelr0XHOF4eNWM= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= -github.com/rcrowley/go-metrics 
v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russellhaering/goxmldsig v0.0.0-20180430223755-7acd5e4a6ef7 h1:J4AOUcOh/t1XbQcJfkEqhzgvMJ2tDxdCVvmHxW5QXao= -github.com/russellhaering/goxmldsig v0.0.0-20180430223755-7acd5e4a6ef7/go.mod h1:Oz4y6ImuOQZxynhbSXk7btjEfNBtGlj2dcaOvXl2FSM= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= -github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.0-20180820174524-ff0d02e85550/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.0-20181021141114-fe5e611709b0/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v0.0.0-20180814060501-14d3d4c51834/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20180821114517-d929dcbb1086/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v0.0.0-20181024212040-082b515c9490/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.1.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper 
v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/testcontainers/testcontainers-go v0.0.9 h1:mwvFz+FkuQMqQ9oLkG4cVzPsZTRmrCo2NcaerJNaptA= -github.com/testcontainers/testcontainers-go v0.0.9/go.mod h1:0Qe9qqjNZgxHzzdHPWwmQ2D49FFO7920hLdJ4yUJXJI= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= -github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= 
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v0.0.0-20180816142147-da425ebb7609 h1:BcMExZAULPkihVZ7UJXK7t8rwGqisXFw75tILnafhBY= -github.com/xeipuuv/gojsonschema v0.0.0-20180816142147-da425ebb7609/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY= -github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= -github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= -github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= -github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/automaxprocs v1.2.0 
h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA= -go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp 
v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181023182221-1baf3a9d7d67/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= +golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180821044426-4ea2f632f6e9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20180810153555-6e3c4e7365dd/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190813214729-9dba7caff850/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto 
v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 h1:j7CmVRD4Kec0+f8VuBAc2Ak2MFfXm5Q2/RxuJLL+76E= -google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU= -gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.0 h1:OZ4sdq+Y+SHfYB7vfthi1Ei8b0vkP8ZPQgUfUwdUSqo= -gopkg.in/square/go-jose.v2 v2.5.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHxNKTk96REAwqm/inQbs0= -k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= -k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= -k8s.io/apimachinery v0.0.0-20191121175448-79c2a76c473a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE= -k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ= -k8s.io/client-go v0.0.0-20191122220542-ed16ecbdf3a0/go.mod h1:tyxNgOmR/Xi39HrlQ/9LQgiHJgBvmY7gp95o5GpBA4o= -k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= -k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4= -k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= -k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc= -k8s.io/component-base v0.18.0 h1:I+lP0fNfsEdTDpHaL61bCAqTZLoiWjEEP304Mo5ZQgE= -k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 
h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/test/integration.sh b/test/integration.sh
index f883c4e..b403e99 100755
--- a/test/integration.sh
+++ b/test/integration.sh
@@ -30,7 +30,7 @@ curl \
   http://127.0.0.1:4445/clients
 
 (
-  observatorium \
+  api \
   --web.listen=0.0.0.0:8443 \
   --web.internal.listen=0.0.0.0:8448 \
   --web.healthchecks.url=http://127.0.0.1:8443 \
diff --git a/tools.go b/tools.go
deleted file mode 100644
index 4e1b9f8..0000000
--- a/tools.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build tools
-// +build tools
-
-package main
-
-import (
-	_ "github.com/campoy/embedmd"
-	_ "github.com/observatorium/observatorium"
-	_ "github.com/observatorium/up/cmd/up"
-)
diff --git a/tools/go.mod b/tools/go.mod
new file mode 100644
index 0000000..9a3a689
--- /dev/null
+++ b/tools/go.mod
@@ -0,0 +1,76 @@
+module github.com/observatorium/token-refresher/tools
+
+go 1.23.3
+
+toolchain go1.23.9
+
+require (
+	github.com/campoy/embedmd v1.0.0
+	github.com/observatorium/api v0.1.2
+	github.com/observatorium/up v0.0.0-20240109123005-e1e1857b0b6e
+)
+
+require (
+	github.com/OneOfOne/xxhash v1.2.7 // indirect
+	github.com/apache/thrift v0.13.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/brancz/kube-rbac-proxy v0.5.0 // indirect
+	github.com/cespare/xxhash v1.1.0 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/coreos/go-oidc v2.2.1+incompatible // indirect
+	github.com/felixge/httpsnoop v1.0.1 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-chi/chi v4.0.2+incompatible // indirect
+	github.com/go-chi/httprate v0.4.0 // indirect
+	github.com/go-kit/kit v0.10.0 // indirect
+	github.com/go-kit/log v0.2.1 // indirect
+	github.com/go-logfmt/logfmt v0.6.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
+	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
+	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway v1.13.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+	github.com/metalmatze/signal v0.0.0-20201002154727-d0c16e42a3cf // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/oklog/run v1.1.0 // indirect
+	github.com/open-policy-agent/opa v0.23.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
+	github.com/prometheus/client_golang v1.17.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.45.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
+	github.com/prometheus/prometheus v1.8.2-0.20200305080338-7164b58945bb // indirect
+	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
+	go.opentelemetry.io/contrib v0.16.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.16.0 // indirect
+	go.opentelemetry.io/contrib/propagators v0.16.0 // indirect
+	go.opentelemetry.io/otel v0.16.0 // indirect
+	go.opentelemetry.io/otel/exporters/trace/jaeger v0.16.0 // indirect
+	go.opentelemetry.io/otel/sdk v0.16.0 // indirect
+	go.uber.org/automaxprocs v1.2.0 // indirect
+	golang.org/x/crypto v0.14.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/oauth2 v0.12.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/sys v0.13.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/api v0.39.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d // indirect
+	google.golang.org/grpc v1.34.0 // indirect
+	google.golang.org/protobuf v1.31.0 // indirect
+	gopkg.in/square/go-jose.v2 v2.4.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	k8s.io/apimachinery v0.18.0 // indirect
+	k8s.io/component-base v0.18.0 // indirect
+	k8s.io/klog v1.0.0 // indirect
+)
diff --git a/tools/go.sum b/tools/go.sum
new file mode 100644
index 0000000..805b84e
--- /dev/null
+++ b/tools/go.sum
@@ -0,0 +1,1114 @@
+bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v39.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.5/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/NYTimes/gziphandler 
v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.7 h1:fzrmmkskv067ZQbd9wERNGuxckWw67dyzoMG62p7LMo= +github.com/OneOfOne/xxhash v1.2.7/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.4/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/brancz/kube-rbac-proxy v0.5.0 h1:RdMeazKvTXwH66i9DeVtEV/o0XAqyhSaiWkZ8gj54x4= +github.com/brancz/kube-rbac-proxy v0.5.0/go.mod h1:cL2VjiIFGS90Cjh5ZZ8+It6tMcBt8rwvuw2J6Mamnl0= +github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= +github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= +github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= +github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= 
+github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ= +github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful 
v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-chi/chi v4.0.2+incompatible h1:maB6vn6FqCxrpz4FqWdh4+lwpyZIQS7YEAUcHlgXVRs= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/httprate v0.4.0 h1:M2qVV0w6ksgLs6L8lTrvqNeaVm0ZJNVdbYM8u2T8HaE= +github.com/go-chi/httprate v0.4.0/go.mod h1:7e7qjQtHzEbdyW5TYQrl4X2uNRCnlTajictc7B4ftgc= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod 
h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod 
h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock 
v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v0.0.0-20181025225059-d3de96c4c28e/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.8.0/go.mod h1:Kc/QKr9thLKruO/dG0szY8kRIYS+iENz0ziI0hJf76A= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v0.0.0-20181024020800-521ea7b17d02/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0 h1:sBDQoHXrOlfPobnKw69FIKa1wg9qsLLvvQ/Y19WtFgI= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= 
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= +github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod 
h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/metalmatze/signal v0.0.0-20201002154727-d0c16e42a3cf h1:g30K3kg3GSpKUn4ShNPrCpL4s9sePqmyu5e0K2O4nxA= +github.com/metalmatze/signal v0.0.0-20201002154727-d0c16e42a3cf/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod 
h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/observatorium/api v0.1.2 h1:+9Ds34P/+mtnrFhrjZedvntQjgtH5gKT8zsURaxZ7Cc= +github.com/observatorium/api v0.1.2/go.mod h1:xcTjZ7QpUXMZx8kOuPnfWnVA1Tx0ivOJl+kfT6dDC9Q= +github.com/observatorium/up v0.0.0-20240109123005-e1e1857b0b6e h1:nmJdb3MkRhVngLT9rLvkOU0bksqNz0mB7QsXhtOVigA= +github.com/observatorium/up v0.0.0-20240109123005-e1e1857b0b6e/go.mod h1:06ATHnkbnd7AvcI2GcwUdfS6UKfPzD8bf5LKfd4T89w= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-policy-agent/opa v0.23.2 h1:co9fPjnLPwnvaEThBJjCb5E2iAyvW95Qq2PvSOEIwGE= +github.com/open-policy-agent/opa v0.23.2/go.mod h1:rrwxoT/b011T0cyj+gg2VvxqTtn6N3gp/jzmr3fjW44= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/client_golang v0.0.0-20181025174421-f30f42803563/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/prometheus v1.8.2-0.20200305080338-7164b58945bb h1:pq+E7+eub6NcumRLSxoTXFUQiNQ/6RH6Jolw3S9xzKU= +github.com/prometheus/prometheus v1.8.2-0.20200305080338-7164b58945bb/go.mod h1:/feD9J3z9H+U+gA6A8c4nZ+hB6kJuyelr0XHOF4eNWM= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v0.0.0-20181021141114-fe5e611709b0/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v0.0.0-20181024212040-082b515c9490/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY= +github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= +github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/contrib v0.16.0 h1:cScR/U3bjTjxsBv939wh4miANY/akdP644rsg9msrIA= +go.opentelemetry.io/contrib v0.16.0/go.mod 
h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.16.0 h1:hPbUH5fugPACtUdBWGL5glNqzowwHvnOdnwvQOATWgM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.16.0/go.mod h1:dNF4PMGeouMEPAWDwgEjsGFlod9hAU8oj0TU0w2J19g= +go.opentelemetry.io/contrib/propagators v0.16.0 h1:6M16whHin+Uz2zMefuxmFG5CITQ2Q1CHnt2vKxWcQ+A= +go.opentelemetry.io/contrib/propagators v0.16.0/go.mod h1:5kVVCrfVbGf6mu9Lk6DS91EDrkdneQdqUkJhmZXrOrA= +go.opentelemetry.io/otel v0.16.0 h1:uIWEbdeb4vpKPGITLsRVUS44L5oDbDUCZxn8lkxhmgw= +go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= +go.opentelemetry.io/otel/exporters/trace/jaeger v0.16.0 h1:gOnjphv9uycs+AfZprppdsm/P7B0CVxPr5E1UtT2wqw= +go.opentelemetry.io/otel/exporters/trace/jaeger v0.16.0/go.mod h1:rOjxqmybkP0JdheT4+fUevI2KJuHXTd1fbDzq5Y9Hi8= +go.opentelemetry.io/otel/sdk v0.16.0 h1:5o+fkNsOfH5Mix1bHUApNBqeDcAYczHDa7Ix+R73K2U= +go.opentelemetry.io/otel/sdk v0.16.0/go.mod h1:Jb0B4wrxerxtBeapvstmAZvJGQmvah4dHgKSngDpiCo= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/automaxprocs v1.2.0 h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA= +go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181023182221-1baf3a9d7d67/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.39.0 h1:zHCTXf0NeDdKTgcSQpT+ZflWAqHsEp1GmdpxW09f3YM= +google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c 
h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHxNKTk96REAwqm/inQbs0= +k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= +k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= +k8s.io/apimachinery v0.0.0-20191121175448-79c2a76c473a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= +k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE= +k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ= +k8s.io/client-go v0.0.0-20191122220542-ed16ecbdf3a0/go.mod h1:tyxNgOmR/Xi39HrlQ/9LQgiHJgBvmY7gp95o5GpBA4o= +k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= +k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4= +k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= +k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc= +k8s.io/component-base v0.18.0 h1:I+lP0fNfsEdTDpHaL61bCAqTZLoiWjEEP304Mo5ZQgE= +k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff 
v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 0000000..9ad32bb --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,10 @@ +//go:build tools +// +build tools + +package main + +import ( + _ "github.com/campoy/embedmd" + _ "github.com/observatorium/api" + _ "github.com/observatorium/up/cmd/up" +) diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json b/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json deleted file mode 100644 index ca022cc..0000000 --- a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "metadata", - "name_pretty": "Google Compute Engine Metadata API", - "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata", - "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata", - "release_level": "ga", - "language": "go", - "repo": "googleapis/google-cloud-go", - "distribution_name": "cloud.google.com/go/compute/metadata", - "api_id": "compute:metadata", - "requires_billing": false -} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index 4ff4e2f..0000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. - metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. 
-type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. 
-func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata. -type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. -func NewClient(c *http.Client) *Client { - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. 
- host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := c.hc.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Email(serviceAccount string) (string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. 
-func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance ID string. -func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. 
- val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index 0cd3800..0000000 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 8b8afc4..0000000 --- a/vendor/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip -install: - - go install ./... - - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 6efcfd0..0000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) - diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 01b5743..0000000 --- a/vendor/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 TOML authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848..0000000 --- a/vendor/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md deleted file mode 100644 index 7c1b37e..0000000 --- a/vendor/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,218 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/toml-lang/toml - -Compatible with TOML version -[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) - -Documentation: https://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. 
- -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. 
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index b0fd51d..0000000 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,509 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -func e(format string, args ...interface{}) error { - return fmt.Errorf("toml: "+format, args...) -} - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) - return err -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. -type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. -// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. 
The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) - } - if rv.IsNil() { - return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) - } - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, indirect(rv)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. 
But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("unsupported type %s", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if mapping == nil { - return nil - } - return e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! 
- return e("cannot write unexported field %s.%s", - rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if tmap == nil { - return nil - } - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - n := datav.Len() - if rv.IsNil() || rv.Cap() < n { - rv.Set(reflect.MakeSlice(rv.Type(), n, n)) - } - rv.SetLen(n) - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). -func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanSet() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("cannot load TOML value of type %T into a Go %s", data, expected) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index b9914a6..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,121 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. 
-func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index b371f39..0000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/toml-lang/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. 
These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ -package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index d905c21..0000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,568 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "toml: cannot encode array with mixed element types") - errArrayNilElement = errors.New( - "toml: cannot encode array with nil element") - errNonString = errors.New( - "toml: cannot encode a map with non-string key type") - errAnonNonStruct = errors.New( - "toml: cannot encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "toml: TOML array element cannot contain a table") - errNoKey = errors.New( - "toml: top-level values must be Go maps or structs") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) 
-func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. 
-func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra newline between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. - continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexported fields - if f.PkgPath != "" && !f.Anonymous { - continue - } - frv := rv.Field(i) - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - // Treat anonymous struct fields with - // tag names as though they are not - // anonymous, like encoding/json does. - if getOptions(f.Tag).name == "" { - addFields(t, frv, f.Index) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && - getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), f.Index) - } - continue - } - // Fall through to the normal field encoding logic below - // for non-struct anonymous fields. 
- } - } - - if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. - continue - } - - opts := getOptions(sft.Tag) - if opts.skip { - continue - } - keyName := sft.Name - if opts.name != "" { - keyName = opts.name - } - if opts.omitempty && isEmpty(sf) { - continue - } - if opts.omitzero && isZero(sf) { - continue - } - - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } - return tomlArray - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. 
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -type tagOptions struct { - skip bool // "-" - name string - omitempty bool - omitzero bool -} - -func getOptions(tag reflect.StructTag) tagOptions { - t := tag.Get("toml") - if t == "-" { - return tagOptions{skip: true} - } - var opts tagOptions - parts := strings.Split(t, ",") - opts.name = parts[0] - for _, s := range parts[1:] { - switch s { - case "omitempty": - opts.omitempty = true - case "omitzero": - opts.omitzero = true - } - } - return opts -} - -func isZero(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rv.Uint() == 0 - case reflect.Float32, reflect.Float64: - return rv.Float() == 0.0 - } - return false -} - -func isEmpty(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return rv.Len() == 0 - case reflect.Bool: - return !rv.Bool() - } - return false -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. 
-type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index e0a742a..0000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,953 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart - itemInlineTableStart - itemInlineTableEnd -) - -const ( - eof = 0 - comma = ',' - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' - inlineTableStart = '{' - inlineTableEnd = '}' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - - // Allow for backing up up to three runes. - // This is necessary because TOML contains 3-rune tokens (""" and '''). - prevWidths [3]int - nprev int // how many of prevWidths are in use - // If we emit an eof, we can still back up, but it is not OK to call - // next again. - atEOF bool - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. 
- stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input, - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.atEOF { - panic("next called after EOF") - } - if lx.pos >= len(lx.input) { - lx.atEOF = true - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - lx.prevWidths[2] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[0] - if lx.nprev < 3 { - lx.nprev++ - } - r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.prevWidths[0] = w - lx.pos += w - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only twice between calls to next. -func (lx *lexer) backup() { - if lx.atEOF { - lx.atEOF = false - return - } - if lx.nprev < 1 { - panic("backed up too far") - } - w := lx.prevWidths[0] - lx.prevWidths[0] = lx.prevWidths[1] - lx.prevWidths[1] = lx.prevWidths[2] - lx.nprev-- - lx.pos -= w - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// skip ignores all input that matches the given predicate. -func (lx *lexer) skip(pred func(rune) bool) { - for { - r := lx.next() - if pred(r) { - continue - } - lx.backup() - lx.ignore() - return - } -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (newlines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("unexpected EOF") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) 
It must see only whitespace, and will turn back to lexTop -// upon a newline. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a newline for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.emit(itemEOF) - return nil - } - return lx.errorf("expected a top-level item to end with a newline, "+ - "comment, or EOF, but got %q instead", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("expected end of table array name delimiter %q, "+ - "but got %q instead", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("unexpected end of table name " + - "(table names cannot be empty)") - case r == tableSep: - return lx.errorf("unexpected table separator " + - "(table names cannot be empty)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - default: - return lexBareTableName - } -} - -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. -func lexBareTableName(lx *lexer) stateFn { - r := lx.next() - if isBareKeyChar(r) { - return lexBareTableName - } - lx.backup() - lx.emit(itemText) - return lexTableNameEnd -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == tableSep: - lx.ignore() - return lexTableNameStart - case r == tableEnd: - return lx.pop() - default: - return lx.errorf("expected '.' or ']' to end table name, "+ - "but got %q instead", r) - } -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("unexpected key separator %q", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing - default: - lx.ignore() - lx.emit(itemKeyStart) - return lexBareKey - } -} - -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. 
-func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd - default: - return lx.errorf("bare keys cannot contain %q", r) - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - default: - return lx.errorf("expected key separator %q, but got %q instead", - keySep, r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT newlines. - // In array syntax, the array states are responsible for ignoring newlines. - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - } - switch r { - case arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case inlineTableStart: - lx.ignore() - lx.emit(itemInlineTableStart) - return lexInlineTableValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case '+', '-': - return lexNumberStart - case '.': // special error case, be kind to users - return lx.errorf("floats must start with a digit, not '.'") - } - if unicode.IsLetter(r) { - // Be permissive here; lexBool will give a nice error if the - // user wrote something like - // x = foo - // (i.e. not 'true' or 'false' but is something else word-like.) - lx.backup() - return lexBool - } - return lx.errorf("expected value but found %q instead", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and newlines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == comma: - return lx.errorf("unexpected comma") - case r == arrayEnd: - // NOTE(caleb): The spec isn't clear about whether you can have - // a trailing comma or not, so we'll allow it. - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes everything between the end of an array value and -// the next value (or the end of the array): it ignores whitespace and newlines -// and expects either a ',' or a ']'. 
-func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == comma: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf( - "expected a comma or array terminator %q, but got %q instead", - arrayEnd, r, - ) -} - -// lexArrayEnd finishes the lexing of an array. -// It assumes that a ']' has just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexInlineTableValue consumes one key/value pair in an inline table. -// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. -func lexInlineTableValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValue) - case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: - lx.push(lexInlineTableValue) - return lexCommentStart - case r == comma: - return lx.errorf("unexpected comma") - case r == inlineTableEnd: - return lexInlineTableEnd - } - lx.backup() - lx.push(lexInlineTableValueEnd) - return lexKeyStart -} - -// lexInlineTableValueEnd consumes everything between the end of an inline table -// key/value pair and the next pair (or the end of the table): -// it ignores whitespace and expects either a ',' or a '}'. -func lexInlineTableValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexInlineTableValueEnd) - case isNL(r): - return lx.errorf("newlines not allowed within inline tables") - case r == commentStart: - lx.push(lexInlineTableValueEnd) - return lexCommentStart - case r == comma: - lx.ignore() - return lexInlineTableValue - case r == inlineTableEnd: - return lexInlineTableEnd - } - return lx.errorf("expected a comma or an inline table terminator %q, "+ - "but got %q instead", inlineTableEnd, r) -} - -// lexInlineTableEnd finishes the lexing of an inline table. -// It assumes that a '}' has just been consumed. -func lexInlineTableEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemInlineTableEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == eof: - return lx.errorf("unexpected EOF") - case isNL(r): - return lx.errorf("strings cannot contain newlines") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - switch lx.next() { - case eof: - return lx.errorf("unexpected EOF") - case '\\': - return lexMultilineStringEscape - case stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. 
-func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == eof: - return lx.errorf("unexpected EOF") - case isNL(r): - return lx.errorf("strings cannot contain newlines") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'''" has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - switch lx.next() { - case eof: - return lx.errorf("unexpected EOF") - case rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - return lexMultilineString - } - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("invalid escape character %q; only the following "+ - "escape characters are allowed: "+ - `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf(`expected four hexadecimal digits after '\u', `+ - "but got %q instead", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf(`expected eight hexadecimal digits after '\U', `+ - "but got %q instead", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either an integer, a float, or datetime. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("floats must start with a digit, not '.'") - } - return lx.errorf("expected a digit but got %q", r) -} - -// lexNumberOrDate consumes either an integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '-': - return lexDatetime - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDatetime consumes a Datetime, to a first approximation. -// The parser validates that it matches one of the accepted formats. -func lexDatetime(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDatetime - } - switch r { - case '-', 'T', ':', '.', 'Z', '+': - return lexDatetime - } - - lx.backup() - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. 
It assumes that a sign -// has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // We MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("floats must start with a digit, not '.'") - } - return lx.errorf("expected a digit but got %q", r) - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumber - } - switch r { - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloat consumes the elements of a float. It allows any sequence of -// float-like characters, so floats emitted by the lexer are only a first -// approximation and must be validated by the parser. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - switch r { - case '_', '.', '-', '+', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexBool consumes a bool string: 'true' or 'false. -func lexBool(lx *lexer) stateFn { - var rs []rune - for { - r := lx.next() - if !unicode.IsLetter(r) { - lx.backup() - break - } - rs = append(rs, r) - } - s := string(rs) - switch s { - case "true", "false": - lx.emit(itemBool) - return lx.pop() - } - return lx.errorf("expected value but found %q instead", s) -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first newline character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. 
-func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index 50869ef..0000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,592 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } - } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' 
must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } - } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - case itemInlineTableStart: - var ( - hash = make(map[string]interface{}) - outerContext = p.context - outerKey = p.currentKey - ) - - p.context = append(p.context, p.currentKey) - p.currentKey = "" - for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { - if it.typ != itemKeyStart { - p.bug("Expected key start but instead found %q, around line %d", - it.val, p.approxLine) - } - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - // retrieve key - k := p.next() - p.approxLine = k.line - kname := p.keyString(k) - - // retrieve value - p.currentKey = kname - val, typ := p.value(p.next()) - // make sure we keep metadata up to date - p.setType(kname, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - hash[kname] = val - } - p.context = outerContext - p.currentKey = outerKey - return hash, tomlHash - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// numUnderscoresOK checks whether each underscore in s is surrounded by -// characters that are not underscores. -func numUnderscoresOK(s string) bool { - accept := false - for _, r := range s { - if r == '_' { - if !accept { - return false - } - accept = false - continue - } - accept = true - } - return accept -} - -// numPeriodsOK checks whether every period in s is followed by a digit. -func numPeriodsOK(s string) bool { - period := false - for _, r := range s { - if period && !isDigit(r) { - return false - } - period = r == '.' - } - return !period -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. 
- if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). 
-func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. -func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) 
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - if !utf8.ValidRune(rune(hex)) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164b..0000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8af..0000000 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 608997c..0000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,242 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - opts := getOptions(sf.Tag) - if opts.skip { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := opts.name != "" - name := opts.name - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/OneOfOne/xxhash/.gitignore b/vendor/github.com/OneOfOne/xxhash/.gitignore deleted file mode 100644 index f4faa7f..0000000 --- a/vendor/github.com/OneOfOne/xxhash/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.txt -*.pprof -cmap2/ -cache/ diff --git a/vendor/github.com/OneOfOne/xxhash/.travis.yml b/vendor/github.com/OneOfOne/xxhash/.travis.yml deleted file mode 100644 index 1c6dc55..0000000 --- a/vendor/github.com/OneOfOne/xxhash/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -sudo: false - -go: - - "1.10" - - "1.11" - - "1.12" - - master - -script: - - go test -tags safe ./... - - go test ./... - - diff --git a/vendor/github.com/OneOfOne/xxhash/LICENSE b/vendor/github.com/OneOfOne/xxhash/LICENSE deleted file mode 100644 index 9e30b4f..0000000 --- a/vendor/github.com/OneOfOne/xxhash/LICENSE +++ /dev/null @@ -1,187 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. diff --git a/vendor/github.com/OneOfOne/xxhash/README.md b/vendor/github.com/OneOfOne/xxhash/README.md deleted file mode 100644 index 23174eb..0000000 --- a/vendor/github.com/OneOfOne/xxhash/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# xxhash [![GoDoc](https://godoc.org/github.com/OneOfOne/xxhash?status.svg)](https://godoc.org/github.com/OneOfOne/xxhash) [![Build Status](https://travis-ci.org/OneOfOne/xxhash.svg?branch=master)](https://travis-ci.org/OneOfOne/xxhash) [![Coverage](https://gocover.io/_badge/github.com/OneOfOne/xxhash)](https://gocover.io/github.com/OneOfOne/xxhash) - -This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits. - -* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet) - -## Install - - go get github.com/OneOfOne/xxhash - -## Features - -* On Go 1.7+ the pure go version is faster than CGO for all inputs. -* Supports ChecksumString{32,64} xxhash{32,64}.WriteString, which uses no copies when it can, falls back to copy on appengine. -* The native version falls back to a less optimized version on appengine due to the lack of unsafe. -* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds. -* To manually toggle the appengine version build with `-tags safe`. 
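The removed xxhash sources below expose both one-shot helpers (Checksum64, Checksum64S, ChecksumString64) and streaming hashers built with New64/NewS64, beyond the io.Copy-based usage shown in the deleted README. A minimal sketch of that API, assuming github.com/OneOfOne/xxhash is still resolvable outside the vendor tree; the snippet is illustrative only and is not taken from, or part of, the removed files:

```go
// Sketch of the deleted package's public API (Checksum64, NewS64, Write and
// Sum64 all appear in the removed sources); assumes the upstream module is
// still fetchable now that the vendored copy is gone.
package main

import (
	"fmt"

	"github.com/OneOfOne/xxhash"
)

func main() {
	data := []byte("hello, world")

	// One-shot checksum with the default seed of 0.
	fmt.Printf("one-shot: %x\n", xxhash.Checksum64(data))

	// Streaming hasher with an explicit seed, via the hash.Hash64-style API.
	h := xxhash.NewS64(42)
	_, _ = h.Write(data)
	fmt.Printf("seeded:   %x\n", h.Sum64())
}
```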
- -## Benchmark - -### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19) - -```bash -➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin -name time/op - -# https://github.com/cespare/xxhash -XXSum64Cespare/Func-8 160ns ± 2% -XXSum64Cespare/Struct-8 173ns ± 1% -XXSum64ShortCespare/Func-8 6.78ns ± 1% -XXSum64ShortCespare/Struct-8 19.6ns ± 2% - -# this package (default mode, using unsafe) -XXSum64/Func-8 170ns ± 1% -XXSum64/Struct-8 182ns ± 1% -XXSum64Short/Func-8 13.5ns ± 3% -XXSum64Short/Struct-8 20.4ns ± 0% - -# this package (appengine, *not* using unsafe) -XXSum64/Func-8 241ns ± 5% -XXSum64/Struct-8 243ns ± 6% -XXSum64Short/Func-8 15.2ns ± 2% -XXSum64Short/Struct-8 23.7ns ± 5% - -CRC64ISO-8 1.23µs ± 1% -CRC64ISOString-8 2.71µs ± 4% -CRC64ISOShort-8 22.2ns ± 3% - -Fnv64-8 2.34µs ± 1% -Fnv64Short-8 74.7ns ± 8% -# -``` - -## Usage - -```go - h := xxhash.New64() - // r, err := os.Open("......") - // defer f.Close() - r := strings.NewReader(F) - io.Copy(h, r) - fmt.Println("xxhash.Backend:", xxhash.Backend) - fmt.Println("File checksum:", h.Sum64()) -``` - -[playground](http://play.golang.org/p/rhRN3RdQyd) - -## TODO - -* Rewrite the 32bit version to be more optimized. -* General cleanup as the Go inliner gets smarter. - -## License - -This project is released under the Apache v2. licence. See [LICENCE](LICENCE) for more details. diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash.go b/vendor/github.com/OneOfOne/xxhash/xxhash.go deleted file mode 100644 index 6341506..0000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash.go +++ /dev/null @@ -1,194 +0,0 @@ -package xxhash - -import "hash" - -const ( - prime32x1 uint32 = 2654435761 - prime32x2 uint32 = 2246822519 - prime32x3 uint32 = 3266489917 - prime32x4 uint32 = 668265263 - prime32x5 uint32 = 374761393 - - prime64x1 uint64 = 11400714785074694791 - prime64x2 uint64 = 14029467366897019727 - prime64x3 uint64 = 1609587929392839161 - prime64x4 uint64 = 9650029242287828579 - prime64x5 uint64 = 2870177450012600261 - - maxInt32 int32 = (1<<31 - 1) - - // precomputed zero Vs for seed 0 - zero64x1 = 0x60ea27eeadc0b5d6 - zero64x2 = 0xc2b2ae3d27d4eb4f - zero64x3 = 0x0 - zero64x4 = 0x61c8864e7a143579 -) - -func NewHash32() hash.Hash { return New32() } -func NewHash64() hash.Hash { return New64() } - -// Checksum32 returns the checksum of the input data with the seed set to 0. -func Checksum32(in []byte) uint32 { - return Checksum32S(in, 0) -} - -// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0. -func ChecksumString32(s string) uint32 { - return ChecksumString32S(s, 0) -} - -type XXHash32 struct { - mem [16]byte - ln, memIdx int32 - v1, v2, v3, v4 uint32 - seed uint32 -} - -// Size returns the number of bytes Sum will return. -func (xx *XXHash32) Size() int { - return 4 -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (xx *XXHash32) BlockSize() int { - return 16 -} - -// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed. -func NewS32(seed uint32) (xx *XXHash32) { - xx = &XXHash32{ - seed: seed, - } - xx.Reset() - return -} - -// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0. 
-func New32() *XXHash32 { - return NewS32(0) -} - -func (xx *XXHash32) Reset() { - xx.v1 = xx.seed + prime32x1 + prime32x2 - xx.v2 = xx.seed + prime32x2 - xx.v3 = xx.seed - xx.v4 = xx.seed - prime32x1 - xx.ln, xx.memIdx = 0, 0 -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xx *XXHash32) Sum(in []byte) []byte { - s := xx.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// Checksum64 an alias for Checksum64S(in, 0) -func Checksum64(in []byte) uint64 { - return Checksum64S(in, 0) -} - -// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0. -func ChecksumString64(s string) uint64 { - return ChecksumString64S(s, 0) -} - -type XXHash64 struct { - v1, v2, v3, v4 uint64 - seed uint64 - ln uint64 - mem [32]byte - memIdx int8 -} - -// Size returns the number of bytes Sum will return. -func (xx *XXHash64) Size() int { - return 8 -} - -// BlockSize returns the hash's underlying block size. -// The Write method must be able to accept any amount -// of data, but it may operate more efficiently if all writes -// are a multiple of the block size. -func (xx *XXHash64) BlockSize() int { - return 32 -} - -// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed. -func NewS64(seed uint64) (xx *XXHash64) { - xx = &XXHash64{ - seed: seed, - } - xx.Reset() - return -} - -// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0. -func New64() *XXHash64 { - return NewS64(0) -} - -func (xx *XXHash64) Reset() { - xx.ln, xx.memIdx = 0, 0 - xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed) -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. 
-func (xx *XXHash64) Sum(in []byte) []byte { - s := xx.Sum64() - return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} - -// force the compiler to use ROTL instructions - -func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) } -func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) } -func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) } -func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) } -func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) } -func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) } -func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) } - -func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } - -func mix64(h uint64) uint64 { - h ^= h >> 33 - h *= prime64x2 - h ^= h >> 29 - h *= prime64x3 - h ^= h >> 32 - return h -} - -func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) { - if seed == 0 { - return zero64x1, zero64x2, zero64x3, zero64x4 - } - return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1) -} - -// borrowed from cespare -func round64(h, v uint64) uint64 { - h += v * prime64x2 - h = rotl64_31(h) - h *= prime64x1 - return h -} - -func mergeRound64(h, v uint64) uint64 { - v = round64(0, v) - h ^= v - h = h*prime64x1 + prime64x4 - return h -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go b/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go deleted file mode 100644 index ae48e0c..0000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go +++ /dev/null @@ -1,161 +0,0 @@ -package xxhash - -func u32(in []byte) uint32 { - return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 -} - -func u64(in []byte) uint64 { - return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56 -} - -// Checksum32S returns the checksum of the input bytes with the specific seed. 
-func Checksum32S(in []byte, seed uint32) (h uint32) { - var i int - - if len(in) > 15 { - var ( - v1 = seed + prime32x1 + prime32x2 - v2 = seed + prime32x2 - v3 = seed + 0 - v4 = seed - prime32x1 - ) - for ; i < len(in)-15; i += 16 { - in := in[i : i+16 : len(in)] - v1 += u32(in[0:4:len(in)]) * prime32x2 - v1 = rotl32_13(v1) * prime32x1 - - v2 += u32(in[4:8:len(in)]) * prime32x2 - v2 = rotl32_13(v2) * prime32x1 - - v3 += u32(in[8:12:len(in)]) * prime32x2 - v3 = rotl32_13(v3) * prime32x1 - - v4 += u32(in[12:16:len(in)]) * prime32x2 - v4 = rotl32_13(v4) * prime32x1 - } - - h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4) - - } else { - h = seed + prime32x5 - } - - h += uint32(len(in)) - for ; i <= len(in)-4; i += 4 { - in := in[i : i+4 : len(in)] - h += u32(in[0:4:len(in)]) * prime32x3 - h = rotl32_17(h) * prime32x4 - } - - for ; i < len(in); i++ { - h += uint32(in[i]) * prime32x5 - h = rotl32_11(h) * prime32x1 - } - - h ^= h >> 15 - h *= prime32x2 - h ^= h >> 13 - h *= prime32x3 - h ^= h >> 16 - - return -} - -func (xx *XXHash32) Write(in []byte) (n int, err error) { - i, ml := 0, int(xx.memIdx) - n = len(in) - xx.ln += int32(n) - - if d := 16 - ml; ml > 0 && ml+len(in) > 16 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d])) - ml, in = 16, in[d:len(in):len(in)] - } else if ml+len(in) < 16 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in)) - return - } - - if ml > 0 { - i += 16 - ml - xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in)) - in := xx.mem[:16:len(xx.mem)] - - xx.v1 += u32(in[0:4:len(in)]) * prime32x2 - xx.v1 = rotl32_13(xx.v1) * prime32x1 - - xx.v2 += u32(in[4:8:len(in)]) * prime32x2 - xx.v2 = rotl32_13(xx.v2) * prime32x1 - - xx.v3 += u32(in[8:12:len(in)]) * prime32x2 - xx.v3 = rotl32_13(xx.v3) * prime32x1 - - xx.v4 += u32(in[12:16:len(in)]) * prime32x2 - xx.v4 = rotl32_13(xx.v4) * prime32x1 - - xx.memIdx = 0 - } - - for ; i <= len(in)-16; i += 16 { - in := in[i : i+16 : len(in)] - xx.v1 += u32(in[0:4:len(in)]) * prime32x2 - xx.v1 = rotl32_13(xx.v1) * prime32x1 - - xx.v2 += u32(in[4:8:len(in)]) * prime32x2 - xx.v2 = rotl32_13(xx.v2) * prime32x1 - - xx.v3 += u32(in[8:12:len(in)]) * prime32x2 - xx.v3 = rotl32_13(xx.v3) * prime32x1 - - xx.v4 += u32(in[12:16:len(in)]) * prime32x2 - xx.v4 = rotl32_13(xx.v4) * prime32x1 - } - - if len(in)-i != 0 { - xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)])) - } - - return -} - -func (xx *XXHash32) Sum32() (h uint32) { - var i int32 - if xx.ln > 15 { - h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4) - } else { - h = xx.seed + prime32x5 - } - - h += uint32(xx.ln) - - if xx.memIdx > 0 { - for ; i < xx.memIdx-3; i += 4 { - in := xx.mem[i : i+4 : len(xx.mem)] - h += u32(in[0:4:len(in)]) * prime32x3 - h = rotl32_17(h) * prime32x4 - } - - for ; i < xx.memIdx; i++ { - h += uint32(xx.mem[i]) * prime32x5 - h = rotl32_11(h) * prime32x1 - } - } - h ^= h >> 15 - h *= prime32x2 - h ^= h >> 13 - h *= prime32x3 - h ^= h >> 16 - - return -} - -// Checksum64S returns the 64bit xxhash checksum for a single input -func Checksum64S(in []byte, seed uint64) uint64 { - if len(in) == 0 && seed == 0 { - return 0xef46db3751d8e999 - } - - if len(in) > 31 { - return checksum64(in, seed) - } - - return checksum64Short(in, seed) -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go deleted file mode 100644 index e92ec29..0000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go +++ /dev/null @@ -1,183 +0,0 @@ -// +build 
appengine safe ppc64le ppc64be mipsle mips s390x - -package xxhash - -// Backend returns the current version of xxhash being used. -const Backend = "GoSafe" - -func ChecksumString32S(s string, seed uint32) uint32 { - return Checksum32S([]byte(s), seed) -} - -func (xx *XXHash32) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - return xx.Write([]byte(s)) -} - -func ChecksumString64S(s string, seed uint64) uint64 { - return Checksum64S([]byte(s), seed) -} - -func (xx *XXHash64) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - return xx.Write([]byte(s)) -} - -func checksum64(in []byte, seed uint64) (h uint64) { - var ( - v1, v2, v3, v4 = resetVs64(seed) - - i int - ) - - for ; i < len(in)-31; i += 32 { - in := in[i : i+32 : len(in)] - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - } - - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - - h += uint64(len(in)) - - for ; i < len(in)-7; i += 8 { - h ^= round64(0, u64(in[i:len(in):len(in)])) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < len(in)-3; i += 4 { - h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < len(in); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func checksum64Short(in []byte, seed uint64) uint64 { - var ( - h = seed + prime64x5 + uint64(len(in)) - i int - ) - - for ; i < len(in)-7; i += 8 { - k := u64(in[i : i+8 : len(in)]) - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < len(in)-3; i += 4 { - h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < len(in); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func (xx *XXHash64) Write(in []byte) (n int, err error) { - var ( - ml = int(xx.memIdx) - d = 32 - ml - ) - - n = len(in) - xx.ln += uint64(n) - - if ml+len(in) < 32 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in)) - return - } - - i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4 - if ml > 0 && ml+len(in) > 32 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)])) - in = in[d:len(in):len(in)] - - in := xx.mem[0:32:len(xx.mem)] - - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - - xx.memIdx = 0 - } - - for ; i < len(in)-31; i += 32 { - in := in[i : i+32 : len(in)] - v1 = round64(v1, u64(in[0:8:len(in)])) - v2 = round64(v2, u64(in[8:16:len(in)])) - v3 = round64(v3, u64(in[16:24:len(in)])) - v4 = round64(v4, u64(in[24:32:len(in)])) - } - - if len(in)-i != 0 { - xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)])) - } - - xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4 - - return -} - -func (xx *XXHash64) Sum64() (h uint64) { - var i int - if xx.ln > 31 { - v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4 - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - } else { - h = xx.seed + prime64x5 - } - - h += uint64(xx.ln) - if xx.memIdx > 0 { - in := xx.mem[:xx.memIdx] - for ; i < 
int(xx.memIdx)-7; i += 8 { - in := in[i : i+8 : len(in)] - k := u64(in[0:8:len(in)]) - k *= prime64x2 - k = rotl64_31(k) - k *= prime64x1 - h ^= k - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - for ; i < int(xx.memIdx)-3; i += 4 { - in := in[i : i+4 : len(in)] - h ^= uint64(u32(in[0:4:len(in)])) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - } - - for ; i < int(xx.memIdx); i++ { - h ^= uint64(in[i]) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - } - - return mix64(h) -} diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go deleted file mode 100644 index 9c67f8e..0000000 --- a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,241 +0,0 @@ -// +build !safe -// +build !appengine -// +build !ppc64le -// +build !mipsle -// +build !ppc64be -// +build !mips -// +build !s390x - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Backend returns the current version of xxhash being used. -const Backend = "GoUnsafe" - -// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed. -func ChecksumString32S(s string, seed uint32) uint32 { - if len(s) == 0 { - return Checksum32S(nil, seed) - } - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed) -} - -func (xx *XXHash32) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)]) -} - -// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed. -func ChecksumString64S(s string, seed uint64) uint64 { - if len(s) == 0 { - return Checksum64S(nil, seed) - } - - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed) -} - -func (xx *XXHash64) WriteString(s string) (int, error) { - if len(s) == 0 { - return 0, nil - } - ss := (*reflect.StringHeader)(unsafe.Pointer(&s)) - return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)]) -} - -//go:nocheckptr -func checksum64(in []byte, seed uint64) uint64 { - var ( - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - - h uint64 = prime64x5 - - v1, v2, v3, v4 = resetVs64(seed) - - i int - ) - - for ; i < len(words)-3; i += 4 { - words := (*[4]uint64)(unsafe.Pointer(&words[i])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - } - - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - - h += uint64(len(in)) - - for _, k := range words[i:] { - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -//go:nocheckptr -func checksum64Short(in []byte, seed uint64) uint64 { - var ( - h = seed + prime64x5 + uint64(len(in)) - i int - ) - - if len(in) > 7 { - var ( - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 
8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - ) - - for i := range words { - h ^= round64(0, words[i]) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - i = wordsLen << 3 - } - - if in = in[i:len(in):len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} - -func (xx *XXHash64) Write(in []byte) (n int, err error) { - mem, idx := xx.mem[:], int(xx.memIdx) - - xx.ln, n = xx.ln+uint64(len(in)), len(in) - - if idx+len(in) < 32 { - xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in)) - return - } - - var ( - v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4 - - i int - ) - - if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 { - copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)]) - - words := (*[4]uint64)(unsafe.Pointer(&mem[0])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - - if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 { - goto RET - } - } - - for ; i < len(in)-31; i += 32 { - words := (*[4]uint64)(unsafe.Pointer(&in[i])) - - v1 = round64(v1, words[0]) - v2 = round64(v2, words[1]) - v3 = round64(v3, words[2]) - v4 = round64(v4, words[3]) - } - - if len(in)-i != 0 { - xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)])) - } - -RET: - xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4 - - return -} - -func (xx *XXHash64) Sum64() (h uint64) { - if seed := xx.seed; xx.ln > 31 { - v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4 - h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4) - - h = mergeRound64(h, v1) - h = mergeRound64(h, v2) - h = mergeRound64(h, v3) - h = mergeRound64(h, v4) - } else if seed == 0 { - h = prime64x5 - } else { - h = seed + prime64x5 - } - - h += uint64(xx.ln) - - if xx.memIdx == 0 { - return mix64(h) - } - - var ( - in = xx.mem[:xx.memIdx:xx.memIdx] - wordsLen = len(in) >> 3 - words = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen] - ) - - for _, k := range words { - h ^= round64(0, k) - h = rotl64_27(h)*prime64x1 + prime64x4 - } - - if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 { - words := (*[1]uint32)(unsafe.Pointer(&in[0])) - - h ^= uint64(words[0]) * prime64x1 - h = rotl64_23(h)*prime64x2 + prime64x3 - - in = in[4:len(in):len(in)] - } - - for _, b := range in { - h ^= uint64(b) * prime64x5 - h = rotl64_11(h) * prime64x1 - } - - return mix64(h) -} diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE deleted file mode 100644 index 7448756..0000000 --- a/vendor/github.com/alecthomas/template/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md deleted file mode 100644 index ef6a8ee..0000000 --- a/vendor/github.com/alecthomas/template/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Go's `text/template` package with newline elision - -This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. - -eg. - -``` -{{if true}}\ -hello -{{end}}\ -``` - -Will result in: - -``` -hello\n -``` - -Rather than: - -``` -\n -hello\n -\n -``` diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go deleted file mode 100644 index 223c595..0000000 --- a/vendor/github.com/alecthomas/template/doc.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package template implements data-driven templates for generating textual output. - -To generate HTML output, see package html/template, which has the same interface -as this package but automatically secures HTML output against certain attacks. - -Templates are executed by applying them to a data structure. Annotations in the -template refer to elements of the data structure (typically a field of a struct -or a key in a map) to control execution and derive values to be displayed. -Execution of the template walks the structure and sets the cursor, represented -by a period '.' and called "dot", to the value at the current location in the -structure as execution proceeds. - -The input text for a template is UTF-8-encoded text in any format. -"Actions"--data evaluations or control structures--are delimited by -"{{" and "}}"; all text outside actions is copied to the output unchanged. -Actions may not span newlines, although comments can. - -Once parsed, a template may be executed safely in parallel. - -Here is a trivial example that prints "17 items are made of wool". - - type Inventory struct { - Material string - Count uint - } - sweaters := Inventory{"wool", 17} - tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") - if err != nil { panic(err) } - err = tmpl.Execute(os.Stdout, sweaters) - if err != nil { panic(err) } - -More intricate examples appear below. - -Actions - -Here is the list of actions. "Arguments" and "pipelines" are evaluations of -data, defined in detail below. 
- -*/ -// {{/* a comment */}} -// A comment; discarded. May contain newlines. -// Comments do not nest and must start and end at the -// delimiters, as shown here. -/* - - {{pipeline}} - The default textual representation of the value of the pipeline - is copied to the output. - - {{if pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, T1 is executed. The empty values are false, 0, any - nil pointer or interface value, and any array, slice, map, or - string of length zero. - Dot is unaffected. - - {{if pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, T0 is executed; - otherwise, T1 is executed. Dot is unaffected. - - {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} - To simplify the appearance of if-else chains, the else action - of an if may include another if directly; the effect is exactly - the same as writing - {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} - - {{range pipeline}} T1 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, nothing is output; - otherwise, dot is set to the successive elements of the array, - slice, or map and T1 is executed. If the value is a map and the - keys are of basic type with a defined order ("comparable"), the - elements will be visited in sorted key order. - - {{range pipeline}} T1 {{else}} T0 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, dot is unaffected and - T0 is executed; otherwise, dot is set to the successive elements - of the array, slice, or map and T1 is executed. - - {{template "name"}} - The template with the specified name is executed with nil data. - - {{template "name" pipeline}} - The template with the specified name is executed with dot set - to the value of the pipeline. - - {{with pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, dot is set to the value of the pipeline and T1 is - executed. - - {{with pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, dot is unaffected and T0 - is executed; otherwise, dot is set to the value of the pipeline - and T1 is executed. - -Arguments - -An argument is a simple value, denoted by one of the following. - - - A boolean, string, character, integer, floating-point, imaginary - or complex constant in Go syntax. These behave like Go's untyped - constants, although raw strings may not span newlines. - - The keyword nil, representing an untyped Go nil. - - The character '.' (period): - . - The result is the value of dot. - - A variable name, which is a (possibly empty) alphanumeric string - preceded by a dollar sign, such as - $piOver2 - or - $ - The result is the value of the variable. - Variables are described below. - - The name of a field of the data, which must be a struct, preceded - by a period, such as - .Field - The result is the value of the field. Field invocations may be - chained: - .Field1.Field2 - Fields can also be evaluated on variables, including chaining: - $x.Field1.Field2 - - The name of a key of the data, which must be a map, preceded - by a period, such as - .Key - The result is the map element value indexed by the key. - Key invocations may be chained and combined with fields to any - depth: - .Field1.Key1.Field2.Key2 - Although the key must be an alphanumeric identifier, unlike with - field names they do not need to start with an upper case letter. 
- Keys can also be evaluated on variables, including chaining: - $x.key1.key2 - - The name of a niladic method of the data, preceded by a period, - such as - .Method - The result is the value of invoking the method with dot as the - receiver, dot.Method(). Such a method must have one return value (of - any type) or two return values, the second of which is an error. - If it has two and the returned error is non-nil, execution terminates - and an error is returned to the caller as the value of Execute. - Method invocations may be chained and combined with fields and keys - to any depth: - .Field1.Key1.Method1.Field2.Key2.Method2 - Methods can also be evaluated on variables, including chaining: - $x.Method1.Field - - The name of a niladic function, such as - fun - The result is the value of invoking the function, fun(). The return - types and values behave as in methods. Functions and function - names are described below. - - A parenthesized instance of one the above, for grouping. The result - may be accessed by a field or map key invocation. - print (.F1 arg1) (.F2 arg2) - (.StructValuedMethod "arg").Field - -Arguments may evaluate to any type; if they are pointers the implementation -automatically indirects to the base type when required. -If an evaluation yields a function value, such as a function-valued -field of a struct, the function is not invoked automatically, but it -can be used as a truth value for an if action and the like. To invoke -it, use the call function, defined below. - -A pipeline is a possibly chained sequence of "commands". A command is a simple -value (argument) or a function or method call, possibly with multiple arguments: - - Argument - The result is the value of evaluating the argument. - .Method [Argument...] - The method can be alone or the last element of a chain but, - unlike methods in the middle of a chain, it can take arguments. - The result is the value of calling the method with the - arguments: - dot.Method(Argument1, etc.) - functionName [Argument...] - The result is the value of calling the function associated - with the name: - function(Argument1, etc.) - Functions and function names are described below. - -Pipelines - -A pipeline may be "chained" by separating a sequence of commands with pipeline -characters '|'. In a chained pipeline, the result of the each command is -passed as the last argument of the following command. The output of the final -command in the pipeline is the value of the pipeline. - -The output of a command will be either one value or two values, the second of -which has type error. If that second value is present and evaluates to -non-nil, execution terminates and the error is returned to the caller of -Execute. - -Variables - -A pipeline inside an action may initialize a variable to capture the result. -The initialization has syntax - - $variable := pipeline - -where $variable is the name of the variable. An action that declares a -variable produces no output. - -If a "range" action initializes a variable, the variable is set to the -successive elements of the iteration. Also, a "range" may declare two -variables, separated by a comma: - - range $index, $element := pipeline - -in which case $index and $element are set to the successive values of the -array/slice index or map key and element, respectively. Note that if there is -only one variable, it is assigned the element; this is opposite to the -convention in Go range clauses. 
- -A variable's scope extends to the "end" action of the control structure ("if", -"with", or "range") in which it is declared, or to the end of the template if -there is no such control structure. A template invocation does not inherit -variables from the point of its invocation. - -When execution begins, $ is set to the data argument passed to Execute, that is, -to the starting value of dot. - -Examples - -Here are some example one-line templates demonstrating pipelines and variables. -All produce the quoted word "output": - - {{"\"output\""}} - A string constant. - {{`"output"`}} - A raw string constant. - {{printf "%q" "output"}} - A function call. - {{"output" | printf "%q"}} - A function call whose final argument comes from the previous - command. - {{printf "%q" (print "out" "put")}} - A parenthesized argument. - {{"put" | printf "%s%s" "out" | printf "%q"}} - A more elaborate call. - {{"output" | printf "%s" | printf "%q"}} - A longer chain. - {{with "output"}}{{printf "%q" .}}{{end}} - A with action using dot. - {{with $x := "output" | printf "%q"}}{{$x}}{{end}} - A with action that creates and uses a variable. - {{with $x := "output"}}{{printf "%q" $x}}{{end}} - A with action that uses the variable in another action. - {{with $x := "output"}}{{$x | printf "%q"}}{{end}} - The same, but pipelined. - -Functions - -During execution functions are found in two function maps: first in the -template, then in the global function map. By default, no functions are defined -in the template but the Funcs method can be used to add them. - -Predefined global functions are named as follows. - - and - Returns the boolean AND of its arguments by returning the - first empty argument or the last argument, that is, - "and x y" behaves as "if x then y else x". All the - arguments are evaluated. - call - Returns the result of calling the first argument, which - must be a function, with the remaining arguments as parameters. - Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where - Y is a func-valued field, map entry, or the like. - The first argument must be the result of an evaluation - that yields a value of function type (as distinct from - a predefined function such as print). The function must - return either one or two result values, the second of which - is of type error. If the arguments don't match the function - or the returned error value is non-nil, execution stops. - html - Returns the escaped HTML equivalent of the textual - representation of its arguments. - index - Returns the result of indexing its first argument by the - following arguments. Thus "index x 1 2 3" is, in Go syntax, - x[1][2][3]. Each indexed item must be a map, slice, or array. - js - Returns the escaped JavaScript equivalent of the textual - representation of its arguments. - len - Returns the integer length of its argument. - not - Returns the boolean negation of its single argument. - or - Returns the boolean OR of its arguments by returning the - first non-empty argument or the last argument, that is, - "or x y" behaves as "if x then x else y". All the - arguments are evaluated. - print - An alias for fmt.Sprint - printf - An alias for fmt.Sprintf - println - An alias for fmt.Sprintln - urlquery - Returns the escaped value of the textual representation of - its arguments in a form suitable for embedding in a URL query. - -The boolean functions take any zero value to be false and a non-zero -value to be true. 
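A runnable sketch of the chained-pipeline examples and built-ins listed above, again using the standard text/template package rather than the removed vendored copy; the literal values are made up for demonstration:

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		// The output of the first printf is passed as the final argument of the
		// second, and index picks an element out of the slice passed as dot.
		const src = `{{"put" | printf "%s%s" "out" | printf "%q"}} {{index . 1}}`
		t := template.Must(template.New("demo").Parse(src))
		_ = t.Execute(os.Stdout, []string{"zero", "one"}) // prints "output" one
	}
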
- -There is also a set of binary comparison operators defined as -functions: - - eq - Returns the boolean truth of arg1 == arg2 - ne - Returns the boolean truth of arg1 != arg2 - lt - Returns the boolean truth of arg1 < arg2 - le - Returns the boolean truth of arg1 <= arg2 - gt - Returns the boolean truth of arg1 > arg2 - ge - Returns the boolean truth of arg1 >= arg2 - -For simpler multi-way equality tests, eq (only) accepts two or more -arguments and compares the second and subsequent to the first, -returning in effect - - arg1==arg2 || arg1==arg3 || arg1==arg4 ... - -(Unlike with || in Go, however, eq is a function call and all the -arguments will be evaluated.) - -The comparison functions work on basic types only (or named basic -types, such as "type Celsius float32"). They implement the Go rules -for comparison of values, except that size and exact type are -ignored, so any integer value, signed or unsigned, may be compared -with any other integer value. (The arithmetic value is compared, -not the bit pattern, so all negative integers are less than all -unsigned integers.) However, as usual, one may not compare an int -with a float32 and so on. - -Associated templates - -Each template is named by a string specified when it is created. Also, each -template is associated with zero or more other templates that it may invoke by -name; such associations are transitive and form a name space of templates. - -A template may use a template invocation to instantiate another associated -template; see the explanation of the "template" action above. The name must be -that of a template associated with the template that contains the invocation. - -Nested template definitions - -When parsing a template, another template may be defined and associated with the -template being parsed. Template definitions must appear at the top level of the -template, much like global variables in a Go program. - -The syntax of such definitions is to surround each template declaration with a -"define" and "end" action. - -The define action names the template being created by providing a string -constant. Here is a simple example: - - `{{define "T1"}}ONE{{end}} - {{define "T2"}}TWO{{end}} - {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} - {{template "T3"}}` - -This defines two templates, T1 and T2, and a third T3 that invokes the other two -when it is executed. Finally it invokes T3. If executed this template will -produce the text - - ONE TWO - -By construction, a template may reside in only one association. If it's -necessary to have a template addressable from multiple associations, the -template definition must be parsed multiple times to create distinct *Template -values, or must be copied with the Clone or AddParseTree method. - -Parse may be called multiple times to assemble the various associated templates; -see the ParseFiles and ParseGlob functions and methods for simple ways to parse -related templates stored in files. - -A template may be executed directly or through ExecuteTemplate, which executes -an associated template identified by name. 
To invoke our example above, we -might write, - - err := tmpl.Execute(os.Stdout, "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -or to invoke a particular template explicitly by name, - - err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -*/ -package template diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go deleted file mode 100644 index c3078e5..0000000 --- a/vendor/github.com/alecthomas/template/exec.go +++ /dev/null @@ -1,845 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "fmt" - "io" - "reflect" - "runtime" - "sort" - "strings" - - "github.com/alecthomas/template/parse" -) - -// state represents the state of an execution. It's not part of the -// template so that multiple executions of the same template -// can execute in parallel. -type state struct { - tmpl *Template - wr io.Writer - node parse.Node // current node, for errors - vars []variable // push-down stack of variable values. -} - -// variable holds the dynamic value of a variable such as $, $x etc. -type variable struct { - name string - value reflect.Value -} - -// push pushes a new variable on the stack. -func (s *state) push(name string, value reflect.Value) { - s.vars = append(s.vars, variable{name, value}) -} - -// mark returns the length of the variable stack. -func (s *state) mark() int { - return len(s.vars) -} - -// pop pops the variable stack up to the mark. -func (s *state) pop(mark int) { - s.vars = s.vars[0:mark] -} - -// setVar overwrites the top-nth variable on the stack. Used by range iterations. -func (s *state) setVar(n int, value reflect.Value) { - s.vars[len(s.vars)-n].value = value -} - -// varValue returns the value of the named variable. -func (s *state) varValue(name string) reflect.Value { - for i := s.mark() - 1; i >= 0; i-- { - if s.vars[i].name == name { - return s.vars[i].value - } - } - s.errorf("undefined variable: %s", name) - return zero -} - -var zero reflect.Value - -// at marks the state to be on node n, for error reporting. -func (s *state) at(node parse.Node) { - s.node = node -} - -// doublePercent returns the string with %'s replaced by %%, if necessary, -// so it can be used safely inside a Printf format string. -func doublePercent(str string) string { - if strings.Contains(str, "%") { - str = strings.Replace(str, "%", "%%", -1) - } - return str -} - -// errorf formats the error and terminates processing. -func (s *state) errorf(format string, args ...interface{}) { - name := doublePercent(s.tmpl.Name()) - if s.node == nil { - format = fmt.Sprintf("template: %s: %s", name, format) - } else { - location, context := s.tmpl.ErrorContext(s.node) - format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) - } - panic(fmt.Errorf(format, args...)) -} - -// errRecover is the handler that turns panics into returns from the top -// level of Parse. -func errRecover(errp *error) { - e := recover() - if e != nil { - switch err := e.(type) { - case runtime.Error: - panic(e) - case error: - *errp = err - default: - panic(e) - } - } -} - -// ExecuteTemplate applies the template associated with t that has the given name -// to the specified data object and writes the output to wr. 
-// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - tmpl := t.tmpl[name] - if tmpl == nil { - return fmt.Errorf("template: no template %q associated with template %q", name, t.name) - } - return tmpl.Execute(wr, data) -} - -// Execute applies a parsed template to the specified data object, -// and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { - defer errRecover(&err) - value := reflect.ValueOf(data) - state := &state{ - tmpl: t, - wr: wr, - vars: []variable{{"$", value}}, - } - t.init() - if t.Tree == nil || t.Root == nil { - var b bytes.Buffer - for name, tmpl := range t.tmpl { - if tmpl.Tree == nil || tmpl.Root == nil { - continue - } - if b.Len() > 0 { - b.WriteString(", ") - } - fmt.Fprintf(&b, "%q", name) - } - var s string - if b.Len() > 0 { - s = "; defined templates are: " + b.String() - } - state.errorf("%q is an incomplete or empty template%s", t.Name(), s) - } - state.walk(value, t.Root) - return -} - -// Walk functions step through the major pieces of the template structure, -// generating output as they go. -func (s *state) walk(dot reflect.Value, node parse.Node) { - s.at(node) - switch node := node.(type) { - case *parse.ActionNode: - // Do not pop variables so they persist until next end. - // Also, if the action declares variables, don't print the result. - val := s.evalPipeline(dot, node.Pipe) - if len(node.Pipe.Decl) == 0 { - s.printValue(node, val) - } - case *parse.IfNode: - s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) - case *parse.ListNode: - for _, node := range node.Nodes { - s.walk(dot, node) - } - case *parse.RangeNode: - s.walkRange(dot, node) - case *parse.TemplateNode: - s.walkTemplate(dot, node) - case *parse.TextNode: - if _, err := s.wr.Write(node.Text); err != nil { - s.errorf("%s", err) - } - case *parse.WithNode: - s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) - default: - s.errorf("unknown node: %s", node) - } -} - -// walkIfOrWith walks an 'if' or 'with' node. The two control structures -// are identical in behavior except that 'with' sets dot. -func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { - defer s.pop(s.mark()) - val := s.evalPipeline(dot, pipe) - truth, ok := isTrue(val) - if !ok { - s.errorf("if/with can't use %v", val) - } - if truth { - if typ == parse.NodeWith { - s.walk(val, list) - } else { - s.walk(dot, list) - } - } else if elseList != nil { - s.walk(dot, elseList) - } -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. 
- return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} - -func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { - s.at(r) - defer s.pop(s.mark()) - val, _ := indirect(s.evalPipeline(dot, r.Pipe)) - // mark top of stack before any variables in the body are pushed. - mark := s.mark() - oneIteration := func(index, elem reflect.Value) { - // Set top var (lexically the second if there are two) to the element. - if len(r.Pipe.Decl) > 0 { - s.setVar(1, elem) - } - // Set next var (lexically the first if there are two) to the index. - if len(r.Pipe.Decl) > 1 { - s.setVar(2, index) - } - s.walk(elem, r.List) - s.pop(mark) - } - switch val.Kind() { - case reflect.Array, reflect.Slice: - if val.Len() == 0 { - break - } - for i := 0; i < val.Len(); i++ { - oneIteration(reflect.ValueOf(i), val.Index(i)) - } - return - case reflect.Map: - if val.Len() == 0 { - break - } - for _, key := range sortKeys(val.MapKeys()) { - oneIteration(key, val.MapIndex(key)) - } - return - case reflect.Chan: - if val.IsNil() { - break - } - i := 0 - for ; ; i++ { - elem, ok := val.Recv() - if !ok { - break - } - oneIteration(reflect.ValueOf(i), elem) - } - if i == 0 { - break - } - return - case reflect.Invalid: - break // An invalid value is likely a nil map, etc. and acts like an empty map. - default: - s.errorf("range can't iterate over %v", val) - } - if r.ElseList != nil { - s.walk(dot, r.ElseList) - } -} - -func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { - s.at(t) - tmpl := s.tmpl.tmpl[t.Name] - if tmpl == nil { - s.errorf("template %q not defined", t.Name) - } - // Variables declared by the pipeline persist. - dot = s.evalPipeline(dot, t.Pipe) - newState := *s - newState.tmpl = tmpl - // No dynamic scoping: template invocations inherit no variables. - newState.vars = []variable{{"$", dot}} - newState.walk(dot, tmpl.Root) -} - -// Eval functions evaluate pipelines, commands, and their elements and extract -// values from the data structure by examining fields, calling methods, and so on. -// The printing of those values happens only through walk functions. - -// evalPipeline returns the value acquired by evaluating a pipeline. If the -// pipeline has a variable declaration, the variable will be pushed on the -// stack. Callers should therefore pop the stack after they are finished -// executing commands depending on the pipeline value. -func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { - if pipe == nil { - return - } - s.at(pipe) - for _, cmd := range pipe.Cmds { - value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. - // If the object has type interface{}, dig down one level to the thing inside. 
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { - value = reflect.ValueOf(value.Interface()) // lovely! - } - } - for _, variable := range pipe.Decl { - s.push(variable.Ident[0], value) - } - return value -} - -func (s *state) notAFunction(args []parse.Node, final reflect.Value) { - if len(args) > 1 || final.IsValid() { - s.errorf("can't give argument to non-function %s", args[0]) - } -} - -func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { - firstWord := cmd.Args[0] - switch n := firstWord.(type) { - case *parse.FieldNode: - return s.evalFieldNode(dot, n, cmd.Args, final) - case *parse.ChainNode: - return s.evalChainNode(dot, n, cmd.Args, final) - case *parse.IdentifierNode: - // Must be a function. - return s.evalFunction(dot, n, cmd, cmd.Args, final) - case *parse.PipeNode: - // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. - return s.evalPipeline(dot, n) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, cmd.Args, final) - } - s.at(firstWord) - s.notAFunction(cmd.Args, final) - switch word := firstWord.(type) { - case *parse.BoolNode: - return reflect.ValueOf(word.True) - case *parse.DotNode: - return dot - case *parse.NilNode: - s.errorf("nil is not a command") - case *parse.NumberNode: - return s.idealConstant(word) - case *parse.StringNode: - return reflect.ValueOf(word.Text) - } - s.errorf("can't evaluate command %q", firstWord) - panic("not reached") -} - -// idealConstant is called to return the value of a number in a context where -// we don't know the type. In that case, the syntax of the number tells us -// its type, and we use Go rules to resolve. Note there is no such thing as -// a uint ideal constant in this situation - the value must be of int type. -func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { - // These are ideal constants but we don't know the type - // and we have no context. (If it was a method argument, - // we'd know what we need.) The syntax guides us to some extent. - s.at(constant) - switch { - case constant.IsComplex: - return reflect.ValueOf(constant.Complex128) // incontrovertible. - case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: - return reflect.ValueOf(constant.Float64) - case constant.IsInt: - n := int(constant.Int64) - if int64(n) != constant.Int64 { - s.errorf("%s overflows int", constant.Text) - } - return reflect.ValueOf(n) - case constant.IsUint: - s.errorf("%s overflows int", constant.Text) - } - return zero -} - -func isHexConstant(s string) bool { - return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') -} - -func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(field) - return s.evalFieldChain(dot, dot, field, field.Ident, args, final) -} - -func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(chain) - // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. 
- pipe := s.evalArg(dot, nil, chain.Node) - if len(chain.Field) == 0 { - s.errorf("internal error: no fields in evalChainNode") - } - return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) -} - -func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { - // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. - s.at(variable) - value := s.varValue(variable.Ident[0]) - if len(variable.Ident) == 1 { - s.notAFunction(args, final) - return value - } - return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) -} - -// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. -// dot is the environment in which to evaluate arguments, while -// receiver is the value being walked along the chain. -func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { - n := len(ident) - for i := 0; i < n-1; i++ { - receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) - } - // Now if it's a method, it gets the arguments. - return s.evalField(dot, ident[n-1], node, args, final, receiver) -} - -func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { - s.at(node) - name := node.Ident - function, ok := findFunction(name, s.tmpl) - if !ok { - s.errorf("%q is not a defined function", name) - } - return s.evalCall(dot, function, cmd, name, args, final) -} - -// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). -// The 'final' argument represents the return value from the preceding -// value of the pipeline, if any. -func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { - if !receiver.IsValid() { - return zero - } - typ := receiver.Type() - receiver, _ = indirect(receiver) - // Unless it's an interface, need to get to a value of type *T to guarantee - // we see all methods of T and *T. - ptr := receiver - if ptr.Kind() != reflect.Interface && ptr.CanAddr() { - ptr = ptr.Addr() - } - if method := ptr.MethodByName(fieldName); method.IsValid() { - return s.evalCall(dot, method, node, fieldName, args, final) - } - hasArgs := len(args) > 1 || final.IsValid() - // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. - receiver, isNil := indirect(receiver) - if isNil { - s.errorf("nil pointer evaluating %s.%s", typ, fieldName) - } - switch receiver.Kind() { - case reflect.Struct: - tField, ok := receiver.Type().FieldByName(fieldName) - if ok { - field := receiver.FieldByIndex(tField.Index) - if tField.PkgPath != "" { // field is unexported - s.errorf("%s is an unexported field of struct type %s", fieldName, typ) - } - // If it's a function, we must call it. - if hasArgs { - s.errorf("%s has arguments but cannot be invoked as function", fieldName) - } - return field - } - s.errorf("%s is not a field of struct type %s", fieldName, typ) - case reflect.Map: - // If it's a map, attempt to use the field name as a key. 
- nameVal := reflect.ValueOf(fieldName) - if nameVal.Type().AssignableTo(receiver.Type().Key()) { - if hasArgs { - s.errorf("%s is not a method but has arguments", fieldName) - } - return receiver.MapIndex(nameVal) - } - } - s.errorf("can't evaluate field %s in type %s", fieldName, typ) - panic("not reached") -} - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so -// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] -// as the function itself. -func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { - if args != nil { - args = args[1:] // Zeroth arg is function name/node; not passed to function. - } - typ := fun.Type() - numIn := len(args) - if final.IsValid() { - numIn++ - } - numFixed := len(args) - if typ.IsVariadic() { - numFixed = typ.NumIn() - 1 // last arg is the variadic one. - if numIn < numFixed { - s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) - } - } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { - s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) - } - if !goodFunc(typ) { - // TODO: This could still be a confusing error; maybe goodFunc should provide info. - s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) - } - // Build the arg list. - argv := make([]reflect.Value, numIn) - // Args must be evaluated. Fixed args first. - i := 0 - for ; i < numFixed && i < len(args); i++ { - argv[i] = s.evalArg(dot, typ.In(i), args[i]) - } - // Now the ... args. - if typ.IsVariadic() { - argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. - for ; i < len(args); i++ { - argv[i] = s.evalArg(dot, argType, args[i]) - } - } - // Add final value if necessary. - if final.IsValid() { - t := typ.In(typ.NumIn() - 1) - if typ.IsVariadic() { - t = t.Elem() - } - argv[i] = s.validateType(final, t) - } - result := fun.Call(argv) - // If we have an error that is not nil, stop execution and return that error to the caller. - if len(result) == 2 && !result[1].IsNil() { - s.at(node) - s.errorf("error calling %s: %s", name, result[1].Interface().(error)) - } - return result[0] -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// validateType guarantees that the value is valid and assignable to the type. -func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { - if !value.IsValid() { - if typ == nil || canBeNil(typ) { - // An untyped nil interface{}. Accept as a proper nil value. - return reflect.Zero(typ) - } - s.errorf("invalid value; expected %s", typ) - } - if typ != nil && !value.Type().AssignableTo(typ) { - if value.Kind() == reflect.Interface && !value.IsNil() { - value = value.Elem() - if value.Type().AssignableTo(typ) { - return value - } - // fallthrough - } - // Does one dereference or indirection work? We could do more, as we - // do with method receivers, but that gets messy and method receivers - // are much more constrained, so it makes more sense there than here. 
- // Besides, one is almost always all you need. - switch { - case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): - value = value.Elem() - if !value.IsValid() { - s.errorf("dereference of nil pointer of type %s", typ) - } - case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): - value = value.Addr() - default: - s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) - } - } - return value -} - -func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - switch arg := n.(type) { - case *parse.DotNode: - return s.validateType(dot, typ) - case *parse.NilNode: - if canBeNil(typ) { - return reflect.Zero(typ) - } - s.errorf("cannot assign nil to %s", typ) - case *parse.FieldNode: - return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) - case *parse.VariableNode: - return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) - case *parse.PipeNode: - return s.validateType(s.evalPipeline(dot, arg), typ) - case *parse.IdentifierNode: - return s.evalFunction(dot, arg, arg, nil, zero) - case *parse.ChainNode: - return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) - } - switch typ.Kind() { - case reflect.Bool: - return s.evalBool(typ, n) - case reflect.Complex64, reflect.Complex128: - return s.evalComplex(typ, n) - case reflect.Float32, reflect.Float64: - return s.evalFloat(typ, n) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return s.evalInteger(typ, n) - case reflect.Interface: - if typ.NumMethod() == 0 { - return s.evalEmptyInterface(dot, n) - } - case reflect.String: - return s.evalString(typ, n) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return s.evalUnsignedInteger(typ, n) - } - s.errorf("can't handle %s for arg of type %s", n, typ) - panic("not reached") -} - -func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.BoolNode); ok { - value := reflect.New(typ).Elem() - value.SetBool(n.True) - return value - } - s.errorf("expected bool; found %s", n) - panic("not reached") -} - -func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.StringNode); ok { - value := reflect.New(typ).Elem() - value.SetString(n.Text) - return value - } - s.errorf("expected string; found %s", n) - panic("not reached") -} - -func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsInt { - value := reflect.New(typ).Elem() - value.SetInt(n.Int64) - return value - } - s.errorf("expected integer; found %s", n) - panic("not reached") -} - -func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsUint { - value := reflect.New(typ).Elem() - value.SetUint(n.Uint64) - return value - } - s.errorf("expected unsigned integer; found %s", n) - panic("not reached") -} - -func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { - value := reflect.New(typ).Elem() - value.SetFloat(n.Float64) - return value - } - s.errorf("expected float; found %s", n) - panic("not reached") -} - -func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { - if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { - value := reflect.New(typ).Elem() - 
value.SetComplex(n.Complex128) - return value - } - s.errorf("expected complex; found %s", n) - panic("not reached") -} - -func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { - s.at(n) - switch n := n.(type) { - case *parse.BoolNode: - return reflect.ValueOf(n.True) - case *parse.DotNode: - return dot - case *parse.FieldNode: - return s.evalFieldNode(dot, n, nil, zero) - case *parse.IdentifierNode: - return s.evalFunction(dot, n, n, nil, zero) - case *parse.NilNode: - // NilNode is handled in evalArg, the only place that calls here. - s.errorf("evalEmptyInterface: nil (can't happen)") - case *parse.NumberNode: - return s.idealConstant(n) - case *parse.StringNode: - return reflect.ValueOf(n.Text) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, nil, zero) - case *parse.PipeNode: - return s.evalPipeline(dot, n) - } - s.errorf("can't handle assignment of %s to empty interface argument", n) - panic("not reached") -} - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printValue writes the textual representation of the value to the output of -// the template. -func (s *state) printValue(n parse.Node, v reflect.Value) { - s.at(n) - iface, ok := printableValue(v) - if !ok { - s.errorf("can't print %s of type %s", n, v.Type()) - } - fmt.Fprint(s.wr, iface) -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// Types to help sort the keys in a map for reproducible output. - -type rvs []reflect.Value - -func (x rvs) Len() int { return len(x) } -func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -type rvInts struct{ rvs } - -func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } - -type rvUints struct{ rvs } - -func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } - -type rvFloats struct{ rvs } - -func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } - -type rvStrings struct{ rvs } - -func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } - -// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. 
-func sortKeys(v []reflect.Value) []reflect.Value { - if len(v) <= 1 { - return v - } - switch v[0].Kind() { - case reflect.Float32, reflect.Float64: - sort.Sort(rvFloats{v}) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - sort.Sort(rvInts{v}) - case reflect.String: - sort.Sort(rvStrings{v}) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - sort.Sort(rvUints{v}) - } - return v -} diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go deleted file mode 100644 index 39ee5ed..0000000 --- a/vendor/github.com/alecthomas/template/funcs.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. -type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// addFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. -func findFunction(name string, tmpl *Template) (reflect.Value, bool) { - if tmpl != nil && tmpl.common != nil { - if fn := tmpl.execFuncs[name]; fn.IsValid() { - return fn, true - } - } - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. 
Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. -func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. 
- -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. - -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... -func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. 
-func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. - lessThan, err := lt(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return eq(arg1, arg2) -} - -// gt evaluates the comparison a > b. -func gt(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := le(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// ge evaluates the comparison a >= b. -func ge(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. - lessThan, err := lt(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} - -// HTML escaping. - -var ( - htmlQuot = []byte(""") // shorter than """ - htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 - htmlAmp = []byte("&") - htmlLt = []byte("<") - htmlGt = []byte(">") -) - -// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. -func HTMLEscape(w io.Writer, b []byte) { - last := 0 - for i, c := range b { - var html []byte - switch c { - case '"': - html = htmlQuot - case '\'': - html = htmlApos - case '&': - html = htmlAmp - case '<': - html = htmlLt - case '>': - html = htmlGt - default: - continue - } - w.Write(b[last:i]) - w.Write(html) - last = i + 1 - } - w.Write(b[last:]) -} - -// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. -func HTMLEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexAny(s, `'"&<>`) < 0 { - return s - } - var b bytes.Buffer - HTMLEscape(&b, []byte(s)) - return b.String() -} - -// HTMLEscaper returns the escaped HTML equivalent of the textual -// representation of its arguments. -func HTMLEscaper(args ...interface{}) string { - return HTMLEscapeString(evalArgs(args)) -} - -// JavaScript escaping. - -var ( - jsLowUni = []byte(`\u00`) - hex = []byte("0123456789ABCDEF") - - jsBackslash = []byte(`\\`) - jsApos = []byte(`\'`) - jsQuot = []byte(`\"`) - jsLt = []byte(`\x3C`) - jsGt = []byte(`\x3E`) -) - -// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. -func JSEscape(w io.Writer, b []byte) { - last := 0 - for i := 0; i < len(b); i++ { - c := b[i] - - if !jsIsSpecial(rune(c)) { - // fast path: nothing to do - continue - } - w.Write(b[last:i]) - - if c < utf8.RuneSelf { - // Quotes, slashes and angle brackets get quoted. - // Control characters get written as \u00XX. 
- switch c { - case '\\': - w.Write(jsBackslash) - case '\'': - w.Write(jsApos) - case '"': - w.Write(jsQuot) - case '<': - w.Write(jsLt) - case '>': - w.Write(jsGt) - default: - w.Write(jsLowUni) - t, b := c>>4, c&0x0f - w.Write(hex[t : t+1]) - w.Write(hex[b : b+1]) - } - } else { - // Unicode rune. - r, size := utf8.DecodeRune(b[i:]) - if unicode.IsPrint(r) { - w.Write(b[i : i+size]) - } else { - fmt.Fprintf(w, "\\u%04X", r) - } - i += size - 1 - } - last = i + 1 - } - w.Write(b[last:]) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexFunc(s, jsIsSpecial) < 0 { - return s - } - var b bytes.Buffer - JSEscape(&b, []byte(s)) - return b.String() -} - -func jsIsSpecial(r rune) bool { - switch r { - case '\\', '\'', '"', '<', '>': - return true - } - return r < ' ' || utf8.RuneSelf <= r -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. -func JSEscaper(args ...interface{}) string { - return JSEscapeString(evalArgs(args)) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return url.QueryEscape(evalArgs(args)) -} - -// evalArgs formats the list of arguments into a string. It is therefore equivalent to -// fmt.Sprint(args...) -// except that each argument is indirected (if a pointer), as required, -// using the same rules as the default string evaluation during template -// execution. -func evalArgs(args []interface{}) string { - ok := false - var s string - // Fast path for simple common case. - if len(args) == 1 { - s, ok = args[0].(string) - } - if !ok { - for i, arg := range args { - a, ok := printableValue(reflect.ValueOf(arg)) - if ok { - args[i] = a - } // else left fmt do its thing - } - s = fmt.Sprint(args...) - } - return s -} diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go deleted file mode 100644 index 3636fb5..0000000 --- a/vendor/github.com/alecthomas/template/helper.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Helper functions to make constructing templates easier. - -package template - -import ( - "fmt" - "io/ioutil" - "path/filepath" -) - -// Functions and methods to parse templates. - -// Must is a helper that wraps a call to a function returning (*Template, error) -// and panics if the error is non-nil. It is intended for use in variable -// initializations such as -// var t = template.Must(template.New("name").Parse("text")) -func Must(t *Template, err error) *Template { - if err != nil { - panic(err) - } - return t -} - -// ParseFiles creates a new Template and parses the template definitions from -// the named files. The returned template's name will have the (base) name and -// (parsed) contents of the first file. There must be at least one file. -// If an error occurs, parsing stops and the returned *Template is nil. -func ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(nil, filenames...) -} - -// ParseFiles parses the named files and associates the resulting templates with -// t. If an error occurs, parsing stops and the returned template is nil; -// otherwise it is t. 
There must be at least one file. -func (t *Template) ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(t, filenames...) -} - -// parseFiles is the helper for the method and function. If the argument -// template is nil, it is created from the first file. -func parseFiles(t *Template, filenames ...string) (*Template, error) { - if len(filenames) == 0 { - // Not really a problem, but be consistent. - return nil, fmt.Errorf("template: no files named in call to ParseFiles") - } - for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - s := string(b) - name := filepath.Base(filename) - // First template becomes return value if not already defined, - // and we use that one for subsequent New calls to associate - // all the templates together. Also, if this file has the same name - // as t, this file becomes the contents of t, so - // t, err := New(name).Funcs(xxx).ParseFiles(name) - // works. Otherwise we create a new template associated with t. - var tmpl *Template - if t == nil { - t = New(name) - } - if name == t.Name() { - tmpl = t - } else { - tmpl = t.New(name) - } - _, err = tmpl.Parse(s) - if err != nil { - return nil, err - } - } - return t, nil -} - -// ParseGlob creates a new Template and parses the template definitions from the -// files identified by the pattern, which must match at least one file. The -// returned template will have the (base) name and (parsed) contents of the -// first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. -func ParseGlob(pattern string) (*Template, error) { - return parseGlob(nil, pattern) -} - -// ParseGlob parses the template definitions in the files identified by the -// pattern and associates the resulting templates with t. The pattern is -// processed by filepath.Glob and must match at least one file. ParseGlob is -// equivalent to calling t.ParseFiles with the list of files matched by the -// pattern. -func (t *Template) ParseGlob(pattern string) (*Template, error) { - return parseGlob(t, pattern) -} - -// parseGlob is the implementation of the function and method ParseGlob. -func parseGlob(t *Template, pattern string) (*Template, error) { - filenames, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if len(filenames) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - return parseFiles(t, filenames...) -} diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go deleted file mode 100644 index 55f1c05..0000000 --- a/vendor/github.com/alecthomas/template/parse/lex.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos Pos // The starting position, in bytes, of this item in the input string. - val string // The value of this item. 
-} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case i.typ > itemKeyword: - return fmt.Sprintf("<%s>", i.val) - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemBool // boolean constant - itemChar // printable ASCII character; grab bag for comma etc. - itemCharConstant // character constant - itemComplex // complex constant (1+2i); imaginary is just a number - itemColonEquals // colon-equals (':=') introducing a declaration - itemEOF - itemField // alphanumeric identifier starting with '.' - itemIdentifier // alphanumeric identifier not starting with '.' - itemLeftDelim // left action delimiter - itemLeftParen // '(' inside action - itemNumber // simple number, including imaginary - itemPipe // pipe symbol - itemRawString // raw quoted string (includes quotes) - itemRightDelim // right action delimiter - itemElideNewline // elide newline after right delim - itemRightParen // ')' inside action - itemSpace // run of spaces separating arguments - itemString // quoted string (includes quotes) - itemText // plain text - itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' - // Keywords appear after all the rest. - itemKeyword // used only to delimit the keywords - itemDot // the cursor, spelled '.' - itemDefine // define keyword - itemElse // else keyword - itemEnd // end keyword - itemIf // if keyword - itemNil // the untyped nil constant, easiest to treat as a keyword - itemRange // range keyword - itemTemplate // template keyword - itemWith // with keyword -) - -var key = map[string]itemType{ - ".": itemDot, - "define": itemDefine, - "else": itemElse, - "end": itemEnd, - "if": itemIf, - "range": itemRange, - "nil": itemNil, - "template": itemTemplate, - "with": itemWith, -} - -const eof = -1 - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - name string // the name of the input; used only for error reports - input string // the string being scanned - leftDelim string // start of action - rightDelim string // end of action - state stateFn // the next lexing function to enter - pos Pos // current position in the input - start Pos // start position of this item - width Pos // width of last rune read from input - lastPos Pos // position of most recent item returned by nextItem - items chan item // channel of scanned items - parenDepth int // nesting depth of ( ) exprs -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if int(l.pos) >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} - l.start = l.pos -} - -// ignore skips over the pending input before this point. 
-func (l *lexer) ignore() { - l.start = l.pos -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.IndexRune(valid, l.next()) >= 0 { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.IndexRune(valid, l.next()) >= 0 { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - item := <-l.items - l.lastPos = item.pos - return item -} - -// lex creates a new scanner for the input string. -func lex(name, input, left, right string) *lexer { - if left == "" { - left = leftDelim - } - if right == "" { - right = rightDelim - } - l := &lexer{ - name: name, - input: input, - leftDelim: left, - rightDelim: right, - items: make(chan item), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexText; l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -const ( - leftDelim = "{{" - rightDelim = "}}" - leftComment = "/*" - rightComment = "*/" -) - -// lexText scans until an opening action delimiter, "{{". -func lexText(l *lexer) stateFn { - for { - if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { - if l.pos > l.start { - l.emit(itemText) - } - return lexLeftDelim - } - if l.next() == eof { - break - } - } - // Correctly reached EOF. - if l.pos > l.start { - l.emit(itemText) - } - l.emit(itemEOF) - return nil -} - -// lexLeftDelim scans the left delimiter, which is known to be present. -func lexLeftDelim(l *lexer) stateFn { - l.pos += Pos(len(l.leftDelim)) - if strings.HasPrefix(l.input[l.pos:], leftComment) { - return lexComment - } - l.emit(itemLeftDelim) - l.parenDepth = 0 - return lexInsideAction -} - -// lexComment scans a comment. The left comment marker is known to be present. -func lexComment(l *lexer) stateFn { - l.pos += Pos(len(leftComment)) - i := strings.Index(l.input[l.pos:], rightComment) - if i < 0 { - return l.errorf("unclosed comment") - } - l.pos += Pos(i + len(rightComment)) - if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - return l.errorf("comment ends before closing delimiter") - - } - l.pos += Pos(len(l.rightDelim)) - l.ignore() - return lexText -} - -// lexRightDelim scans the right delimiter, which is known to be present. -func lexRightDelim(l *lexer) stateFn { - l.pos += Pos(len(l.rightDelim)) - l.emit(itemRightDelim) - if l.peek() == '\\' { - l.pos++ - l.emit(itemElideNewline) - } - return lexText -} - -// lexInsideAction scans the elements inside action delimiters. -func lexInsideAction(l *lexer) stateFn { - // Either number, quoted string, or identifier. - // Spaces separate arguments; runs of spaces turn into itemSpace. - // Pipe symbols separate and are emitted. 
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - if l.parenDepth == 0 { - return lexRightDelim - } - return l.errorf("unclosed left paren") - } - switch r := l.next(); { - case r == eof || isEndOfLine(r): - return l.errorf("unclosed action") - case isSpace(r): - return lexSpace - case r == ':': - if l.next() != '=' { - return l.errorf("expected :=") - } - l.emit(itemColonEquals) - case r == '|': - l.emit(itemPipe) - case r == '"': - return lexQuote - case r == '`': - return lexRawQuote - case r == '$': - return lexVariable - case r == '\'': - return lexChar - case r == '.': - // special look-ahead for ".field" so we don't break l.backup(). - if l.pos < Pos(len(l.input)) { - r := l.input[l.pos] - if r < '0' || '9' < r { - return lexField - } - } - fallthrough // '.' can start a number. - case r == '+' || r == '-' || ('0' <= r && r <= '9'): - l.backup() - return lexNumber - case isAlphaNumeric(r): - l.backup() - return lexIdentifier - case r == '(': - l.emit(itemLeftParen) - l.parenDepth++ - return lexInsideAction - case r == ')': - l.emit(itemRightParen) - l.parenDepth-- - if l.parenDepth < 0 { - return l.errorf("unexpected right paren %#U", r) - } - return lexInsideAction - case r <= unicode.MaxASCII && unicode.IsPrint(r): - l.emit(itemChar) - return lexInsideAction - default: - return l.errorf("unrecognized character in action: %#U", r) - } - return lexInsideAction -} - -// lexSpace scans a run of space characters. -// One space has already been seen. -func lexSpace(l *lexer) stateFn { - for isSpace(l.peek()) { - l.next() - } - l.emit(itemSpace) - return lexInsideAction -} - -// lexIdentifier scans an alphanumeric. -func lexIdentifier(l *lexer) stateFn { -Loop: - for { - switch r := l.next(); { - case isAlphaNumeric(r): - // absorb. - default: - l.backup() - word := l.input[l.start:l.pos] - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - switch { - case key[word] > itemKeyword: - l.emit(key[word]) - case word[0] == '.': - l.emit(itemField) - case word == "true", word == "false": - l.emit(itemBool) - default: - l.emit(itemIdentifier) - } - break Loop - } - } - return lexInsideAction -} - -// lexField scans a field: .Alphanumeric. -// The . has been scanned. -func lexField(l *lexer) stateFn { - return lexFieldOrVariable(l, itemField) -} - -// lexVariable scans a Variable: $Alphanumeric. -// The $ has been scanned. -func lexVariable(l *lexer) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "$". - l.emit(itemVariable) - return lexInsideAction - } - return lexFieldOrVariable(l, itemVariable) -} - -// lexVariable scans a field or variable: [.$]Alphanumeric. -// The . or $ has been scanned. -func lexFieldOrVariable(l *lexer, typ itemType) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "." or "$". - if typ == itemVariable { - l.emit(itemVariable) - } else { - l.emit(itemDot) - } - return lexInsideAction - } - var r rune - for { - r = l.next() - if !isAlphaNumeric(r) { - l.backup() - break - } - } - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - l.emit(typ) - return lexInsideAction -} - -// atTerminator reports whether the input is at valid termination character to -// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases -// like "$x+2" not being acceptable without a space, in case we decide one -// day to implement arithmetic. 
-func (l *lexer) atTerminator() bool { - r := l.peek() - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '|', ':', ')', '(': - return true - } - // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will - // succeed but should fail) but only in extremely rare cases caused by willfully - // bad choice of delimiter. - if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { - return true - } - return false -} - -// lexChar scans a character constant. The initial quote is already -// scanned. Syntax checking is done by the parser. -func lexChar(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated character constant") - case '\'': - break Loop - } - } - l.emit(itemCharConstant) - return lexInsideAction -} - -// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This -// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" -// and "089" - but when it's wrong the input is invalid and the parser (via -// strconv) will notice. -func lexNumber(l *lexer) stateFn { - if !l.scanNumber() { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - if sign := l.peek(); sign == '+' || sign == '-' { - // Complex: 1+2i. No spaces, must end in 'i'. - if !l.scanNumber() || l.input[l.pos-1] != 'i' { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(itemComplex) - } else { - l.emit(itemNumber) - } - return lexInsideAction -} - -func (l *lexer) scanNumber() bool { - // Optional leading sign. - l.accept("+-") - // Is it hex? - digits := "0123456789" - if l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" - } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") - } - // Is it imaginary? - l.accept("i") - // Next thing mustn't be alphanumeric. - if isAlphaNumeric(l.peek()) { - l.next() - return false - } - return true -} - -// lexQuote scans a quoted string. -func lexQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated quoted string") - case '"': - break Loop - } - } - l.emit(itemString) - return lexInsideAction -} - -// lexRawQuote scans a raw quoted string. -func lexRawQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case eof, '\n': - return l.errorf("unterminated raw quoted string") - case '`': - break Loop - } - } - l.emit(itemRawString) - return lexInsideAction -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go deleted file mode 100644 index 55c37f6..0000000 --- a/vendor/github.com/alecthomas/template/parse/node.go +++ /dev/null @@ -1,834 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Parse nodes. - -package parse - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -var textFormat = "%s" // Changed to "%q" in tests for better error messages. - -// A Node is an element in the parse tree. The interface is trivial. -// The interface contains an unexported method so that only -// types local to this package can satisfy it. -type Node interface { - Type() NodeType - String() string - // Copy does a deep copy of the Node and all its components. - // To avoid type assertions, some XxxNodes also have specialized - // CopyXxx methods that return *XxxNode. - Copy() Node - Position() Pos // byte position of start of node in full original input string - // tree returns the containing *Tree. - // It is unexported so all implementations of Node are in this package. - tree() *Tree -} - -// NodeType identifies the type of a parse tree node. -type NodeType int - -// Pos represents a byte position in the original input text from which -// this template was parsed. -type Pos int - -func (p Pos) Position() Pos { - return p -} - -// Type returns itself and provides an easy default implementation -// for embedding in a Node. Embedded in all non-trivial Nodes. -func (t NodeType) Type() NodeType { - return t -} - -const ( - NodeText NodeType = iota // Plain text. - NodeAction // A non-control action such as a field evaluation. - NodeBool // A boolean constant. - NodeChain // A sequence of field accesses. - NodeCommand // An element of a pipeline. - NodeDot // The cursor, dot. - nodeElse // An else action. Not added to tree. - nodeEnd // An end action. Not added to tree. - NodeField // A field or method name. - NodeIdentifier // An identifier; always a function name. - NodeIf // An if action. - NodeList // A list of Nodes. - NodeNil // An untyped nil constant. - NodeNumber // A numerical constant. - NodePipe // A pipeline of commands. - NodeRange // A range action. - NodeString // A string constant. - NodeTemplate // A template invocation action. - NodeVariable // A $ variable. - NodeWith // A with action. -) - -// Nodes. - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Pos - tr *Tree - Nodes []Node // The element nodes in lexical order. -} - -func (t *Tree) newList(pos Pos) *ListNode { - return &ListNode{tr: t, NodeType: NodeList, Pos: pos} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) tree() *Tree { - return l.tr -} - -func (l *ListNode) String() string { - b := new(bytes.Buffer) - for _, n := range l.Nodes { - fmt.Fprint(b, n) - } - return b.String() -} - -func (l *ListNode) CopyList() *ListNode { - if l == nil { - return l - } - n := l.tr.newList(l.Pos) - for _, elem := range l.Nodes { - n.append(elem.Copy()) - } - return n -} - -func (l *ListNode) Copy() Node { - return l.CopyList() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Pos - tr *Tree - Text []byte // The text; may span newlines. 
-} - -func (t *Tree) newText(pos Pos, text string) *TextNode { - return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} -} - -func (t *TextNode) String() string { - return fmt.Sprintf(textFormat, t.Text) -} - -func (t *TextNode) tree() *Tree { - return t.tr -} - -func (t *TextNode) Copy() Node { - return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} -} - -// PipeNode holds a pipeline with optional declaration -type PipeNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Decl []*VariableNode // Variable declarations in lexical order. - Cmds []*CommandNode // The commands in lexical order. -} - -func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { - return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} -} - -func (p *PipeNode) append(command *CommandNode) { - p.Cmds = append(p.Cmds, command) -} - -func (p *PipeNode) String() string { - s := "" - if len(p.Decl) > 0 { - for i, v := range p.Decl { - if i > 0 { - s += ", " - } - s += v.String() - } - s += " := " - } - for i, c := range p.Cmds { - if i > 0 { - s += " | " - } - s += c.String() - } - return s -} - -func (p *PipeNode) tree() *Tree { - return p.tr -} - -func (p *PipeNode) CopyPipe() *PipeNode { - if p == nil { - return p - } - var decl []*VariableNode - for _, d := range p.Decl { - decl = append(decl, d.Copy().(*VariableNode)) - } - n := p.tr.newPipeline(p.Pos, p.Line, decl) - for _, c := range p.Cmds { - n.append(c.Copy().(*CommandNode)) - } - return n -} - -func (p *PipeNode) Copy() Node { - return p.CopyPipe() -} - -// ActionNode holds an action (something bounded by delimiters). -// Control actions have their own nodes; ActionNode represents simple -// ones such as field evaluations and parenthesized pipelines. -type ActionNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline in the action. -} - -func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { - return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} -} - -func (a *ActionNode) String() string { - return fmt.Sprintf("{{%s}}", a.Pipe) - -} - -func (a *ActionNode) tree() *Tree { - return a.tr -} - -func (a *ActionNode) Copy() Node { - return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) - -} - -// CommandNode holds a command (a pipeline inside an evaluating action). -type CommandNode struct { - NodeType - Pos - tr *Tree - Args []Node // Arguments in lexical order: Identifier, field, or constant. -} - -func (t *Tree) newCommand(pos Pos) *CommandNode { - return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} -} - -func (c *CommandNode) append(arg Node) { - c.Args = append(c.Args, arg) -} - -func (c *CommandNode) String() string { - s := "" - for i, arg := range c.Args { - if i > 0 { - s += " " - } - if arg, ok := arg.(*PipeNode); ok { - s += "(" + arg.String() + ")" - continue - } - s += arg.String() - } - return s -} - -func (c *CommandNode) tree() *Tree { - return c.tr -} - -func (c *CommandNode) Copy() Node { - if c == nil { - return c - } - n := c.tr.newCommand(c.Pos) - for _, c := range c.Args { - n.append(c.Copy()) - } - return n -} - -// IdentifierNode holds an identifier. -type IdentifierNode struct { - NodeType - Pos - tr *Tree - Ident string // The identifier's name. 
-} - -// NewIdentifier returns a new IdentifierNode with the given identifier name. -func NewIdentifier(ident string) *IdentifierNode { - return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} -} - -// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { - i.Pos = pos - return i -} - -// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { - i.tr = t - return i -} - -func (i *IdentifierNode) String() string { - return i.Ident -} - -func (i *IdentifierNode) tree() *Tree { - return i.tr -} - -func (i *IdentifierNode) Copy() Node { - return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) -} - -// VariableNode holds a list of variable names, possibly with chained field -// accesses. The dollar sign is part of the (first) name. -type VariableNode struct { - NodeType - Pos - tr *Tree - Ident []string // Variable name and fields in lexical order. -} - -func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { - return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} -} - -func (v *VariableNode) String() string { - s := "" - for i, id := range v.Ident { - if i > 0 { - s += "." - } - s += id - } - return s -} - -func (v *VariableNode) tree() *Tree { - return v.tr -} - -func (v *VariableNode) Copy() Node { - return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} -} - -// DotNode holds the special identifier '.'. -type DotNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newDot(pos Pos) *DotNode { - return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} -} - -func (d *DotNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeDot -} - -func (d *DotNode) String() string { - return "." -} - -func (d *DotNode) tree() *Tree { - return d.tr -} - -func (d *DotNode) Copy() Node { - return d.tr.newDot(d.Pos) -} - -// NilNode holds the special identifier 'nil' representing an untyped nil constant. -type NilNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newNil(pos Pos) *NilNode { - return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} -} - -func (n *NilNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeNil -} - -func (n *NilNode) String() string { - return "nil" -} - -func (n *NilNode) tree() *Tree { - return n.tr -} - -func (n *NilNode) Copy() Node { - return n.tr.newNil(n.Pos) -} - -// FieldNode holds a field (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The period is dropped from each ident. -type FieldNode struct { - NodeType - Pos - tr *Tree - Ident []string // The identifiers in lexical order. -} - -func (t *Tree) newField(pos Pos, ident string) *FieldNode { - return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period -} - -func (f *FieldNode) String() string { - s := "" - for _, id := range f.Ident { - s += "." 
+ id - } - return s -} - -func (f *FieldNode) tree() *Tree { - return f.tr -} - -func (f *FieldNode) Copy() Node { - return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} -} - -// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The periods are dropped from each ident. -type ChainNode struct { - NodeType - Pos - tr *Tree - Node Node - Field []string // The identifiers in lexical order. -} - -func (t *Tree) newChain(pos Pos, node Node) *ChainNode { - return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} -} - -// Add adds the named field (which should start with a period) to the end of the chain. -func (c *ChainNode) Add(field string) { - if len(field) == 0 || field[0] != '.' { - panic("no dot in field") - } - field = field[1:] // Remove leading dot. - if field == "" { - panic("empty field") - } - c.Field = append(c.Field, field) -} - -func (c *ChainNode) String() string { - s := c.Node.String() - if _, ok := c.Node.(*PipeNode); ok { - s = "(" + s + ")" - } - for _, field := range c.Field { - s += "." + field - } - return s -} - -func (c *ChainNode) tree() *Tree { - return c.tr -} - -func (c *ChainNode) Copy() Node { - return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} -} - -// BoolNode holds a boolean constant. -type BoolNode struct { - NodeType - Pos - tr *Tree - True bool // The value of the boolean constant. -} - -func (t *Tree) newBool(pos Pos, true bool) *BoolNode { - return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} -} - -func (b *BoolNode) String() string { - if b.True { - return "true" - } - return "false" -} - -func (b *BoolNode) tree() *Tree { - return b.tr -} - -func (b *BoolNode) Copy() Node { - return b.tr.newBool(b.Pos, b.True) -} - -// NumberNode holds a number: signed or unsigned integer, float, or complex. -// The value is parsed and stored under all the types that can represent the value. -// This simulates in a small amount of code the behavior of Go's ideal constants. -type NumberNode struct { - NodeType - Pos - tr *Tree - IsInt bool // Number has an integral value. - IsUint bool // Number has an unsigned integral value. - IsFloat bool // Number has a floating-point value. - IsComplex bool // Number is complex. - Int64 int64 // The signed integer value. - Uint64 uint64 // The unsigned integer value. - Float64 float64 // The floating-point value. - Complex128 complex128 // The complex value. - Text string // The original textual representation from the input. -} - -func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { - n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} - switch typ { - case itemCharConstant: - rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) - if err != nil { - return nil, err - } - if tail != "'" { - return nil, fmt.Errorf("malformed character constant: %s", text) - } - n.Int64 = int64(rune) - n.IsInt = true - n.Uint64 = uint64(rune) - n.IsUint = true - n.Float64 = float64(rune) // odd but those are the rules. - n.IsFloat = true - return n, nil - case itemComplex: - // fmt.Sscan can parse the pair, so let it do the work. - if _, err := fmt.Sscan(text, &n.Complex128); err != nil { - return nil, err - } - n.IsComplex = true - n.simplifyComplex() - return n, nil - } - // Imaginary constants can only be complex unless they are zero. 
- if len(text) > 0 && text[len(text)-1] == 'i' { - f, err := strconv.ParseFloat(text[:len(text)-1], 64) - if err == nil { - n.IsComplex = true - n.Complex128 = complex(0, f) - n.simplifyComplex() - return n, nil - } - } - // Do integer test first so we get 0x123 etc. - u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. - if err == nil { - n.IsUint = true - n.Uint64 = u - } - i, err := strconv.ParseInt(text, 0, 64) - if err == nil { - n.IsInt = true - n.Int64 = i - if i == 0 { - n.IsUint = true // in case of -0. - n.Uint64 = u - } - } - // If an integer extraction succeeded, promote the float. - if n.IsInt { - n.IsFloat = true - n.Float64 = float64(n.Int64) - } else if n.IsUint { - n.IsFloat = true - n.Float64 = float64(n.Uint64) - } else { - f, err := strconv.ParseFloat(text, 64) - if err == nil { - n.IsFloat = true - n.Float64 = f - // If a floating-point extraction succeeded, extract the int if needed. - if !n.IsInt && float64(int64(f)) == f { - n.IsInt = true - n.Int64 = int64(f) - } - if !n.IsUint && float64(uint64(f)) == f { - n.IsUint = true - n.Uint64 = uint64(f) - } - } - } - if !n.IsInt && !n.IsUint && !n.IsFloat { - return nil, fmt.Errorf("illegal number syntax: %q", text) - } - return n, nil -} - -// simplifyComplex pulls out any other types that are represented by the complex number. -// These all require that the imaginary part be zero. -func (n *NumberNode) simplifyComplex() { - n.IsFloat = imag(n.Complex128) == 0 - if n.IsFloat { - n.Float64 = real(n.Complex128) - n.IsInt = float64(int64(n.Float64)) == n.Float64 - if n.IsInt { - n.Int64 = int64(n.Float64) - } - n.IsUint = float64(uint64(n.Float64)) == n.Float64 - if n.IsUint { - n.Uint64 = uint64(n.Float64) - } - } -} - -func (n *NumberNode) String() string { - return n.Text -} - -func (n *NumberNode) tree() *Tree { - return n.tr -} - -func (n *NumberNode) Copy() Node { - nn := new(NumberNode) - *nn = *n // Easy, fast, correct. - return nn -} - -// StringNode holds a string constant. The value has been "unquoted". -type StringNode struct { - NodeType - Pos - tr *Tree - Quoted string // The original text of the string, with quotes. - Text string // The string, after quote processing. -} - -func (t *Tree) newString(pos Pos, orig, text string) *StringNode { - return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} -} - -func (s *StringNode) String() string { - return s.Quoted -} - -func (s *StringNode) tree() *Tree { - return s.tr -} - -func (s *StringNode) Copy() Node { - return s.tr.newString(s.Pos, s.Quoted, s.Text) -} - -// endNode represents an {{end}} action. -// It does not appear in the final parse tree. -type endNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newEnd(pos Pos) *endNode { - return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} -} - -func (e *endNode) String() string { - return "{{end}}" -} - -func (e *endNode) tree() *Tree { - return e.tr -} - -func (e *endNode) Copy() Node { - return e.tr.newEnd(e.Pos) -} - -// elseNode represents an {{else}} action. Does not appear in the final tree. 
-type elseNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) -} - -func (t *Tree) newElse(pos Pos, line int) *elseNode { - return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} -} - -func (e *elseNode) Type() NodeType { - return nodeElse -} - -func (e *elseNode) String() string { - return "{{else}}" -} - -func (e *elseNode) tree() *Tree { - return e.tr -} - -func (e *elseNode) Copy() Node { - return e.tr.newElse(e.Pos, e.Line) -} - -// BranchNode is the common representation of if, range, and with. -type BranchNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline to be evaluated. - List *ListNode // What to execute if the value is non-empty. - ElseList *ListNode // What to execute if the value is empty (nil if absent). -} - -func (b *BranchNode) String() string { - name := "" - switch b.NodeType { - case NodeIf: - name = "if" - case NodeRange: - name = "range" - case NodeWith: - name = "with" - default: - panic("unknown branch type") - } - if b.ElseList != nil { - return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) - } - return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) -} - -func (b *BranchNode) tree() *Tree { - return b.tr -} - -func (b *BranchNode) Copy() Node { - switch b.NodeType { - case NodeIf: - return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeRange: - return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeWith: - return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - default: - panic("unknown branch type") - } -} - -// IfNode represents an {{if}} action and its commands. -type IfNode struct { - BranchNode -} - -func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { - return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (i *IfNode) Copy() Node { - return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) -} - -// RangeNode represents a {{range}} action and its commands. -type RangeNode struct { - BranchNode -} - -func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { - return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (r *RangeNode) Copy() Node { - return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) -} - -// WithNode represents a {{with}} action and its commands. -type WithNode struct { - BranchNode -} - -func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { - return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (w *WithNode) Copy() Node { - return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) -} - -// TemplateNode represents a {{template}} action. -type TemplateNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Name string // The name of the template (unquoted). - Pipe *PipeNode // The command to evaluate as dot for the template. 
-} - -func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { - return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} -} - -func (t *TemplateNode) String() string { - if t.Pipe == nil { - return fmt.Sprintf("{{template %q}}", t.Name) - } - return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) -} - -func (t *TemplateNode) tree() *Tree { - return t.tr -} - -func (t *TemplateNode) Copy() Node { - return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) -} diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go deleted file mode 100644 index 0d77ade..0000000 --- a/vendor/github.com/alecthomas/template/parse/parse.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package parse builds parse trees for templates as defined by text/template -// and html/template. Clients should use those packages to construct templates -// rather than this one, which provides shared internal data structures not -// intended for general use. -package parse - -import ( - "bytes" - "fmt" - "runtime" - "strconv" - "strings" -) - -// Tree is the representation of a single parsed template. -type Tree struct { - Name string // name of the template represented by the tree. - ParseName string // name of the top-level template during parsing, for error messages. - Root *ListNode // top-level root of the tree. - text string // text parsed to create the template (or its parent) - // Parsing only; cleared after parse. - funcs []map[string]interface{} - lex *lexer - token [3]item // three-token lookahead for parser. - peekCount int - vars []string // variables defined at the moment. -} - -// Copy returns a copy of the Tree. Any parsing state is discarded. -func (t *Tree) Copy() *Tree { - if t == nil { - return nil - } - return &Tree{ - Name: t.Name, - ParseName: t.ParseName, - Root: t.Root.CopyList(), - text: t.text, - } -} - -// Parse returns a map from template name to parse.Tree, created by parsing the -// templates described in the argument string. The top-level template will be -// given the specified name. If an error is encountered, parsing stops and an -// empty map is returned with the error. -func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { - treeSet = make(map[string]*Tree) - t := New(name) - t.text = text - _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) - return -} - -// next returns the next token. -func (t *Tree) next() item { - if t.peekCount > 0 { - t.peekCount-- - } else { - t.token[0] = t.lex.nextItem() - } - return t.token[t.peekCount] -} - -// backup backs the input stream up one token. -func (t *Tree) backup() { - t.peekCount++ -} - -// backup2 backs the input stream up two tokens. -// The zeroth token is already there. -func (t *Tree) backup2(t1 item) { - t.token[1] = t1 - t.peekCount = 2 -} - -// backup3 backs the input stream up three tokens -// The zeroth token is already there. -func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. - t.token[1] = t1 - t.token[2] = t2 - t.peekCount = 3 -} - -// peek returns but does not consume the next token. 
-func (t *Tree) peek() item { - if t.peekCount > 0 { - return t.token[t.peekCount-1] - } - t.peekCount = 1 - t.token[0] = t.lex.nextItem() - return t.token[0] -} - -// nextNonSpace returns the next non-space token. -func (t *Tree) nextNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - return token -} - -// peekNonSpace returns but does not consume the next non-space token. -func (t *Tree) peekNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - t.backup() - return token -} - -// Parsing. - -// New allocates a new parse tree with the given name. -func New(name string, funcs ...map[string]interface{}) *Tree { - return &Tree{ - Name: name, - funcs: funcs, - } -} - -// ErrorContext returns a textual representation of the location of the node in the input text. -// The receiver is only used when the node does not have a pointer to the tree inside, -// which can occur in old code. -func (t *Tree) ErrorContext(n Node) (location, context string) { - pos := int(n.Position()) - tree := n.tree() - if tree == nil { - tree = t - } - text := tree.text[:pos] - byteNum := strings.LastIndex(text, "\n") - if byteNum == -1 { - byteNum = pos // On first line. - } else { - byteNum++ // After the newline. - byteNum = pos - byteNum - } - lineNum := 1 + strings.Count(text, "\n") - context = n.String() - if len(context) > 20 { - context = fmt.Sprintf("%.20s...", context) - } - return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context -} - -// errorf formats the error and terminates processing. -func (t *Tree) errorf(format string, args ...interface{}) { - t.Root = nil - format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -// error terminates processing. -func (t *Tree) error(err error) { - t.errorf("%s", err) -} - -// expect consumes the next token and guarantees it has the required type. -func (t *Tree) expect(expected itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected { - t.unexpected(token, context) - } - return token -} - -// expectOneOf consumes the next token and guarantees it has one of the required types. -func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected1 && token.typ != expected2 { - t.unexpected(token, context) - } - return token -} - -// unexpected complains about the token and terminates processing. -func (t *Tree) unexpected(token item, context string) { - t.errorf("unexpected %s in %s", token, context) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (t *Tree) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - if t != nil { - t.stopParse() - } - *errp = e.(error) - } - return -} - -// startParse initializes the parser, using the lexer. -func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { - t.Root = nil - t.lex = lex - t.vars = []string{"$"} - t.funcs = funcs -} - -// stopParse terminates parsing. -func (t *Tree) stopParse() { - t.lex = nil - t.vars = nil - t.funcs = nil -} - -// Parse parses the template definition string to construct a representation of -// the template for execution. If either action delimiter string is empty, the -// default ("{{" or "}}") is used. Embedded template definitions are added to -// the treeSet map. 
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { - defer t.recover(&err) - t.ParseName = t.Name - t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) - t.text = text - t.parse(treeSet) - t.add(treeSet) - t.stopParse() - return t, nil -} - -// add adds tree to the treeSet. -func (t *Tree) add(treeSet map[string]*Tree) { - tree := treeSet[t.Name] - if tree == nil || IsEmptyTree(tree.Root) { - treeSet[t.Name] = t - return - } - if !IsEmptyTree(t.Root) { - t.errorf("template: multiple definition of template %q", t.Name) - } -} - -// IsEmptyTree reports whether this tree (node) is empty of everything but space. -func IsEmptyTree(n Node) bool { - switch n := n.(type) { - case nil: - return true - case *ActionNode: - case *IfNode: - case *ListNode: - for _, node := range n.Nodes { - if !IsEmptyTree(node) { - return false - } - } - return true - case *RangeNode: - case *TemplateNode: - case *TextNode: - return len(bytes.TrimSpace(n.Text)) == 0 - case *WithNode: - default: - panic("unknown node: " + n.String()) - } - return false -} - -// parse is the top-level parser for a template, essentially the same -// as itemList except it also parses {{define}} actions. -// It runs to EOF. -func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { - t.Root = t.newList(t.peek().pos) - for t.peek().typ != itemEOF { - if t.peek().typ == itemLeftDelim { - delim := t.next() - if t.nextNonSpace().typ == itemDefine { - newT := New("definition") // name will be updated once we know it. - newT.text = t.text - newT.ParseName = t.ParseName - newT.startParse(t.funcs, t.lex) - newT.parseDefinition(treeSet) - continue - } - t.backup2(delim) - } - n := t.textOrAction() - if n.Type() == nodeEnd { - t.errorf("unexpected %s", n) - } - t.Root.append(n) - } - return nil -} - -// parseDefinition parses a {{define}} ... {{end}} template definition and -// installs the definition in the treeSet map. The "define" keyword has already -// been scanned. -func (t *Tree) parseDefinition(treeSet map[string]*Tree) { - const context = "define clause" - name := t.expectOneOf(itemString, itemRawString, context) - var err error - t.Name, err = strconv.Unquote(name.val) - if err != nil { - t.error(err) - } - t.expect(itemRightDelim, context) - var end Node - t.Root, end = t.itemList() - if end.Type() != nodeEnd { - t.errorf("unexpected %s in %s", end, context) - } - t.add(treeSet) - t.stopParse() -} - -// itemList: -// textOrAction* -// Terminates at {{end}} or {{else}}, returned separately. -func (t *Tree) itemList() (list *ListNode, next Node) { - list = t.newList(t.peekNonSpace().pos) - for t.peekNonSpace().typ != itemEOF { - n := t.textOrAction() - switch n.Type() { - case nodeEnd, nodeElse: - return list, n - } - list.append(n) - } - t.errorf("unexpected EOF") - return -} - -// textOrAction: -// text | action -func (t *Tree) textOrAction() Node { - switch token := t.nextNonSpace(); token.typ { - case itemElideNewline: - return t.elideNewline() - case itemText: - return t.newText(token.pos, token.val) - case itemLeftDelim: - return t.action() - default: - t.unexpected(token, "input") - } - return nil -} - -// elideNewline: -// Remove newlines trailing rightDelim if \\ is present. 
-func (t *Tree) elideNewline() Node { - token := t.peek() - if token.typ != itemText { - t.unexpected(token, "input") - return nil - } - - t.next() - stripped := strings.TrimLeft(token.val, "\n\r") - diff := len(token.val) - len(stripped) - if diff > 0 { - // This is a bit nasty. We mutate the token in-place to remove - // preceding newlines. - token.pos += Pos(diff) - token.val = stripped - } - return t.newText(token.pos, token.val) -} - -// Action: -// control -// command ("|" command)* -// Left delim is past. Now get actions. -// First word could be a keyword such as range. -func (t *Tree) action() (n Node) { - switch token := t.nextNonSpace(); token.typ { - case itemElse: - return t.elseControl() - case itemEnd: - return t.endControl() - case itemIf: - return t.ifControl() - case itemRange: - return t.rangeControl() - case itemTemplate: - return t.templateControl() - case itemWith: - return t.withControl() - } - t.backup() - // Do not pop variables; they persist until "end". - return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) -} - -// Pipeline: -// declarations? command ('|' command)* -func (t *Tree) pipeline(context string) (pipe *PipeNode) { - var decl []*VariableNode - pos := t.peekNonSpace().pos - // Are there declarations? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. - tokenAfterVariable := t.peek() - if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - decl = append(decl, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemChar && next.val == "," { - if context == "range" && len(decl) < 2 { - continue - } - t.errorf("too many declarations in %s", context) - } - } else if tokenAfterVariable.typ == itemSpace { - t.backup3(v, tokenAfterVariable) - } else { - t.backup2(v) - } - } - break - } - pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) - for { - switch token := t.nextNonSpace(); token.typ { - case itemRightDelim, itemRightParen: - if len(pipe.Cmds) == 0 { - t.errorf("missing value for %s", context) - } - if token.typ == itemRightParen { - t.backup() - } - return - case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, - itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: - t.backup() - pipe.append(t.command()) - default: - t.unexpected(token, context) - } - } -} - -func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { - defer t.popVars(len(t.vars)) - line = t.lex.lineNumber() - pipe = t.pipeline(context) - var next Node - list, next = t.itemList() - switch next.Type() { - case nodeEnd: //done - case nodeElse: - if allowElseIf { - // Special case for "else if". If the "else" is followed immediately by an "if", - // the elseControl will have left the "if" token pending. Treat - // {{if a}}_{{else if b}}_{{end}} - // as - // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. - // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} - // is assumed. This technique works even for long if-else-if chains. - // TODO: Should we allow else-if in with and range? 
- if t.peek().typ == itemIf { - t.next() // Consume the "if" token. - elseList = t.newList(next.Position()) - elseList.append(t.ifControl()) - // Do not consume the next item - only one {{end}} required. - break - } - } - elseList, next = t.itemList() - if next.Type() != nodeEnd { - t.errorf("expected end; found %s", next) - } - } - return pipe.Position(), line, pipe, list, elseList -} - -// If: -// {{if pipeline}} itemList {{end}} -// {{if pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) ifControl() Node { - return t.newIf(t.parseControl(true, "if")) -} - -// Range: -// {{range pipeline}} itemList {{end}} -// {{range pipeline}} itemList {{else}} itemList {{end}} -// Range keyword is past. -func (t *Tree) rangeControl() Node { - return t.newRange(t.parseControl(false, "range")) -} - -// With: -// {{with pipeline}} itemList {{end}} -// {{with pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) withControl() Node { - return t.newWith(t.parseControl(false, "with")) -} - -// End: -// {{end}} -// End keyword is past. -func (t *Tree) endControl() Node { - return t.newEnd(t.expect(itemRightDelim, "end").pos) -} - -// Else: -// {{else}} -// Else keyword is past. -func (t *Tree) elseControl() Node { - // Special case for "else if". - peek := t.peekNonSpace() - if peek.typ == itemIf { - // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". - return t.newElse(peek.pos, t.lex.lineNumber()) - } - return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) -} - -// Template: -// {{template stringValue pipeline}} -// Template keyword is past. The name must be something that can evaluate -// to a string. -func (t *Tree) templateControl() Node { - var name string - token := t.nextNonSpace() - switch token.typ { - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - name = s - default: - t.unexpected(token, "template invocation") - } - var pipe *PipeNode - if t.nextNonSpace().typ != itemRightDelim { - t.backup() - // Do not pop variables; they persist until "end". - pipe = t.pipeline("template") - } - return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) -} - -// command: -// operand (space operand)* -// space-separated arguments up to a pipeline character or right delimiter. -// we consume the pipe character but leave the right delim to terminate the action. -func (t *Tree) command() *CommandNode { - cmd := t.newCommand(t.peekNonSpace().pos) - for { - t.peekNonSpace() // skip leading spaces. - operand := t.operand() - if operand != nil { - cmd.append(operand) - } - switch token := t.next(); token.typ { - case itemSpace: - continue - case itemError: - t.errorf("%s", token.val) - case itemRightDelim, itemRightParen: - t.backup() - case itemPipe: - default: - t.errorf("unexpected %s in operand; missing space?", token) - } - break - } - if len(cmd.Args) == 0 { - t.errorf("empty command") - } - return cmd -} - -// operand: -// term .Field* -// An operand is a space-separated component of a command, -// a term possibly followed by field accesses. -// A nil return means the next item is not an operand. 
-func (t *Tree) operand() Node { - node := t.term() - if node == nil { - return nil - } - if t.peek().typ == itemField { - chain := t.newChain(t.peek().pos, node) - for t.peek().typ == itemField { - chain.Add(t.next().val) - } - // Compatibility with original API: If the term is of type NodeField - // or NodeVariable, just put more fields on the original. - // Otherwise, keep the Chain node. - // TODO: Switch to Chains always when we can. - switch node.Type() { - case NodeField: - node = t.newField(chain.Position(), chain.String()) - case NodeVariable: - node = t.newVariable(chain.Position(), chain.String()) - default: - node = chain - } - } - return node -} - -// term: -// literal (number, string, nil, boolean) -// function (identifier) -// . -// .Field -// $ -// '(' pipeline ')' -// A term is a simple "expression". -// A nil return means the next item is not a term. -func (t *Tree) term() Node { - switch token := t.nextNonSpace(); token.typ { - case itemError: - t.errorf("%s", token.val) - case itemIdentifier: - if !t.hasFunction(token.val) { - t.errorf("function %q not defined", token.val) - } - return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) - case itemDot: - return t.newDot(token.pos) - case itemNil: - return t.newNil(token.pos) - case itemVariable: - return t.useVar(token.pos, token.val) - case itemField: - return t.newField(token.pos, token.val) - case itemBool: - return t.newBool(token.pos, token.val == "true") - case itemCharConstant, itemComplex, itemNumber: - number, err := t.newNumber(token.pos, token.val, token.typ) - if err != nil { - t.error(err) - } - return number - case itemLeftParen: - pipe := t.pipeline("parenthesized pipeline") - if token := t.next(); token.typ != itemRightParen { - t.errorf("unclosed right paren: unexpected %s", token) - } - return pipe - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - return t.newString(token.pos, token.val, s) - } - t.backup() - return nil -} - -// hasFunction reports if a function name exists in the Tree's maps. -func (t *Tree) hasFunction(name string) bool { - for _, funcMap := range t.funcs { - if funcMap == nil { - continue - } - if funcMap[name] != nil { - return true - } - } - return false -} - -// popVars trims the variable list to the specified length -func (t *Tree) popVars(n int) { - t.vars = t.vars[:n] -} - -// useVar returns a node for a variable reference. It errors if the -// variable is not defined. -func (t *Tree) useVar(pos Pos, name string) Node { - v := t.newVariable(pos, name) - for _, varName := range t.vars { - if varName == v.Ident[0] { - return v - } - } - t.errorf("undefined variable %q", v.Ident[0]) - return nil -} diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go deleted file mode 100644 index 447ed2a..0000000 --- a/vendor/github.com/alecthomas/template/template.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "fmt" - "reflect" - - "github.com/alecthomas/template/parse" -) - -// common holds the information shared by related templates. -type common struct { - tmpl map[string]*Template - // We use two maps, one for parsing and one for execution. - // This separation makes the API cleaner since it doesn't - // expose reflection to the client. 
- parseFuncs FuncMap - execFuncs map[string]reflect.Value -} - -// Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated -// as unexported by all other clients. -type Template struct { - name string - *parse.Tree - *common - leftDelim string - rightDelim string -} - -// New allocates a new template with the given name. -func New(name string) *Template { - return &Template{ - name: name, - } -} - -// Name returns the name of the template. -func (t *Template) Name() string { - return t.name -} - -// New allocates a new template associated with the given one and with the same -// delimiters. The association, which is transitive, allows one template to -// invoke another with a {{template}} action. -func (t *Template) New(name string) *Template { - t.init() - return &Template{ - name: name, - common: t.common, - leftDelim: t.leftDelim, - rightDelim: t.rightDelim, - } -} - -func (t *Template) init() { - if t.common == nil { - t.common = new(common) - t.tmpl = make(map[string]*Template) - t.parseFuncs = make(FuncMap) - t.execFuncs = make(map[string]reflect.Value) - } -} - -// Clone returns a duplicate of the template, including all associated -// templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add -// templates to the copy but not to the original. Clone can be used to prepare -// common templates and use them with variant definitions for other templates -// by adding the variants after the clone is made. -func (t *Template) Clone() (*Template, error) { - nt := t.copy(nil) - nt.init() - nt.tmpl[t.name] = nt - for k, v := range t.tmpl { - if k == t.name { // Already installed. - continue - } - // The associated templates share nt's common structure. - tmpl := v.copy(nt.common) - nt.tmpl[k] = tmpl - } - for k, v := range t.parseFuncs { - nt.parseFuncs[k] = v - } - for k, v := range t.execFuncs { - nt.execFuncs[k] = v - } - return nt, nil -} - -// copy returns a shallow copy of t, with common set to the argument. -func (t *Template) copy(c *common) *Template { - nt := New(t.name) - nt.Tree = t.Tree - nt.common = c - nt.leftDelim = t.leftDelim - nt.rightDelim = t.rightDelim - return nt -} - -// AddParseTree creates a new template with the name and parse tree -// and associates it with t. -func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { - if t.common != nil && t.tmpl[name] != nil { - return nil, fmt.Errorf("template: redefinition of template %q", name) - } - nt := t.New(name) - nt.Tree = tree - t.tmpl[name] = nt - return nt, nil -} - -// Templates returns a slice of the templates associated with t, including t -// itself. -func (t *Template) Templates() []*Template { - if t.common == nil { - return nil - } - // Return a slice so we don't expose the map. - m := make([]*Template, 0, len(t.tmpl)) - for _, v := range t.tmpl { - m = append(m, v) - } - return m -} - -// Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template -// definitions will inherit the settings. An empty delimiter stands for the -// corresponding default: {{ or }}. -// The return value is the template, so calls can be chained. -func (t *Template) Delims(left, right string) *Template { - t.leftDelim = left - t.rightDelim = right - return t -} - -// Funcs adds the elements of the argument map to the template's function map. 
-// It panics if a value in the map is not a function with appropriate return -// type. However, it is legal to overwrite elements of the map. The return -// value is the template, so calls can be chained. -func (t *Template) Funcs(funcMap FuncMap) *Template { - t.init() - addValueFuncs(t.execFuncs, funcMap) - addFuncs(t.parseFuncs, funcMap) - return t -} - -// Lookup returns the template with the given name that is associated with t, -// or nil if there is no such template. -func (t *Template) Lookup(name string) *Template { - if t.common == nil { - return nil - } - return t.tmpl[name] -} - -// Parse parses a string into a template. Nested template definitions will be -// associated with the top-level template t. Parse may be called multiple times -// to parse definitions of templates to associate with t. It is an error if a -// resulting template is non-empty (contains content other than template -// definitions) and would replace a non-empty template with the same name. -// (In multiple calls to Parse with the same receiver template, only one call -// can contain text other than space, comments, and template definitions.) -func (t *Template) Parse(text string) (*Template, error) { - t.init() - trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins) - if err != nil { - return nil, err - } - // Add the newly parsed trees, including the one for t, into our common structure. - for name, tree := range trees { - // If the name we parsed is the name of this template, overwrite this template. - // The associate method checks it's not a redefinition. - tmpl := t - if name != t.name { - tmpl = t.New(name) - } - // Even if t == tmpl, we need to install it in the common.tmpl map. - if replace, err := t.associate(tmpl, tree); err != nil { - return nil, err - } else if replace { - tmpl.Tree = tree - } - tmpl.leftDelim = t.leftDelim - tmpl.rightDelim = t.rightDelim - } - return t, nil -} - -// associate installs the new template into the group of templates associated -// with t. It is an error to reuse a name except to overwrite an empty -// template. The two are already known to share the common structure. -// The boolean return value reports wither to store this tree as t.Tree. -func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) { - if new.common != t.common { - panic("internal error: associate not common") - } - name := new.name - if old := t.tmpl[name]; old != nil { - oldIsEmpty := parse.IsEmptyTree(old.Root) - newIsEmpty := parse.IsEmptyTree(tree.Root) - if newIsEmpty { - // Whether old is empty or not, new is empty; no reason to replace old. 
- return false, nil - } - if !oldIsEmpty { - return false, fmt.Errorf("template: redefinition of template %q", name) - } - } - t.tmpl[name] = new - return true, nil -} diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING deleted file mode 100644 index 2993ec0..0000000 --- a/vendor/github.com/alecthomas/units/COPYING +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 Alec Thomas - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md deleted file mode 100644 index bee884e..0000000 --- a/vendor/github.com/alecthomas/units/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Units - Helpful unit multipliers and functions for Go - -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. - -It allows for code like this: - -```go -n, err := ParseBase2Bytes("1KB") -// n == 1024 -n = units.Mebibyte * 512 -``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go deleted file mode 100644 index 61d0ca4..0000000 --- a/vendor/github.com/alecthomas/units/bytes.go +++ /dev/null @@ -1,85 +0,0 @@ -package units - -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, -// etc.). -type Base2Bytes int64 - -// Base-2 byte units. -const ( - Kibibyte Base2Bytes = 1024 - KiB = Kibibyte - Mebibyte = Kibibyte * 1024 - MiB = Mebibyte - Gibibyte = Mebibyte * 1024 - GiB = Gibibyte - Tebibyte = Gibibyte * 1024 - TiB = Tebibyte - Pebibyte = Tebibyte * 1024 - PiB = Pebibyte - Exbibyte = Pebibyte * 1024 - EiB = Exbibyte -) - -var ( - bytesUnitMap = MakeUnitMap("iB", "B", 1024) - oldBytesUnitMap = MakeUnitMap("B", "B", 1024) -) - -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB -// and KiB are both 1024. -// However "kB", which is the correct SI spelling of 1000 Bytes, is rejected. -func ParseBase2Bytes(s string) (Base2Bytes, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, oldBytesUnitMap) - } - return Base2Bytes(n), err -} - -func (b Base2Bytes) String() string { - return ToString(int64(b), 1024, "iB", "B") -} - -var ( - metricBytesUnitMap = MakeUnitMap("B", "B", 1000) -) - -// MetricBytes are SI byte units (1000 bytes in a kilobyte). -type MetricBytes SI - -// SI base-10 byte units. 
-const ( - Kilobyte MetricBytes = 1000 - KB = Kilobyte - Megabyte = Kilobyte * 1000 - MB = Megabyte - Gigabyte = Megabyte * 1000 - GB = Gigabyte - Terabyte = Gigabyte * 1000 - TB = Terabyte - Petabyte = Terabyte * 1000 - PB = Petabyte - Exabyte = Petabyte * 1000 - EB = Exabyte -) - -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. -func ParseMetricBytes(s string) (MetricBytes, error) { - n, err := ParseUnit(s, metricBytesUnitMap) - return MetricBytes(n), err -} - -// TODO: represents 1000B as uppercase "KB", while SI standard requires "kB". -func (m MetricBytes) String() string { - return ToString(int64(m), 1000, "B", "B") -} - -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, -// respectively. That is, KiB represents 1024 and kB, KB represent 1000. -func ParseStrictBytes(s string) (int64, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, metricBytesUnitMap) - } - return int64(n), err -} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go deleted file mode 100644 index 156ae38..0000000 --- a/vendor/github.com/alecthomas/units/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package units provides helpful unit multipliers and functions for Go. -// -// The goal of this package is to have functionality similar to the time [1] package. -// -// -// [1] http://golang.org/pkg/time/ -// -// It allows for code like this: -// -// n, err := ParseBase2Bytes("1KB") -// // n == 1024 -// n = units.Mebibyte * 512 -package units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go deleted file mode 100644 index 99b2fa4..0000000 --- a/vendor/github.com/alecthomas/units/si.go +++ /dev/null @@ -1,50 +0,0 @@ -package units - -// SI units. -type SI int64 - -// SI unit multiples. -const ( - Kilo SI = 1000 - Mega = Kilo * 1000 - Giga = Mega * 1000 - Tera = Giga * 1000 - Peta = Tera * 1000 - Exa = Peta * 1000 -) - -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { - res := map[string]float64{ - shortSuffix: 1, - // see below for "k" / "K" - "M" + suffix: float64(scale * scale), - "G" + suffix: float64(scale * scale * scale), - "T" + suffix: float64(scale * scale * scale * scale), - "P" + suffix: float64(scale * scale * scale * scale * scale), - "E" + suffix: float64(scale * scale * scale * scale * scale * scale), - } - - // Standard SI prefixes use lowercase "k" for kilo = 1000. - // For compatibility, and to be fool-proof, we accept both "k" and "K" in metric mode. - // - // However, official binary prefixes are always capitalized - "KiB" - - // and we specifically never parse "kB" as 1024B because: - // - // (1) people pedantic enough to use lowercase according to SI unlikely to abuse "k" to mean 1024 :-) - // - // (2) Use of capital K for 1024 was an informal tradition predating IEC prefixes: - // "The binary meaning of the kilobyte for 1024 bytes typically uses the symbol KB, with an - // uppercase letter K." - // -- https://en.wikipedia.org/wiki/Kilobyte#Base_2_(1024_bytes) - // "Capitalization of the letter K became the de facto standard for binary notation, although this - // could not be extended to higher powers, and use of the lowercase k did persist.[13][14][15]" - // -- https://en.wikipedia.org/wiki/Binary_prefix#History - // See also the extensive https://en.wikipedia.org/wiki/Timeline_of_binary_prefixes. 
- if scale == 1024 { - res["K"+suffix] = float64(scale) - } else { - res["k"+suffix] = float64(scale) - res["K"+suffix] = float64(scale) - } - return res -} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go deleted file mode 100644 index 6527e92..0000000 --- a/vendor/github.com/alecthomas/units/util.go +++ /dev/null @@ -1,138 +0,0 @@ -package units - -import ( - "errors" - "fmt" - "strings" -) - -var ( - siUnits = []string{"", "K", "M", "G", "T", "P", "E"} -) - -func ToString(n int64, scale int64, suffix, baseSuffix string) string { - mn := len(siUnits) - out := make([]string, mn) - for i, m := range siUnits { - if n%scale != 0 || i == 0 && n == 0 { - s := suffix - if i == 0 { - s = baseSuffix - } - out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) - } - n /= scale - if n == 0 { - break - } - } - return strings.Join(out, "") -} - -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed - -// leadingInt consumes the leading [0-9]* from s. -func leadingInt(s string) (x int64, rem string, err error) { - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - break - } - if x >= (1<<63-10)/10 { - // overflow - return 0, "", errLeadingInt - } - x = x*10 + int64(c) - '0' - } - return x, s[i:], nil -} - -func ParseUnit(s string, unitMap map[string]float64) (int64, error) { - // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ - orig := s - f := float64(0) - neg := false - - // Consume [-+]? - if s != "" { - c := s[0] - if c == '-' || c == '+' { - neg = c == '-' - s = s[1:] - } - } - // Special case: if all that is left is "0", this is zero. - if s == "0" { - return 0, nil - } - if s == "" { - return 0, errors.New("units: invalid " + orig) - } - for s != "" { - g := float64(0) // this element of the sequence - - var x int64 - var err error - - // The next character must be [0-9.] - if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { - return 0, errors.New("units: invalid " + orig) - } - // Consume [0-9]* - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - g = float64(x) - pre := pl != len(s) // whether we consumed anything before a period - - // Consume (\.[0-9]*)? - post := false - if s != "" && s[0] == '.' { - s = s[1:] - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - scale := 1.0 - for n := pl - len(s); n > 0; n-- { - scale *= 10 - } - g += float64(x) / scale - post = pl != len(s) - } - if !pre && !post { - // no digits (e.g. ".s" or "-.s") - return 0, errors.New("units: invalid " + orig) - } - - // Consume unit. - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c == '.' || ('0' <= c && c <= '9') { - break - } - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, errors.New("units: unknown unit " + u + " in " + orig) - } - - f += g * unit - } - - if neg { - f = -f - } - if f < float64(-1<<63) || f > float64(1<<63-1) { - return 0, errors.New("units: overflow parsing unit") - } - return int64(f), nil -} diff --git a/vendor/github.com/beevik/etree/.travis.yml b/vendor/github.com/beevik/etree/.travis.yml deleted file mode 100644 index f4cb25d..0000000 --- a/vendor/github.com/beevik/etree/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false - -go: - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -script: - - go vet ./... - - go test -v ./... 
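The `github.com/alecthomas/units` sources deleted above (bytes.go, si.go, util.go and their README) were vendored only as an indirect dependency of the old tooling; nothing in token-refresher imports them directly. As a rough sketch of what the removed helpers did, based solely on the `ParseBase2Bytes`, `ParseStrictBytes`, and `Mebibyte` identifiers visible in the deleted files (the standalone `main` wrapper is illustrative and not code from this repository):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// ParseBase2Bytes treats both "KB" and "KiB" as 1024 (see the deleted bytes.go).
	n, err := units.ParseBase2Bytes("1KB")
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(n)) // 1024

	// ParseStrictBytes distinguishes binary and metric suffixes:
	// "KiB" parses as 1024, "kB" as 1000.
	bin, _ := units.ParseStrictBytes("1KiB")
	met, _ := units.ParseStrictBytes("1kB")
	fmt.Println(bin, met) // 1024 1000

	// Multipliers compose arithmetically, e.g. 512 MiB.
	fmt.Println((512 * units.Mebibyte).String()) // 512MiB
}
```

The `// 1024` and `// 512MiB` comments follow the behavior documented in the deleted README and doc.go.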
diff --git a/vendor/github.com/beevik/etree/CONTRIBUTORS b/vendor/github.com/beevik/etree/CONTRIBUTORS deleted file mode 100644 index 03211a8..0000000 --- a/vendor/github.com/beevik/etree/CONTRIBUTORS +++ /dev/null @@ -1,10 +0,0 @@ -Brett Vickers (beevik) -Felix Geisendörfer (felixge) -Kamil Kisiel (kisielk) -Graham King (grahamking) -Matt Smith (ma314smith) -Michal Jemala (michaljemala) -Nicolas Piganeau (npiganeau) -Chris Brown (ccbrown) -Earncef Sequeira (earncef) -Gabriel de Labachelerie (wuzuf) diff --git a/vendor/github.com/beevik/etree/LICENSE b/vendor/github.com/beevik/etree/LICENSE deleted file mode 100644 index 26f1f77..0000000 --- a/vendor/github.com/beevik/etree/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2015-2019 Brett Vickers. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/etree/README.md b/vendor/github.com/beevik/etree/README.md deleted file mode 100644 index 08ec26b..0000000 --- a/vendor/github.com/beevik/etree/README.md +++ /dev/null @@ -1,205 +0,0 @@ -[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree) -[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree) - -etree -===== - -The etree package is a lightweight, pure go package that expresses XML in -the form of an element tree. Its design was inspired by the Python -[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html) -module. - -Some of the package's capabilities and features: - -* Represents XML documents as trees of elements for easy traversal. -* Imports, serializes, modifies or creates XML documents from scratch. -* Writes and reads XML to/from files, byte slices, strings and io interfaces. -* Performs simple or complex searches with lightweight XPath-like query APIs. -* Auto-indents XML using spaces or tabs for better readability. -* Implemented in pure go; depends only on standard go libraries. -* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml) - package. - -### Creating an XML document - -The following example creates an XML document from scratch using the etree -package and outputs its indented contents to stdout. 
-```go
-doc := etree.NewDocument()
-doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
-doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`)
-
-people := doc.CreateElement("People")
-people.CreateComment("These are all known people")
-
-jon := people.CreateElement("Person")
-jon.CreateAttr("name", "Jon")
-
-sally := people.CreateElement("Person")
-sally.CreateAttr("name", "Sally")
-
-doc.Indent(2)
-doc.WriteTo(os.Stdout)
-```
-
-Output:
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="style.xsl"?>
-<People>
-  <!--These are all known people-->
-  <Person name="Jon"/>
-  <Person name="Sally"/>
-</People>
-```
-
-### Reading an XML file
-
-Suppose you have a file on disk called `bookstore.xml` containing the
-following data:
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<bookstore xmlns:p="urn:books-com:prices">
-
-  <book category="COOKING">
-    <title lang="en">Everyday Italian</title>
-    <author>Giada De Laurentiis</author>
-    <year>2005</year>
-    <p:price>30.00</p:price>
-  </book>
-
-  <book category="CHILDREN">
-    <title lang="en">Harry Potter</title>
-    <author>J K. Rowling</author>
-    <year>2005</year>
-    <p:price>29.99</p:price>
-  </book>
-
-  <book category="WEB">
-    <title lang="en">XQuery Kick Start</title>
-    <author>James McGovern</author>
-    <author>Per Bothner</author>
-    <author>Kurt Cagle</author>
-    <author>James Linn</author>
-    <author>Vaidyanathan Nagarajan</author>
-    <year>2003</year>
-    <p:price>49.99</p:price>
-  </book>
-
-  <book category="WEB">
-    <title lang="en">Learning XML</title>
-    <author>Erik T. Ray</author>
-    <year>2003</year>
-    <p:price>39.95</p:price>
-  </book>
-
-</bookstore>
-```
-
-This code reads the file's contents into an etree document.
-```go
-doc := etree.NewDocument()
-if err := doc.ReadFromFile("bookstore.xml"); err != nil {
-    panic(err)
-}
-```
-
-You can also read XML from a string, a byte slice, or an `io.Reader`.
-
-### Processing elements and attributes
-
-This example illustrates several ways to access elements and attributes using
-etree selection queries.
-```go
-root := doc.SelectElement("bookstore")
-fmt.Println("ROOT element:", root.Tag)
-
-for _, book := range root.SelectElements("book") {
-    fmt.Println("CHILD element:", book.Tag)
-    if title := book.SelectElement("title"); title != nil {
-        lang := title.SelectAttrValue("lang", "unknown")
-        fmt.Printf("  TITLE: %s (%s)\n", title.Text(), lang)
-    }
-    for _, attr := range book.Attr {
-        fmt.Printf("  ATTR: %s=%s\n", attr.Key, attr.Value)
-    }
-}
-```
-Output:
-```
-ROOT element: bookstore
-CHILD element: book
-  TITLE: Everyday Italian (en)
-  ATTR: category=COOKING
-CHILD element: book
-  TITLE: Harry Potter (en)
-  ATTR: category=CHILDREN
-CHILD element: book
-  TITLE: XQuery Kick Start (en)
-  ATTR: category=WEB
-CHILD element: book
-  TITLE: Learning XML (en)
-  ATTR: category=WEB
-```
-
-### Path queries
-
-This example uses etree's path functions to select all book titles that fall
-into the category of 'WEB'. The double-slash prefix in the path causes the
-search for book elements to occur recursively; book elements may appear at any
-level of the XML hierarchy.
-```go
-for _, t := range doc.FindElements("//book[@category='WEB']/title") {
-    fmt.Println("Title:", t.Text())
-}
-```
-
-Output:
-```
-Title: XQuery Kick Start
-Title: Learning XML
-```
-
-This example finds the first book element under the root bookstore element and
-outputs the tag and text of each of its child elements.
-```go
-for _, e := range doc.FindElements("./bookstore/book[1]/*") {
-    fmt.Printf("%s: %s\n", e.Tag, e.Text())
-}
-```
-
-Output:
-```
-title: Everyday Italian
-author: Giada De Laurentiis
-year: 2005
-price: 30.00
-```
-
-This example finds all books with a price of 49.99 and outputs their titles.
-```go
-path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title")
-for _, e := range doc.FindElementsPath(path) {
-    fmt.Println(e.Text())
-}
-```
-
-Output:
-```
-XQuery Kick Start
-```
-
-Note that this example uses the FindElementsPath function, which takes as an
-argument a pre-compiled path object. Use precompiled paths when you plan to
-search with the same path more than once.
-
-### Other features
-
-These are just a few examples of the things the etree package can do.
See the -[documentation](http://godoc.org/github.com/beevik/etree) for a complete -description of its capabilities. - -### Contributing - -This project accepts contributions. Just fork the repo and submit a pull -request! diff --git a/vendor/github.com/beevik/etree/RELEASE_NOTES.md b/vendor/github.com/beevik/etree/RELEASE_NOTES.md deleted file mode 100644 index ee59d7a..0000000 --- a/vendor/github.com/beevik/etree/RELEASE_NOTES.md +++ /dev/null @@ -1,109 +0,0 @@ -Release v1.1.0 -============== - -**New Features** - -* New attribute helpers. - * Added the `Element.SortAttrs` method, which lexicographically sorts an - element's attributes by key. -* New `ReadSettings` properties. - * Added `Entity` for the support of custom entity maps. -* New `WriteSettings` properties. - * Added `UseCRLF` to allow the output of CR-LF newlines instead of the - default LF newlines. This is useful on Windows systems. -* Additional support for text and CDATA sections. - * The `Element.Text` method now returns the concatenation of all consecutive - character data tokens immediately following an element's opening tag. - * Added `Element.SetCData` to replace the character data immediately - following an element's opening tag with a CDATA section. - * Added `Element.CreateCData` to create and add a CDATA section child - `CharData` token to an element. - * Added `Element.CreateText` to create and add a child text `CharData` token - to an element. - * Added `NewCData` to create a parentless CDATA section `CharData` token. - * Added `NewText` to create a parentless text `CharData` - token. - * Added `CharData.IsCData` to detect if the token contains a CDATA section. - * Added `CharData.IsWhitespace` to detect if the token contains whitespace - inserted by one of the document Indent functions. - * Modified `Element.SetText` so that it replaces a run of consecutive - character data tokens following the element's opening tag (instead of just - the first one). -* New "tail text" support. - * Added the `Element.Tail` method, which returns the text immediately - following an element's closing tag. - * Added the `Element.SetTail` method, which modifies the text immediately - following an element's closing tag. -* New element child insertion and removal methods. - * Added the `Element.InsertChildAt` method, which inserts a new child token - before the specified child token index. - * Added the `Element.RemoveChildAt` method, which removes the child token at - the specified child token index. -* New element and attribute queries. - * Added the `Element.Index` method, which returns the element's index within - its parent element's child token list. - * Added the `Element.NamespaceURI` method to return the namespace URI - associated with an element. - * Added the `Attr.NamespaceURI` method to return the namespace URI - associated with an element. - * Added the `Attr.Element` method to return the element that an attribute - belongs to. -* New Path filter functions. - * Added `[local-name()='val']` to keep elements whose unprefixed tag matches - the desired value. - * Added `[name()='val']` to keep elements whose full tag matches the desired - value. - * Added `[namespace-prefix()='val']` to keep elements whose namespace prefix - matches the desired value. - * Added `[namespace-uri()='val']` to keep elements whose namespace URI - matches the desired value. - -**Bug Fixes** - -* A default XML `CharSetReader` is now used to prevent failed parsing of XML - documents using certain encodings. 
- ([Issue](https://github.com/beevik/etree/issues/53)). -* All characters are now properly escaped according to XML parsing rules. - ([Issue](https://github.com/beevik/etree/issues/55)). -* The `Document.Indent` and `Document.IndentTabs` functions no longer insert - empty string `CharData` tokens. - -**Deprecated** - -* `Element` - * The `InsertChild` method is deprecated. Use `InsertChildAt` instead. - * The `CreateCharData` method is deprecated. Use `CreateText` instead. -* `CharData` - * The `NewCharData` method is deprecated. Use `NewText` instead. - - -Release v1.0.1 -============== - -**Changes** - -* Added support for absolute etree Path queries. An absolute path begins with - `/` or `//` and begins its search from the element's document root. -* Added [`GetPath`](https://godoc.org/github.com/beevik/etree#Element.GetPath) - and [`GetRelativePath`](https://godoc.org/github.com/beevik/etree#Element.GetRelativePath) - functions to the [`Element`](https://godoc.org/github.com/beevik/etree#Element) - type. - -**Breaking changes** - -* A path starting with `//` is now interpreted as an absolute path. - Previously, it was interpreted as a relative path starting from the element - whose - [`FindElement`](https://godoc.org/github.com/beevik/etree#Element.FindElement) - method was called. To remain compatible with this release, all paths - prefixed with `//` should be prefixed with `.//` when called from any - element other than the document's root. -* [**edit 2/1/2019**]: Minor releases should not contain breaking changes. - Even though this breaking change was very minor, it was a mistake to include - it in this minor release. In the future, all breaking changes will be - limited to major releases (e.g., version 2.0.0). - -Release v1.0.0 -============== - -Initial release. diff --git a/vendor/github.com/beevik/etree/etree.go b/vendor/github.com/beevik/etree/etree.go deleted file mode 100644 index 9e24f90..0000000 --- a/vendor/github.com/beevik/etree/etree.go +++ /dev/null @@ -1,1453 +0,0 @@ -// Copyright 2015-2019 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package etree provides XML services through an Element Tree -// abstraction. -package etree - -import ( - "bufio" - "bytes" - "encoding/xml" - "errors" - "io" - "os" - "sort" - "strings" -) - -const ( - // NoIndent is used with Indent to disable all indenting. - NoIndent = -1 -) - -// ErrXML is returned when XML parsing fails due to incorrect formatting. -var ErrXML = errors.New("etree: invalid XML format") - -// ReadSettings allow for changing the default behavior of the ReadFrom* -// methods. -type ReadSettings struct { - // CharsetReader to be passed to standard xml.Decoder. Default: nil. - CharsetReader func(charset string, input io.Reader) (io.Reader, error) - - // Permissive allows input containing common mistakes such as missing tags - // or attribute values. Default: false. - Permissive bool - - // Entity to be passed to standard xml.Decoder. Default: nil. - Entity map[string]string -} - -// newReadSettings creates a default ReadSettings record. -func newReadSettings() ReadSettings { - return ReadSettings{ - CharsetReader: func(label string, input io.Reader) (io.Reader, error) { - return input, nil - }, - Permissive: false, - } -} - -// WriteSettings allow for changing the serialization behavior of the WriteTo* -// methods. 
-type WriteSettings struct { - // CanonicalEndTags forces the production of XML end tags, even for - // elements that have no child elements. Default: false. - CanonicalEndTags bool - - // CanonicalText forces the production of XML character references for - // text data characters &, <, and >. If false, XML character references - // are also produced for " and '. Default: false. - CanonicalText bool - - // CanonicalAttrVal forces the production of XML character references for - // attribute value characters &, < and ". If false, XML character - // references are also produced for > and '. Default: false. - CanonicalAttrVal bool - - // When outputting indented XML, use a carriage return and linefeed - // ("\r\n") as a new-line delimiter instead of just a linefeed ("\n"). - // This is useful on Windows-based systems. - UseCRLF bool -} - -// newWriteSettings creates a default WriteSettings record. -func newWriteSettings() WriteSettings { - return WriteSettings{ - CanonicalEndTags: false, - CanonicalText: false, - CanonicalAttrVal: false, - UseCRLF: false, - } -} - -// A Token is an empty interface that represents an Element, CharData, -// Comment, Directive, or ProcInst. -type Token interface { - Parent() *Element - Index() int - dup(parent *Element) Token - setParent(parent *Element) - setIndex(index int) - writeTo(w *bufio.Writer, s *WriteSettings) -} - -// A Document is a container holding a complete XML hierarchy. Its embedded -// element contains zero or more children, one of which is usually the root -// element. The embedded element may include other children such as -// processing instructions or BOM CharData tokens. -type Document struct { - Element - ReadSettings ReadSettings - WriteSettings WriteSettings -} - -// An Element represents an XML element, its attributes, and its child tokens. -type Element struct { - Space, Tag string // namespace prefix and tag - Attr []Attr // key-value attribute pairs - Child []Token // child tokens (elements, comments, etc.) - parent *Element // parent element - index int // token index in parent's children -} - -// An Attr represents a key-value attribute of an XML element. -type Attr struct { - Space, Key string // The attribute's namespace prefix and key - Value string // The attribute value string - element *Element // element containing the attribute -} - -// charDataFlags are used with CharData tokens to store additional settings. -type charDataFlags uint8 - -const ( - // The CharData was created by an indent function as whitespace. - whitespaceFlag charDataFlags = 1 << iota - - // The CharData contains a CDATA section. - cdataFlag -) - -// CharData can be used to represent character data or a CDATA section within -// an XML document. -type CharData struct { - Data string - parent *Element - index int - flags charDataFlags -} - -// A Comment represents an XML comment. -type Comment struct { - Data string - parent *Element - index int -} - -// A Directive represents an XML directive. -type Directive struct { - Data string - parent *Element - index int -} - -// A ProcInst represents an XML processing instruction. -type ProcInst struct { - Target string - Inst string - parent *Element - index int -} - -// NewDocument creates an XML document without a root element. -func NewDocument() *Document { - return &Document{ - Element{Child: make([]Token, 0)}, - newReadSettings(), - newWriteSettings(), - } -} - -// Copy returns a recursive, deep copy of the document. 
-func (d *Document) Copy() *Document { - return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings} -} - -// Root returns the root element of the document, or nil if there is no root -// element. -func (d *Document) Root() *Element { - for _, t := range d.Child { - if c, ok := t.(*Element); ok { - return c - } - } - return nil -} - -// SetRoot replaces the document's root element with e. If the document -// already has a root when this function is called, then the document's -// original root is unbound first. If the element e is bound to another -// document (or to another element within a document), then it is unbound -// first. -func (d *Document) SetRoot(e *Element) { - if e.parent != nil { - e.parent.RemoveChild(e) - } - - p := &d.Element - e.setParent(p) - - // If there is already a root element, replace it. - for i, t := range p.Child { - if _, ok := t.(*Element); ok { - t.setParent(nil) - t.setIndex(-1) - p.Child[i] = e - e.setIndex(i) - return - } - } - - // No existing root element, so add it. - p.addChild(e) -} - -// ReadFrom reads XML from the reader r into the document d. It returns the -// number of bytes read and any error encountered. -func (d *Document) ReadFrom(r io.Reader) (n int64, err error) { - return d.Element.readFrom(r, d.ReadSettings) -} - -// ReadFromFile reads XML from the string s into the document d. -func (d *Document) ReadFromFile(filename string) error { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - _, err = d.ReadFrom(f) - return err -} - -// ReadFromBytes reads XML from the byte slice b into the document d. -func (d *Document) ReadFromBytes(b []byte) error { - _, err := d.ReadFrom(bytes.NewReader(b)) - return err -} - -// ReadFromString reads XML from the string s into the document d. -func (d *Document) ReadFromString(s string) error { - _, err := d.ReadFrom(strings.NewReader(s)) - return err -} - -// WriteTo serializes an XML document into the writer w. It -// returns the number of bytes written and any error encountered. -func (d *Document) WriteTo(w io.Writer) (n int64, err error) { - cw := newCountWriter(w) - b := bufio.NewWriter(cw) - for _, c := range d.Child { - c.writeTo(b, &d.WriteSettings) - } - err, n = b.Flush(), cw.bytes - return -} - -// WriteToFile serializes an XML document into the file named -// filename. -func (d *Document) WriteToFile(filename string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - _, err = d.WriteTo(f) - return err -} - -// WriteToBytes serializes the XML document into a slice of -// bytes. -func (d *Document) WriteToBytes() (b []byte, err error) { - var buf bytes.Buffer - if _, err = d.WriteTo(&buf); err != nil { - return - } - return buf.Bytes(), nil -} - -// WriteToString serializes the XML document into a string. -func (d *Document) WriteToString() (s string, err error) { - var b []byte - if b, err = d.WriteToBytes(); err != nil { - return - } - return string(b), nil -} - -type indentFunc func(depth int) string - -// Indent modifies the document's element tree by inserting character data -// tokens containing newlines and indentation. The amount of indentation per -// depth level is given as spaces. Pass etree.NoIndent for spaces if you want -// no indentation at all. 
-func (d *Document) Indent(spaces int) { - var indent indentFunc - switch { - case spaces < 0: - indent = func(depth int) string { return "" } - case d.WriteSettings.UseCRLF == true: - indent = func(depth int) string { return indentCRLF(depth*spaces, indentSpaces) } - default: - indent = func(depth int) string { return indentLF(depth*spaces, indentSpaces) } - } - d.Element.indent(0, indent) -} - -// IndentTabs modifies the document's element tree by inserting CharData -// tokens containing newlines and tabs for indentation. One tab is used per -// indentation level. -func (d *Document) IndentTabs() { - var indent indentFunc - switch d.WriteSettings.UseCRLF { - case true: - indent = func(depth int) string { return indentCRLF(depth, indentTabs) } - default: - indent = func(depth int) string { return indentLF(depth, indentTabs) } - } - d.Element.indent(0, indent) -} - -// NewElement creates an unparented element with the specified tag. The tag -// may be prefixed by a namespace prefix and a colon. -func NewElement(tag string) *Element { - space, stag := spaceDecompose(tag) - return newElement(space, stag, nil) -} - -// newElement is a helper function that creates an element and binds it to -// a parent element if possible. -func newElement(space, tag string, parent *Element) *Element { - e := &Element{ - Space: space, - Tag: tag, - Attr: make([]Attr, 0), - Child: make([]Token, 0), - parent: parent, - index: -1, - } - if parent != nil { - parent.addChild(e) - } - return e -} - -// Copy creates a recursive, deep copy of the element and all its attributes -// and children. The returned element has no parent but can be parented to a -// another element using AddElement, or to a document using SetRoot. -func (e *Element) Copy() *Element { - return e.dup(nil).(*Element) -} - -// FullTag returns the element e's complete tag, including namespace prefix if -// present. -func (e *Element) FullTag() string { - if e.Space == "" { - return e.Tag - } - return e.Space + ":" + e.Tag -} - -// NamespaceURI returns the XML namespace URI associated with the element. If -// the element is part of the XML default namespace, NamespaceURI returns the -// empty string. -func (e *Element) NamespaceURI() string { - if e.Space == "" { - return e.findDefaultNamespaceURI() - } - return e.findLocalNamespaceURI(e.Space) -} - -// findLocalNamespaceURI finds the namespace URI corresponding to the -// requested prefix. -func (e *Element) findLocalNamespaceURI(prefix string) string { - for _, a := range e.Attr { - if a.Space == "xmlns" && a.Key == prefix { - return a.Value - } - } - - if e.parent == nil { - return "" - } - - return e.parent.findLocalNamespaceURI(prefix) -} - -// findDefaultNamespaceURI finds the default namespace URI of the element. -func (e *Element) findDefaultNamespaceURI() string { - for _, a := range e.Attr { - if a.Space == "" && a.Key == "xmlns" { - return a.Value - } - } - - if e.parent == nil { - return "" - } - - return e.parent.findDefaultNamespaceURI() -} - -// hasText returns true if the element has character data immediately -// folllowing the element's opening tag. -func (e *Element) hasText() bool { - if len(e.Child) == 0 { - return false - } - _, ok := e.Child[0].(*CharData) - return ok -} - -// namespacePrefix returns the namespace prefix associated with the element. -func (e *Element) namespacePrefix() string { - return e.Space -} - -// name returns the tag associated with the element. 
-func (e *Element) name() string { - return e.Tag -} - -// Text returns all character data immediately following the element's opening -// tag. -func (e *Element) Text() string { - if len(e.Child) == 0 { - return "" - } - - text := "" - for _, ch := range e.Child { - if cd, ok := ch.(*CharData); ok { - if text == "" { - text = cd.Data - } else { - text = text + cd.Data - } - } else { - break - } - } - return text -} - -// SetText replaces all character data immediately following an element's -// opening tag with the requested string. -func (e *Element) SetText(text string) { - e.replaceText(0, text, 0) -} - -// SetCData replaces all character data immediately following an element's -// opening tag with a CDATA section. -func (e *Element) SetCData(text string) { - e.replaceText(0, text, cdataFlag) -} - -// Tail returns all character data immediately following the element's end -// tag. -func (e *Element) Tail() string { - if e.Parent() == nil { - return "" - } - - p := e.Parent() - i := e.Index() - - text := "" - for _, ch := range p.Child[i+1:] { - if cd, ok := ch.(*CharData); ok { - if text == "" { - text = cd.Data - } else { - text = text + cd.Data - } - } else { - break - } - } - return text -} - -// SetTail replaces all character data immediately following the element's end -// tag with the requested string. -func (e *Element) SetTail(text string) { - if e.Parent() == nil { - return - } - - p := e.Parent() - p.replaceText(e.Index()+1, text, 0) -} - -// replaceText is a helper function that replaces a series of chardata tokens -// starting at index i with the requested text. -func (e *Element) replaceText(i int, text string, flags charDataFlags) { - end := e.findTermCharDataIndex(i) - - switch { - case end == i: - if text != "" { - // insert a new chardata token at index i - cd := newCharData(text, flags, nil) - e.InsertChildAt(i, cd) - } - - case end == i+1: - if text == "" { - // remove the chardata token at index i - e.RemoveChildAt(i) - } else { - // replace the first and only character token at index i - cd := e.Child[i].(*CharData) - cd.Data, cd.flags = text, flags - } - - default: - if text == "" { - // remove all chardata tokens starting from index i - copy(e.Child[i:], e.Child[end:]) - removed := end - i - e.Child = e.Child[:len(e.Child)-removed] - for j := i; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } - } else { - // replace the first chardata token at index i and remove all - // subsequent chardata tokens - cd := e.Child[i].(*CharData) - cd.Data, cd.flags = text, flags - copy(e.Child[i+1:], e.Child[end:]) - removed := end - (i + 1) - e.Child = e.Child[:len(e.Child)-removed] - for j := i + 1; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } - } - } -} - -// findTermCharDataIndex finds the index of the first child token that isn't -// a CharData token. It starts from the requested start index. -func (e *Element) findTermCharDataIndex(start int) int { - for i := start; i < len(e.Child); i++ { - if _, ok := e.Child[i].(*CharData); !ok { - return i - } - } - return len(e.Child) -} - -// CreateElement creates an element with the specified tag and adds it as the -// last child element of the element e. The tag may be prefixed by a namespace -// prefix and a colon. -func (e *Element) CreateElement(tag string) *Element { - space, stag := spaceDecompose(tag) - return newElement(space, stag, e) -} - -// AddChild adds the token t as the last child of element e. If token t was -// already the child of another element, it is first removed from its current -// parent element. 
-func (e *Element) AddChild(t Token) { - if t.Parent() != nil { - t.Parent().RemoveChild(t) - } - - t.setParent(e) - e.addChild(t) -} - -// InsertChild inserts the token t before e's existing child token ex. If ex -// is nil or ex is not a child of e, then t is added to the end of e's child -// token list. If token t was already the child of another element, it is -// first removed from its current parent element. -// -// Deprecated: InsertChild is deprecated. Use InsertChildAt instead. -func (e *Element) InsertChild(ex Token, t Token) { - if ex == nil || ex.Parent() != e { - e.AddChild(t) - return - } - - if t.Parent() != nil { - t.Parent().RemoveChild(t) - } - - t.setParent(e) - - i := ex.Index() - e.Child = append(e.Child, nil) - copy(e.Child[i+1:], e.Child[i:]) - e.Child[i] = t - - for j := i; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } -} - -// InsertChildAt inserts the token t into the element e's list of child tokens -// just before the requested index. If the index is greater than or equal to -// the length of the list of child tokens, the token t is added to the end of -// the list. -func (e *Element) InsertChildAt(index int, t Token) { - if index >= len(e.Child) { - e.AddChild(t) - return - } - - if t.Parent() != nil { - if t.Parent() == e && t.Index() > index { - index-- - } - t.Parent().RemoveChild(t) - } - - t.setParent(e) - - e.Child = append(e.Child, nil) - copy(e.Child[index+1:], e.Child[index:]) - e.Child[index] = t - - for j := index; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } -} - -// RemoveChild attempts to remove the token t from element e's list of -// children. If the token t is a child of e, then it is returned. Otherwise, -// nil is returned. -func (e *Element) RemoveChild(t Token) Token { - if t.Parent() != e { - return nil - } - return e.RemoveChildAt(t.Index()) -} - -// RemoveChildAt removes the index-th child token from the element e. The -// removed child token is returned. If the index is out of bounds, no child is -// removed and nil is returned. -func (e *Element) RemoveChildAt(index int) Token { - if index >= len(e.Child) { - return nil - } - - t := e.Child[index] - for j := index + 1; j < len(e.Child); j++ { - e.Child[j].setIndex(j - 1) - } - e.Child = append(e.Child[:index], e.Child[index+1:]...) - t.setIndex(-1) - t.setParent(nil) - return t -} - -// ReadFrom reads XML from the reader r and stores the result as a new child -// of element e. 
-func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) { - r := newCountReader(ri) - dec := xml.NewDecoder(r) - dec.CharsetReader = settings.CharsetReader - dec.Strict = !settings.Permissive - dec.Entity = settings.Entity - var stack stack - stack.push(e) - for { - t, err := dec.RawToken() - switch { - case err == io.EOF: - return r.bytes, nil - case err != nil: - return r.bytes, err - case stack.empty(): - return r.bytes, ErrXML - } - - top := stack.peek().(*Element) - - switch t := t.(type) { - case xml.StartElement: - e := newElement(t.Name.Space, t.Name.Local, top) - for _, a := range t.Attr { - e.createAttr(a.Name.Space, a.Name.Local, a.Value, e) - } - stack.push(e) - case xml.EndElement: - stack.pop() - case xml.CharData: - data := string(t) - var flags charDataFlags - if isWhitespace(data) { - flags = whitespaceFlag - } - newCharData(data, flags, top) - case xml.Comment: - newComment(string(t), top) - case xml.Directive: - newDirective(string(t), top) - case xml.ProcInst: - newProcInst(t.Target, string(t.Inst), top) - } - } -} - -// SelectAttr finds an element attribute matching the requested key and -// returns it if found. Returns nil if no matching attribute is found. The key -// may be prefixed by a namespace prefix and a colon. -func (e *Element) SelectAttr(key string) *Attr { - space, skey := spaceDecompose(key) - for i, a := range e.Attr { - if spaceMatch(space, a.Space) && skey == a.Key { - return &e.Attr[i] - } - } - return nil -} - -// SelectAttrValue finds an element attribute matching the requested key and -// returns its value if found. The key may be prefixed by a namespace prefix -// and a colon. If the key is not found, the dflt value is returned instead. -func (e *Element) SelectAttrValue(key, dflt string) string { - space, skey := spaceDecompose(key) - for _, a := range e.Attr { - if spaceMatch(space, a.Space) && skey == a.Key { - return a.Value - } - } - return dflt -} - -// ChildElements returns all elements that are children of element e. -func (e *Element) ChildElements() []*Element { - var elements []*Element - for _, t := range e.Child { - if c, ok := t.(*Element); ok { - elements = append(elements, c) - } - } - return elements -} - -// SelectElement returns the first child element with the given tag. The tag -// may be prefixed by a namespace prefix and a colon. Returns nil if no -// element with a matching tag was found. -func (e *Element) SelectElement(tag string) *Element { - space, stag := spaceDecompose(tag) - for _, t := range e.Child { - if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { - return c - } - } - return nil -} - -// SelectElements returns a slice of all child elements with the given tag. -// The tag may be prefixed by a namespace prefix and a colon. -func (e *Element) SelectElements(tag string) []*Element { - space, stag := spaceDecompose(tag) - var elements []*Element - for _, t := range e.Child { - if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { - elements = append(elements, c) - } - } - return elements -} - -// FindElement returns the first element matched by the XPath-like path -// string. Returns nil if no element is found using the path. Panics if an -// invalid path string is supplied. -func (e *Element) FindElement(path string) *Element { - return e.FindElementPath(MustCompilePath(path)) -} - -// FindElementPath returns the first element matched by the XPath-like path -// string. Returns nil if no element is found using the path. 
-func (e *Element) FindElementPath(path Path) *Element { - p := newPather() - elements := p.traverse(e, path) - switch { - case len(elements) > 0: - return elements[0] - default: - return nil - } -} - -// FindElements returns a slice of elements matched by the XPath-like path -// string. Panics if an invalid path string is supplied. -func (e *Element) FindElements(path string) []*Element { - return e.FindElementsPath(MustCompilePath(path)) -} - -// FindElementsPath returns a slice of elements matched by the Path object. -func (e *Element) FindElementsPath(path Path) []*Element { - p := newPather() - return p.traverse(e, path) -} - -// GetPath returns the absolute path of the element. -func (e *Element) GetPath() string { - path := []string{} - for seg := e; seg != nil; seg = seg.Parent() { - if seg.Tag != "" { - path = append(path, seg.Tag) - } - } - - // Reverse the path. - for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { - path[i], path[j] = path[j], path[i] - } - - return "/" + strings.Join(path, "/") -} - -// GetRelativePath returns the path of the element relative to the source -// element. If the two elements are not part of the same element tree, then -// GetRelativePath returns the empty string. -func (e *Element) GetRelativePath(source *Element) string { - var path []*Element - - if source == nil { - return "" - } - - // Build a reverse path from the element toward the root. Stop if the - // source element is encountered. - var seg *Element - for seg = e; seg != nil && seg != source; seg = seg.Parent() { - path = append(path, seg) - } - - // If we found the source element, reverse the path and compose the - // string. - if seg == source { - if len(path) == 0 { - return "." - } - parts := []string{} - for i := len(path) - 1; i >= 0; i-- { - parts = append(parts, path[i].Tag) - } - return "./" + strings.Join(parts, "/") - } - - // The source wasn't encountered, so climb from the source element toward - // the root of the tree until an element in the reversed path is - // encountered. - - findPathIndex := func(e *Element, path []*Element) int { - for i, ee := range path { - if e == ee { - return i - } - } - return -1 - } - - climb := 0 - for seg = source; seg != nil; seg = seg.Parent() { - i := findPathIndex(seg, path) - if i >= 0 { - path = path[:i] // truncate at found segment - break - } - climb++ - } - - // No element in the reversed path was encountered, so the two elements - // must not be part of the same tree. - if seg == nil { - return "" - } - - // Reverse the (possibly truncated) path and prepend ".." segments to - // climb. - parts := []string{} - for i := 0; i < climb; i++ { - parts = append(parts, "..") - } - for i := len(path) - 1; i >= 0; i-- { - parts = append(parts, path[i].Tag) - } - return strings.Join(parts, "/") -} - -// indent recursively inserts proper indentation between an -// XML element's child tokens. -func (e *Element) indent(depth int, indent indentFunc) { - e.stripIndent() - n := len(e.Child) - if n == 0 { - return - } - - oldChild := e.Child - e.Child = make([]Token, 0, n*2+1) - isCharData, firstNonCharData := false, true - for _, c := range oldChild { - // Insert NL+indent before child if it's not character data. - // Exceptions: when it's the first non-character-data child, or when - // the child is at root depth. 
- _, isCharData = c.(*CharData) - if !isCharData { - if !firstNonCharData || depth > 0 { - s := indent(depth) - if s != "" { - newCharData(s, whitespaceFlag, e) - } - } - firstNonCharData = false - } - - e.addChild(c) - - // Recursively process child elements. - if ce, ok := c.(*Element); ok { - ce.indent(depth+1, indent) - } - } - - // Insert NL+indent before the last child. - if !isCharData { - if !firstNonCharData || depth > 0 { - s := indent(depth - 1) - if s != "" { - newCharData(s, whitespaceFlag, e) - } - } - } -} - -// stripIndent removes any previously inserted indentation. -func (e *Element) stripIndent() { - // Count the number of non-indent child tokens - n := len(e.Child) - for _, c := range e.Child { - if cd, ok := c.(*CharData); ok && cd.IsWhitespace() { - n-- - } - } - if n == len(e.Child) { - return - } - - // Strip out indent CharData - newChild := make([]Token, n) - j := 0 - for _, c := range e.Child { - if cd, ok := c.(*CharData); ok && cd.IsWhitespace() { - continue - } - newChild[j] = c - newChild[j].setIndex(j) - j++ - } - e.Child = newChild -} - -// dup duplicates the element. -func (e *Element) dup(parent *Element) Token { - ne := &Element{ - Space: e.Space, - Tag: e.Tag, - Attr: make([]Attr, len(e.Attr)), - Child: make([]Token, len(e.Child)), - parent: parent, - index: e.index, - } - for i, t := range e.Child { - ne.Child[i] = t.dup(ne) - } - for i, a := range e.Attr { - ne.Attr[i] = a - } - return ne -} - -// Parent returns the element token's parent element, or nil if it has no -// parent. -func (e *Element) Parent() *Element { - return e.parent -} - -// Index returns the index of this element within its parent element's -// list of child tokens. If this element has no parent element, the index -// is -1. -func (e *Element) Index() int { - return e.index -} - -// setParent replaces the element token's parent. -func (e *Element) setParent(parent *Element) { - e.parent = parent -} - -// setIndex sets the element token's index within its parent's Child slice. -func (e *Element) setIndex(index int) { - e.index = index -} - -// writeTo serializes the element to the writer w. -func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteByte('<') - w.WriteString(e.FullTag()) - for _, a := range e.Attr { - w.WriteByte(' ') - a.writeTo(w, s) - } - if len(e.Child) > 0 { - w.WriteString(">") - for _, c := range e.Child { - c.writeTo(w, s) - } - w.Write([]byte{'<', '/'}) - w.WriteString(e.FullTag()) - w.WriteByte('>') - } else { - if s.CanonicalEndTags { - w.Write([]byte{'>', '<', '/'}) - w.WriteString(e.FullTag()) - w.WriteByte('>') - } else { - w.Write([]byte{'/', '>'}) - } - } -} - -// addChild adds a child token to the element e. -func (e *Element) addChild(t Token) { - t.setIndex(len(e.Child)) - e.Child = append(e.Child, t) -} - -// CreateAttr creates an attribute and adds it to element e. The key may be -// prefixed by a namespace prefix and a colon. If an attribute with the key -// already exists, its value is replaced. -func (e *Element) CreateAttr(key, value string) *Attr { - space, skey := spaceDecompose(key) - return e.createAttr(space, skey, value, e) -} - -// createAttr is a helper function that creates attributes. 
-func (e *Element) createAttr(space, key, value string, parent *Element) *Attr { - for i, a := range e.Attr { - if space == a.Space && key == a.Key { - e.Attr[i].Value = value - return &e.Attr[i] - } - } - a := Attr{ - Space: space, - Key: key, - Value: value, - element: parent, - } - e.Attr = append(e.Attr, a) - return &e.Attr[len(e.Attr)-1] -} - -// RemoveAttr removes and returns a copy of the first attribute of the element -// whose key matches the given key. The key may be prefixed by a namespace -// prefix and a colon. If a matching attribute does not exist, nil is -// returned. -func (e *Element) RemoveAttr(key string) *Attr { - space, skey := spaceDecompose(key) - for i, a := range e.Attr { - if space == a.Space && skey == a.Key { - e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...) - return &Attr{ - Space: a.Space, - Key: a.Key, - Value: a.Value, - element: nil, - } - } - } - return nil -} - -// SortAttrs sorts the element's attributes lexicographically by key. -func (e *Element) SortAttrs() { - sort.Sort(byAttr(e.Attr)) -} - -type byAttr []Attr - -func (a byAttr) Len() int { - return len(a) -} - -func (a byAttr) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a byAttr) Less(i, j int) bool { - sp := strings.Compare(a[i].Space, a[j].Space) - if sp == 0 { - return strings.Compare(a[i].Key, a[j].Key) < 0 - } - return sp < 0 -} - -// FullKey returns the attribute a's complete key, including namespace prefix -// if present. -func (a *Attr) FullKey() string { - if a.Space == "" { - return a.Key - } - return a.Space + ":" + a.Key -} - -// Element returns the element containing the attribute. -func (a *Attr) Element() *Element { - return a.element -} - -// NamespaceURI returns the XML namespace URI associated with the attribute. -// If the element is part of the XML default namespace, NamespaceURI returns -// the empty string. -func (a *Attr) NamespaceURI() string { - return a.element.NamespaceURI() -} - -// writeTo serializes the attribute to the writer. -func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteString(a.FullKey()) - w.WriteString(`="`) - var m escapeMode - if s.CanonicalAttrVal { - m = escapeCanonicalAttr - } else { - m = escapeNormal - } - escapeString(w, a.Value, m) - w.WriteByte('"') -} - -// NewText creates a parentless CharData token containing character data. -func NewText(text string) *CharData { - return newCharData(text, 0, nil) -} - -// NewCData creates a parentless XML character CDATA section. -func NewCData(data string) *CharData { - return newCharData(data, cdataFlag, nil) -} - -// NewCharData creates a parentless CharData token containing character data. -// -// Deprecated: NewCharData is deprecated. Instead, use NewText, which does the -// same thing. -func NewCharData(data string) *CharData { - return newCharData(data, 0, nil) -} - -// newCharData creates a character data token and binds it to a parent -// element. If parent is nil, the CharData token remains unbound. -func newCharData(data string, flags charDataFlags, parent *Element) *CharData { - c := &CharData{ - Data: data, - parent: parent, - index: -1, - flags: flags, - } - if parent != nil { - parent.addChild(c) - } - return c -} - -// CreateText creates a CharData token containing character data and adds it -// as a child of element e. -func (e *Element) CreateText(text string) *CharData { - return newCharData(text, 0, e) -} - -// CreateCData creates a CharData token containing a CDATA section and adds it -// as a child of element e. 
-func (e *Element) CreateCData(data string) *CharData { - return newCharData(data, cdataFlag, e) -} - -// CreateCharData creates a CharData token containing character data and adds -// it as a child of element e. -// -// Deprecated: CreateCharData is deprecated. Instead, use CreateText, which -// does the same thing. -func (e *Element) CreateCharData(data string) *CharData { - return newCharData(data, 0, e) -} - -// dup duplicates the character data. -func (c *CharData) dup(parent *Element) Token { - return &CharData{ - Data: c.Data, - flags: c.flags, - parent: parent, - index: c.index, - } -} - -// IsCData returns true if the character data token is to be encoded as a -// CDATA section. -func (c *CharData) IsCData() bool { - return (c.flags & cdataFlag) != 0 -} - -// IsWhitespace returns true if the character data token was created by one of -// the document Indent methods to contain only whitespace. -func (c *CharData) IsWhitespace() bool { - return (c.flags & whitespaceFlag) != 0 -} - -// Parent returns the character data token's parent element, or nil if it has -// no parent. -func (c *CharData) Parent() *Element { - return c.parent -} - -// Index returns the index of this CharData token within its parent element's -// list of child tokens. If this CharData token has no parent element, the -// index is -1. -func (c *CharData) Index() int { - return c.index -} - -// setParent replaces the character data token's parent. -func (c *CharData) setParent(parent *Element) { - c.parent = parent -} - -// setIndex sets the CharData token's index within its parent element's Child -// slice. -func (c *CharData) setIndex(index int) { - c.index = index -} - -// writeTo serializes character data to the writer. -func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) { - if c.IsCData() { - w.WriteString(`<![CDATA[`) - w.WriteString(c.Data) - w.WriteString(`]]>`) - } else { - var m escapeMode - if s.CanonicalText { - m = escapeCanonicalText - } else { - m = escapeNormal - } - escapeString(w, c.Data, m) - } -} - -// NewComment creates a parentless XML comment. -func NewComment(comment string) *Comment { - return newComment(comment, nil) -} - -// newComment creates an XML comment and binds it to a parent element. If -// parent is nil, the Comment remains unbound. -func newComment(comment string, parent *Element) *Comment { - c := &Comment{ - Data: comment, - parent: parent, - index: -1, - } - if parent != nil { - parent.addChild(c) - } - return c -} - -// CreateComment creates an XML comment and adds it as a child of element e. -func (e *Element) CreateComment(comment string) *Comment { - return newComment(comment, e) -} - -// dup duplicates the comment. -func (c *Comment) dup(parent *Element) Token { - return &Comment{ - Data: c.Data, - parent: parent, - index: c.index, - } -} - -// Parent returns comment token's parent element, or nil if it has no parent. -func (c *Comment) Parent() *Element { - return c.parent -} - -// Index returns the index of this Comment token within its parent element's -// list of child tokens. If this Comment token has no parent element, the -// index is -1. -func (c *Comment) Index() int { - return c.index -} - -// setParent replaces the comment token's parent. -func (c *Comment) setParent(parent *Element) { - c.parent = parent -} - -// setIndex sets the Comment token's index within its parent element's Child -// slice. -func (c *Comment) setIndex(index int) { - c.index = index -} - -// writeTo serializes the comment to the writer.
-func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) {
-	w.WriteString("<!--")
-	w.WriteString(c.Data)
-	w.WriteString("-->")
-}
-
-// NewDirective creates a parentless XML directive.
-func NewDirective(data string) *Directive {
-	return newDirective(data, nil)
-}
-
-// newDirective creates an XML directive and binds it to a parent element. If
-// parent is nil, the Directive remains unbound.
-func newDirective(data string, parent *Element) *Directive {
-	d := &Directive{
-		Data: data,
-		parent: parent,
-		index: -1,
-	}
-	if parent != nil {
-		parent.addChild(d)
-	}
-	return d
-}
-
-// CreateDirective creates an XML directive and adds it as the last child of
-// element e.
-func (e *Element) CreateDirective(data string) *Directive {
-	return newDirective(data, e)
-}
-
-// dup duplicates the directive.
-func (d *Directive) dup(parent *Element) Token {
-	return &Directive{
-		Data: d.Data,
-		parent: parent,
-		index: d.index,
-	}
-}
-
-// Parent returns directive token's parent element, or nil if it has no
-// parent.
-func (d *Directive) Parent() *Element {
-	return d.parent
-}
-
-// Index returns the index of this Directive token within its parent element's
-// list of child tokens. If this Directive token has no parent element, the
-// index is -1.
-func (d *Directive) Index() int {
-	return d.index
-}
-
-// setParent replaces the directive token's parent.
-func (d *Directive) setParent(parent *Element) {
-	d.parent = parent
-}
-
-// setIndex sets the Directive token's index within its parent element's Child
-// slice.
-func (d *Directive) setIndex(index int) {
-	d.index = index
-}
-
-// writeTo serializes the XML directive to the writer.
-func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) {
-	w.WriteString("<!")
-	w.WriteString(d.Data)
-	w.WriteString(">")
-}
-
-// NewProcInst creates a parentless XML processing instruction.
-func NewProcInst(target, inst string) *ProcInst {
-	return newProcInst(target, inst, nil)
-}
-
-// newProcInst creates an XML processing instruction and binds it to a parent
-// element. If parent is nil, the ProcInst remains unbound.
-func newProcInst(target, inst string, parent *Element) *ProcInst {
-	p := &ProcInst{
-		Target: target,
-		Inst: inst,
-		parent: parent,
-		index: -1,
-	}
-	if parent != nil {
-		parent.addChild(p)
-	}
-	return p
-}
-
-// CreateProcInst creates a processing instruction and adds it as a child of
-// element e.
-func (e *Element) CreateProcInst(target, inst string) *ProcInst {
-	return newProcInst(target, inst, e)
-}
-
-// dup duplicates the procinst.
-func (p *ProcInst) dup(parent *Element) Token {
-	return &ProcInst{
-		Target: p.Target,
-		Inst: p.Inst,
-		parent: parent,
-		index: p.index,
-	}
-}
-
-// Parent returns processing instruction token's parent element, or nil if it
-// has no parent.
-func (p *ProcInst) Parent() *Element {
-	return p.parent
-}
-
-// Index returns the index of this ProcInst token within its parent element's
-// list of child tokens. If this ProcInst token has no parent element, the
-// index is -1.
-func (p *ProcInst) Index() int {
-	return p.index
-}
-
-// setParent replaces the processing instruction token's parent.
-func (p *ProcInst) setParent(parent *Element) {
-	p.parent = parent
-}
-
-// setIndex sets the processing instruction token's index within its parent
-// element's Child slice.
-func (p *ProcInst) setIndex(index int) {
-	p.index = index
-}
-
-// writeTo serializes the processing instruction to the writer.
-func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) {
-	w.WriteString("<?")
-	w.WriteString(p.Target)
-	if p.Inst != "" {
-		w.WriteString(" ")
-		w.WriteString(p.Inst)
-	}
-	w.WriteString("?>")
-}
diff --git a/vendor/github.com/beevik/etree/helpers.go b/vendor/github.com/beevik/etree/helpers.go
deleted file mode 100644
index 825e14e..0000000
--- a/vendor/github.com/beevik/etree/helpers.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2015-2019 Brett Vickers.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package etree
-
-import (
-	"bufio"
-	"io"
-	"strings"
-	"unicode/utf8"
-)
-
-// A simple stack
-type stack struct {
-	data []interface{}
-}
-
-func (s *stack) empty() bool {
-	return len(s.data) == 0
-}
-
-func (s *stack) push(value interface{}) {
-	s.data = append(s.data, value)
-}
-
-func (s *stack) pop() interface{} {
-	value := s.data[len(s.data)-1]
-	s.data[len(s.data)-1] = nil
-	s.data = s.data[:len(s.data)-1]
-	return value
-}
-
-func (s *stack) peek() interface{} {
-	return s.data[len(s.data)-1]
-}
-
-// A fifo is a simple first-in-first-out queue.
-type fifo struct {
-	data []interface{}
-	head, tail int
-}
-
-func (f *fifo) add(value interface{}) {
-	if f.len()+1 >= len(f.data) {
-		f.grow()
-	}
-	f.data[f.tail] = value
-	if f.tail++; f.tail == len(f.data) {
-		f.tail = 0
-	}
-}
-
-func (f *fifo) remove() interface{} {
-	value := f.data[f.head]
-	f.data[f.head] = nil
-	if f.head++; f.head == len(f.data) {
-		f.head = 0
-	}
-	return value
-}
-
-func (f *fifo) len() int {
-	if f.tail >= f.head {
-		return f.tail - f.head
-	}
-	return len(f.data) - f.head + f.tail
-}
-
-func (f *fifo) grow() {
-	c := len(f.data) * 2
-	if c == 0 {
-		c = 4
-	}
-	buf, count := make([]interface{}, c), f.len()
-	if f.tail >= f.head {
-		copy(buf[0:count], f.data[f.head:f.tail])
-	} else {
-		hindex := len(f.data) - f.head
-		copy(buf[0:hindex], f.data[f.head:])
-		copy(buf[hindex:count], f.data[:f.tail])
-	}
-	f.data, f.head, f.tail = buf, 0, count
-}
-
-// countReader implements a proxy reader that counts the number of
-// bytes read from its encapsulated reader.
-type countReader struct {
-	r io.Reader
-	bytes int64
-}
-
-func newCountReader(r io.Reader) *countReader {
-	return &countReader{r: r}
-}
-
-func (cr *countReader) Read(p []byte) (n int, err error) {
-	b, err := cr.r.Read(p)
-	cr.bytes += int64(b)
-	return b, err
-}
-
-// countWriter implements a proxy writer that counts the number of
-// bytes written by its encapsulated writer.
-type countWriter struct {
-	w io.Writer
-	bytes int64
-}
-
-func newCountWriter(w io.Writer) *countWriter {
-	return &countWriter{w: w}
-}
-
-func (cw *countWriter) Write(p []byte) (n int, err error) {
-	b, err := cw.w.Write(p)
-	cw.bytes += int64(b)
-	return b, err
-}
-
-// isWhitespace returns true if the byte slice contains only
-// whitespace characters.
-func isWhitespace(s string) bool {
-	for i := 0; i < len(s); i++ {
-		if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' {
-			return false
-		}
-	}
-	return true
-}
-
-// spaceMatch returns true if namespace a is the empty string
-// or if namespace a equals namespace b.
-func spaceMatch(a, b string) bool {
-	switch {
-	case a == "":
-		return true
-	default:
-		return a == b
-	}
-}
-
-// spaceDecompose breaks a namespace:tag identifier at the ':'
-// and returns the two parts.
-func spaceDecompose(str string) (space, key string) {
-	colon := strings.IndexByte(str, ':')
-	if colon == -1 {
-		return "", str
-	}
-	return str[:colon], str[colon+1:]
-}
-
-// Strings used by indentCRLF and indentLF
-const (
-	indentSpaces = "\r\n "
-	indentTabs = "\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t"
-)
-
-// indentCRLF returns a CRLF newline followed by n copies of the first
-// non-CRLF character in the source string.
-func indentCRLF(n int, source string) string {
-	switch {
-	case n < 0:
-		return source[:2]
-	case n < len(source)-1:
-		return source[:n+2]
-	default:
-		return source + strings.Repeat(source[2:3], n-len(source)+2)
-	}
-}
-
-// indentLF returns a LF newline followed by n copies of the first non-LF
-// character in the source string.
-func indentLF(n int, source string) string {
-	switch {
-	case n < 0:
-		return source[1:2]
-	case n < len(source)-1:
-		return source[1 : n+2]
-	default:
-		return source[1:] + strings.Repeat(source[2:3], n-len(source)+2)
-	}
-}
-
-// nextIndex returns the index of the next occurrence of sep in s,
-// starting from offset. It returns -1 if the sep string is not found.
-func nextIndex(s, sep string, offset int) int {
-	switch i := strings.Index(s[offset:], sep); i {
-	case -1:
-		return -1
-	default:
-		return offset + i
-	}
-}
-
-// isInteger returns true if the string s contains an integer.
-func isInteger(s string) bool {
-	for i := 0; i < len(s); i++ {
-		if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') {
-			return false
-		}
-	}
-	return true
-}
-
-type escapeMode byte
-
-const (
-	escapeNormal escapeMode = iota
-	escapeCanonicalText
-	escapeCanonicalAttr
-)
-
-// escapeString writes an escaped version of a string to the writer.
-func escapeString(w *bufio.Writer, s string, m escapeMode) {
-	var esc []byte
-	last := 0
-	for i := 0; i < len(s); {
-		r, width := utf8.DecodeRuneInString(s[i:])
-		i += width
-		switch r {
-		case '&':
-			esc = []byte("&amp;")
-		case '<':
-			esc = []byte("&lt;")
-		case '>':
-			if m == escapeCanonicalAttr {
-				continue
-			}
-			esc = []byte("&gt;")
-		case '\'':
-			if m != escapeNormal {
-				continue
-			}
-			esc = []byte("&apos;")
-		case '"':
-			if m == escapeCanonicalText {
-				continue
-			}
-			esc = []byte("&quot;")
-		case '\t':
-			if m != escapeCanonicalAttr {
-				continue
-			}
-			esc = []byte("&#x9;")
-		case '\n':
-			if m != escapeCanonicalAttr {
-				continue
-			}
-			esc = []byte("&#xA;")
-		case '\r':
-			if m == escapeNormal {
-				continue
-			}
-			esc = []byte("&#xD;")
-		default:
-			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
-				esc = []byte("\uFFFD")
-				break
-			}
-			continue
-		}
-		w.WriteString(s[last : i-width])
-		w.Write(esc)
-		last = i
-	}
-	w.WriteString(s[last:])
-}
-
-func isInCharacterRange(r rune) bool {
-	return r == 0x09 ||
-		r == 0x0A ||
-		r == 0x0D ||
-		r >= 0x20 && r <= 0xD7FF ||
-		r >= 0xE000 && r <= 0xFFFD ||
-		r >= 0x10000 && r <= 0x10FFFF
-}
diff --git a/vendor/github.com/beevik/etree/path.go b/vendor/github.com/beevik/etree/path.go
deleted file mode 100644
index 82db0ac..0000000
--- a/vendor/github.com/beevik/etree/path.go
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright 2015-2019 Brett Vickers.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package etree
-
-import (
-	"strconv"
-	"strings"
-)
-
-/*
-A Path is a string that represents a search path through an etree starting
-from the document root or an arbitrary element. Paths are used with the
-Element object's Find* methods to locate and return desired elements.
- -A Path consists of a series of slash-separated "selectors", each of which may -be modified by one or more bracket-enclosed "filters". Selectors are used to -traverse the etree from element to element, while filters are used to narrow -the list of candidate elements at each node. - -Although etree Path strings are similar to XPath strings -(https://www.w3.org/TR/1999/REC-xpath-19991116/), they have a more limited set -of selectors and filtering options. - -The following selectors are supported by etree Path strings: - - . Select the current element. - .. Select the parent of the current element. - * Select all child elements of the current element. - / Select the root element when used at the start of a path. - // Select all descendants of the current element. - tag Select all child elements with a name matching the tag. - -The following basic filters are supported by etree Path strings: - - [@attrib] Keep elements with an attribute named attrib. - [@attrib='val'] Keep elements with an attribute named attrib and value matching val. - [tag] Keep elements with a child element named tag. - [tag='val'] Keep elements with a child element named tag and text matching val. - [n] Keep the n-th element, where n is a numeric index starting from 1. - -The following function filters are also supported: - - [text()] Keep elements with non-empty text. - [text()='val'] Keep elements whose text matches val. - [local-name()='val'] Keep elements whose un-prefixed tag matches val. - [name()='val'] Keep elements whose full tag exactly matches val. - [namespace-prefix()='val'] Keep elements whose namespace prefix matches val. - [namespace-uri()='val'] Keep elements whose namespace URI matches val. - -Here are some examples of Path strings: - -- Select the bookstore child element of the root element: - /bookstore - -- Beginning from the root element, select the title elements of all -descendant book elements having a 'category' attribute of 'WEB': - //book[@category='WEB']/title - -- Beginning from the current element, select the first descendant -book element with a title child element containing the text 'Great -Expectations': - .//book[title='Great Expectations'][1] - -- Beginning from the current element, select all child elements of -book elements with an attribute 'language' set to 'english': - ./book/*[@language='english'] - -- Beginning from the current element, select all child elements of -book elements containing the text 'special': - ./book/*[text()='special'] - -- Beginning from the current element, select all descendant book -elements whose title child element has a 'language' attribute of 'french': - .//book/title[@language='french']/.. - -- Beginning from the current element, select all book elements -belonging to the http://www.w3.org/TR/html4/ namespace: - .//book[namespace-uri()='http://www.w3.org/TR/html4/'] - -*/ -type Path struct { - segments []segment -} - -// ErrPath is returned by path functions when an invalid etree path is provided. -type ErrPath string - -// Error returns the string describing a path error. -func (err ErrPath) Error() string { - return "etree: " + string(err) -} - -// CompilePath creates an optimized version of an XPath-like string that -// can be used to query elements in an element tree. 
-func CompilePath(path string) (Path, error) { - var comp compiler - segments := comp.parsePath(path) - if comp.err != ErrPath("") { - return Path{nil}, comp.err - } - return Path{segments}, nil -} - -// MustCompilePath creates an optimized version of an XPath-like string that -// can be used to query elements in an element tree. Panics if an error -// occurs. Use this function to create Paths when you know the path is -// valid (i.e., if it's hard-coded). -func MustCompilePath(path string) Path { - p, err := CompilePath(path) - if err != nil { - panic(err) - } - return p -} - -// A segment is a portion of a path between "/" characters. -// It contains one selector and zero or more [filters]. -type segment struct { - sel selector - filters []filter -} - -func (seg *segment) apply(e *Element, p *pather) { - seg.sel.apply(e, p) - for _, f := range seg.filters { - f.apply(p) - } -} - -// A selector selects XML elements for consideration by the -// path traversal. -type selector interface { - apply(e *Element, p *pather) -} - -// A filter pares down a list of candidate XML elements based -// on a path filter in [brackets]. -type filter interface { - apply(p *pather) -} - -// A pather is helper object that traverses an element tree using -// a Path object. It collects and deduplicates all elements matching -// the path query. -type pather struct { - queue fifo - results []*Element - inResults map[*Element]bool - candidates []*Element - scratch []*Element // used by filters -} - -// A node represents an element and the remaining path segments that -// should be applied against it by the pather. -type node struct { - e *Element - segments []segment -} - -func newPather() *pather { - return &pather{ - results: make([]*Element, 0), - inResults: make(map[*Element]bool), - candidates: make([]*Element, 0), - scratch: make([]*Element, 0), - } -} - -// traverse follows the path from the element e, collecting -// and then returning all elements that match the path's selectors -// and filters. -func (p *pather) traverse(e *Element, path Path) []*Element { - for p.queue.add(node{e, path.segments}); p.queue.len() > 0; { - p.eval(p.queue.remove().(node)) - } - return p.results -} - -// eval evalutes the current path node by applying the remaining -// path's selector rules against the node's element. -func (p *pather) eval(n node) { - p.candidates = p.candidates[0:0] - seg, remain := n.segments[0], n.segments[1:] - seg.apply(n.e, p) - - if len(remain) == 0 { - for _, c := range p.candidates { - if in := p.inResults[c]; !in { - p.inResults[c] = true - p.results = append(p.results, c) - } - } - } else { - for _, c := range p.candidates { - p.queue.add(node{c, remain}) - } - } -} - -// A compiler generates a compiled path from a path string. -type compiler struct { - err ErrPath -} - -// parsePath parses an XPath-like string describing a path -// through an element tree and returns a slice of segment -// descriptors. 
-func (c *compiler) parsePath(path string) []segment { - // If path ends with //, fix it - if strings.HasSuffix(path, "//") { - path = path + "*" - } - - var segments []segment - - // Check for an absolute path - if strings.HasPrefix(path, "/") { - segments = append(segments, segment{new(selectRoot), []filter{}}) - path = path[1:] - } - - // Split path into segments - for _, s := range splitPath(path) { - segments = append(segments, c.parseSegment(s)) - if c.err != ErrPath("") { - break - } - } - return segments -} - -func splitPath(path string) []string { - pieces := make([]string, 0) - start := 0 - inquote := false - for i := 0; i+1 <= len(path); i++ { - if path[i] == '\'' { - inquote = !inquote - } else if path[i] == '/' && !inquote { - pieces = append(pieces, path[start:i]) - start = i + 1 - } - } - return append(pieces, path[start:]) -} - -// parseSegment parses a path segment between / characters. -func (c *compiler) parseSegment(path string) segment { - pieces := strings.Split(path, "[") - seg := segment{ - sel: c.parseSelector(pieces[0]), - filters: []filter{}, - } - for i := 1; i < len(pieces); i++ { - fpath := pieces[i] - if fpath[len(fpath)-1] != ']' { - c.err = ErrPath("path has invalid filter [brackets].") - break - } - seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) - } - return seg -} - -// parseSelector parses a selector at the start of a path segment. -func (c *compiler) parseSelector(path string) selector { - switch path { - case ".": - return new(selectSelf) - case "..": - return new(selectParent) - case "*": - return new(selectChildren) - case "": - return new(selectDescendants) - default: - return newSelectChildrenByTag(path) - } -} - -var fnTable = map[string]struct { - hasFn func(e *Element) bool - getValFn func(e *Element) string -}{ - "local-name": {nil, (*Element).name}, - "name": {nil, (*Element).FullTag}, - "namespace-prefix": {nil, (*Element).namespacePrefix}, - "namespace-uri": {nil, (*Element).NamespaceURI}, - "text": {(*Element).hasText, (*Element).Text}, -} - -// parseFilter parses a path filter contained within [brackets]. -func (c *compiler) parseFilter(path string) filter { - if len(path) == 0 { - c.err = ErrPath("path contains an empty filter expression.") - return nil - } - - // Filter contains [@attr='val'], [fn()='val'], or [tag='val']? 
- eqindex := strings.Index(path, "='") - if eqindex >= 0 { - rindex := nextIndex(path, "'", eqindex+2) - if rindex != len(path)-1 { - c.err = ErrPath("path has mismatched filter quotes.") - return nil - } - - key := path[:eqindex] - value := path[eqindex+2 : rindex] - - switch { - case key[0] == '@': - return newFilterAttrVal(key[1:], value) - case strings.HasSuffix(key, "()"): - fn := key[:len(key)-2] - if t, ok := fnTable[fn]; ok && t.getValFn != nil { - return newFilterFuncVal(t.getValFn, value) - } - c.err = ErrPath("path has unknown function " + fn) - return nil - default: - return newFilterChildText(key, value) - } - } - - // Filter contains [@attr], [N], [tag] or [fn()] - switch { - case path[0] == '@': - return newFilterAttr(path[1:]) - case strings.HasSuffix(path, "()"): - fn := path[:len(path)-2] - if t, ok := fnTable[fn]; ok && t.hasFn != nil { - return newFilterFunc(t.hasFn) - } - c.err = ErrPath("path has unknown function " + fn) - return nil - case isInteger(path): - pos, _ := strconv.Atoi(path) - switch { - case pos > 0: - return newFilterPos(pos - 1) - default: - return newFilterPos(pos) - } - default: - return newFilterChild(path) - } -} - -// selectSelf selects the current element into the candidate list. -type selectSelf struct{} - -func (s *selectSelf) apply(e *Element, p *pather) { - p.candidates = append(p.candidates, e) -} - -// selectRoot selects the element's root node. -type selectRoot struct{} - -func (s *selectRoot) apply(e *Element, p *pather) { - root := e - for root.parent != nil { - root = root.parent - } - p.candidates = append(p.candidates, root) -} - -// selectParent selects the element's parent into the candidate list. -type selectParent struct{} - -func (s *selectParent) apply(e *Element, p *pather) { - if e.parent != nil { - p.candidates = append(p.candidates, e.parent) - } -} - -// selectChildren selects the element's child elements into the -// candidate list. -type selectChildren struct{} - -func (s *selectChildren) apply(e *Element, p *pather) { - for _, c := range e.Child { - if c, ok := c.(*Element); ok { - p.candidates = append(p.candidates, c) - } - } -} - -// selectDescendants selects all descendant child elements -// of the element into the candidate list. -type selectDescendants struct{} - -func (s *selectDescendants) apply(e *Element, p *pather) { - var queue fifo - for queue.add(e); queue.len() > 0; { - e := queue.remove().(*Element) - p.candidates = append(p.candidates, e) - for _, c := range e.Child { - if c, ok := c.(*Element); ok { - queue.add(c) - } - } - } -} - -// selectChildrenByTag selects into the candidate list all child -// elements of the element having the specified tag. -type selectChildrenByTag struct { - space, tag string -} - -func newSelectChildrenByTag(path string) *selectChildrenByTag { - s, l := spaceDecompose(path) - return &selectChildrenByTag{s, l} -} - -func (s *selectChildrenByTag) apply(e *Element, p *pather) { - for _, c := range e.Child { - if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag { - p.candidates = append(p.candidates, c) - } - } -} - -// filterPos filters the candidate list, keeping only the -// candidate at the specified index. 
-type filterPos struct { - index int -} - -func newFilterPos(pos int) *filterPos { - return &filterPos{pos} -} - -func (f *filterPos) apply(p *pather) { - if f.index >= 0 { - if f.index < len(p.candidates) { - p.scratch = append(p.scratch, p.candidates[f.index]) - } - } else { - if -f.index <= len(p.candidates) { - p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index]) - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterAttr filters the candidate list for elements having -// the specified attribute. -type filterAttr struct { - space, key string -} - -func newFilterAttr(str string) *filterAttr { - s, l := spaceDecompose(str) - return &filterAttr{s, l} -} - -func (f *filterAttr) apply(p *pather) { - for _, c := range p.candidates { - for _, a := range c.Attr { - if spaceMatch(f.space, a.Space) && f.key == a.Key { - p.scratch = append(p.scratch, c) - break - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterAttrVal filters the candidate list for elements having -// the specified attribute with the specified value. -type filterAttrVal struct { - space, key, val string -} - -func newFilterAttrVal(str, value string) *filterAttrVal { - s, l := spaceDecompose(str) - return &filterAttrVal{s, l, value} -} - -func (f *filterAttrVal) apply(p *pather) { - for _, c := range p.candidates { - for _, a := range c.Attr { - if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value { - p.scratch = append(p.scratch, c) - break - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterFunc filters the candidate list for elements satisfying a custom -// boolean function. -type filterFunc struct { - fn func(e *Element) bool -} - -func newFilterFunc(fn func(e *Element) bool) *filterFunc { - return &filterFunc{fn} -} - -func (f *filterFunc) apply(p *pather) { - for _, c := range p.candidates { - if f.fn(c) { - p.scratch = append(p.scratch, c) - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterFuncVal filters the candidate list for elements containing a value -// matching the result of a custom function. -type filterFuncVal struct { - fn func(e *Element) string - val string -} - -func newFilterFuncVal(fn func(e *Element) string, value string) *filterFuncVal { - return &filterFuncVal{fn, value} -} - -func (f *filterFuncVal) apply(p *pather) { - for _, c := range p.candidates { - if f.fn(c) == f.val { - p.scratch = append(p.scratch, c) - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterChild filters the candidate list for elements having -// a child element with the specified tag. -type filterChild struct { - space, tag string -} - -func newFilterChild(str string) *filterChild { - s, l := spaceDecompose(str) - return &filterChild{s, l} -} - -func (f *filterChild) apply(p *pather) { - for _, c := range p.candidates { - for _, cc := range c.Child { - if cc, ok := cc.(*Element); ok && - spaceMatch(f.space, cc.Space) && - f.tag == cc.Tag { - p.scratch = append(p.scratch, c) - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterChildText filters the candidate list for elements having -// a child element with the specified tag and text. 
-type filterChildText struct { - space, tag, text string -} - -func newFilterChildText(str, text string) *filterChildText { - s, l := spaceDecompose(str) - return &filterChildText{s, l, text} -} - -func (f *filterChildText) apply(p *pather) { - for _, c := range p.candidates { - for _, cc := range c.Child { - if cc, ok := cc.(*Element); ok && - spaceMatch(f.space, cc.Space) && - f.tag == cc.Tag && - f.text == cc.Text() { - p.scratch = append(p.scratch, c) - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} diff --git a/vendor/github.com/brancz/gojsontoyaml/.gitignore b/vendor/github.com/brancz/gojsontoyaml/.gitignore deleted file mode 100644 index a1338d6..0000000 --- a/vendor/github.com/brancz/gojsontoyaml/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ diff --git a/vendor/github.com/brancz/gojsontoyaml/Gopkg.lock b/vendor/github.com/brancz/gojsontoyaml/Gopkg.lock deleted file mode 100644 index 8bf5b4e..0000000 --- a/vendor/github.com/brancz/gojsontoyaml/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/ghodss/yaml" - packages = ["."] - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" - version = "v2.1.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "714ea508b74c96cca7230568cb19bac8cb14719312298add5ffedd390410353e" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/brancz/gojsontoyaml/Gopkg.toml b/vendor/github.com/brancz/gojsontoyaml/Gopkg.toml deleted file mode 100644 index 26d497b..0000000 --- a/vendor/github.com/brancz/gojsontoyaml/Gopkg.toml +++ /dev/null @@ -1,25 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/ghodss/yaml" - version = "1.0.0" diff --git a/vendor/github.com/brancz/gojsontoyaml/LICENSE b/vendor/github.com/brancz/gojsontoyaml/LICENSE deleted file mode 100644 index 4f6aa1b..0000000 --- a/vendor/github.com/brancz/gojsontoyaml/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Frederic Branczyk - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/brancz/gojsontoyaml/README.md b/vendor/github.com/brancz/gojsontoyaml/README.md deleted file mode 100644 index 50b9ca9..0000000 --- a/vendor/github.com/brancz/gojsontoyaml/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# gojsontoyaml - -This is a small tool written in go to convert json to yaml reading from STDIN and writing to STDOUT. The heavy lifting is actually done by [ghodss/yaml](https://github.com/ghodss/yaml) and [gopkg.in/yaml.v2](http://gopkg.in/yaml.v2). - -## Install - -To install simply - -``` -go get github.com/brancz/gojsontoyaml -``` - -## Usage - -Simply pipe a json string into `gojsontoyaml` and it will print the converted yaml string to STDOUT. - -``` -$ echo '{"test":"test string with\\nmultiple lines"}' | gojsontoyaml -test: |- - test string with - multiple lines -``` - -## Motivation - -You may ask yourself why this was developed. The answer is simple, when I wrote this there was no simple to use binary for this purpose that supported yaml multiline strings. All alternatives out there that I tried kept line breaks in the string rather than making use of the yaml multiline strings. 
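For context only (not part of this patch): the removed README above explains that gojsontoyaml exists to keep multiline strings as YAML block scalars when converting JSON to YAML. A minimal sketch of that behavior in Go, assuming the github.com/ghodss/yaml package that gojsontoyaml wraps and its JSONToYAML function; the expected output mirrors the README's usage example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ghodss/yaml"
)

func main() {
	// JSON input equivalent to the README example:
	// {"test":"test string with\nmultiple lines"}
	in := []byte(`{"test":"test string with\nmultiple lines"}`)

	out, err := yaml.JSONToYAML(in)
	if err != nil {
		log.Fatal(err)
	}

	// Prints a YAML block scalar, preserving the line break:
	//   test: |-
	//     test string with
	//     multiple lines
	fmt.Print(string(out))
}
```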
diff --git a/vendor/github.com/brancz/gojsontoyaml/main.go b/vendor/github.com/brancz/gojsontoyaml/main.go deleted file mode 100644 index 092865c..0000000 --- a/vendor/github.com/brancz/gojsontoyaml/main.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "flag" - "io/ioutil" - "log" - "os" - - "github.com/ghodss/yaml" -) - -func main() { - yamltojson := flag.Bool("yamltojson", false, "Convert yaml to json instead of the default json to yaml.") - flag.Parse() - - inBytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - var outBytes []byte - if *yamltojson { - outBytes, err = yaml.YAMLToJSON(inBytes) - if err != nil { - log.Fatal(err) - } - } else { - outBytes, err = yaml.JSONToYAML(inBytes) - if err != nil { - log.Fatal(err) - } - } - - os.Stdout.Write(outBytes) -} diff --git a/vendor/github.com/brancz/kube-rbac-proxy/LICENSE b/vendor/github.com/brancz/kube-rbac-proxy/LICENSE deleted file mode 100644 index 63b4598..0000000 --- a/vendor/github.com/brancz/kube-rbac-proxy/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017 Frederic Branczyk - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/brancz/kube-rbac-proxy/pkg/tls/reloader.go b/vendor/github.com/brancz/kube-rbac-proxy/pkg/tls/reloader.go deleted file mode 100644 index b1eeb97..0000000 --- a/vendor/github.com/brancz/kube-rbac-proxy/pkg/tls/reloader.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2017 Frederic Branczyk All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package tls - -import ( - "bytes" - "context" - "crypto/tls" - "fmt" - "io/ioutil" - "sync" - "time" - - "k8s.io/klog" -) - -// CertReloader is the struct that parses a certificate/key pair, -// providing a goroutine safe GetCertificate method to retrieve the parsed content. -// -// The GetCertificate signature is compatible with https://golang.org/pkg/crypto/tls/#Config.GetCertificate -// and can be used to hot-reload a certificate/key pair. -// -// For hot-reloading the Watch method must be started explicitly. -type CertReloader struct { - certPath, keyPath string - interval time.Duration - - mu sync.RWMutex // protects the fields below - cert *tls.Certificate - certRaw, keyRaw []byte -} - -func NewCertReloader(certPath, keyPath string, interval time.Duration) (*CertReloader, error) { - r := &CertReloader{ - certPath: certPath, - keyPath: keyPath, - interval: interval, - } - - if err := r.reload(); err != nil { - return nil, fmt.Errorf("error loading certificates: %v", err) - } - - return r, nil -} - -// Watch watches the configured certificate and key path and blocks the current goroutine -// until the scenario context is done or an error occurred during reloading. -func (r *CertReloader) Watch(ctx context.Context) error { - t := time.NewTicker(r.interval) - - for { - select { - case <-t.C: - case <-ctx.Done(): - return nil - } - - if err := r.reload(); err != nil { - return fmt.Errorf("reloading failed: %v", err) - } - } -} - -func (r *CertReloader) reload() error { - certRaw, err := ioutil.ReadFile(r.certPath) - if err != nil { - return fmt.Errorf("error loading certificate: %v", err) - } - - keyRaw, err := ioutil.ReadFile(r.keyPath) - if err != nil { - return fmt.Errorf("error loading key: %v", err) - } - - r.mu.RLock() - equal := bytes.Equal(keyRaw, r.keyRaw) && bytes.Equal(certRaw, r.certRaw) - r.mu.RUnlock() - - if equal { - return nil - } - - klog.V(4).Info("reloading key ", r.keyPath, " certificate ", r.certPath) - - cert, err := tls.X509KeyPair(certRaw, keyRaw) - if err != nil { - return fmt.Errorf("error parsing certificate: %v", err) - } - - r.mu.Lock() - r.cert = &cert - r.certRaw = certRaw - r.keyRaw = keyRaw - r.mu.Unlock() - - return nil -} - -// GetCertificate returns the current valid certificate. -// The ClientHello message is ignored -// and is just there to be compatible with https://golang.org/pkg/crypto/tls/#Config.GetCertificate. -func (r *CertReloader) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { - r.mu.RLock() - defer r.mu.RUnlock() - - return r.cert, nil -} diff --git a/vendor/github.com/campoy/embedmd/.gitignore b/vendor/github.com/campoy/embedmd/.gitignore deleted file mode 100644 index 6d343e7..0000000 --- a/vendor/github.com/campoy/embedmd/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.vscode/ -*.test -embed -.DS_Store diff --git a/vendor/github.com/campoy/embedmd/.travis.yml b/vendor/github.com/campoy/embedmd/.travis.yml deleted file mode 100644 index c9b96e5..0000000 --- a/vendor/github.com/campoy/embedmd/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.7.x - - 1.8.x - - 1.x - - master -script: - - go test ./... diff --git a/vendor/github.com/campoy/embedmd/LICENSE b/vendor/github.com/campoy/embedmd/LICENSE deleted file mode 100644 index e06d208..0000000 --- a/vendor/github.com/campoy/embedmd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/campoy/embedmd/README.md b/vendor/github.com/campoy/embedmd/README.md deleted file mode 100644 index c55fcd6..0000000 --- a/vendor/github.com/campoy/embedmd/README.md +++ /dev/null @@ -1,152 +0,0 @@ -[![Build Status](https://travis-ci.org/campoy/embedmd.svg)](https://travis-ci.org/campoy/embedmd) [![Go Report Card](https://goreportcard.com/badge/github.com/campoy/embedmd)](https://goreportcard.com/report/github.com/campoy/embedmd) - - -# embedmd - -Are you tired of copy pasting your code into your `README.md` file, just to -forget about it later on and have unsynced copies? Or even worse, code -that does not even compile? - -Then `embedmd` is for you! - -`embedmd` embeds files or fractions of files into Markdown files. It does -so by searching `embedmd` commands, which are a subset of the Markdown -syntax for comments. This means they are invisible when Markdown is -rendered, so they can be kept in the file as pointers to the origin of -the embedded text. - -The command receives a list of Markdown files. If no list is given, the command -reads from the standard input. - -The format of an `embedmd` command is: - -```Markdown -[embedmd]:# (pathOrURL language /start regexp/ /end regexp/) -``` - -The embedded code will be extracted from the file at `pathOrURL`, -which can either be a relative path to a file in the local file -system (using always forward slashes as directory separator) or -a URL starting with `http://` or `https://`. -If the `pathOrURL` is a URL the tool will fetch the content in that URL. -The embedded content starts at the first line that matches `/start regexp/` -and finishes at the first line matching `/end regexp/`. - -Omitting the the second regular expression will embed only the piece of text -that matches `/regexp/`: - -```Markdown -[embedmd]:# (pathOrURL language /regexp/) -``` - -To embed the whole line matching a regular expression you can use: - -```Markdown -[embedmd]:# (pathOrURL language /.*regexp.*/) -``` - -To embed from a point to the end you should use: - -```Markdown -[embedmd]:# (pathOrURL language /start regexp/ $) -``` - -To embed a whole file, omit both regular expressions: - -```Markdown -[embedmd]:# (pathOrURL language) -``` - -You can omit the language in any of the previous commands, and the extension -of the file will be used for the snippet syntax highlighting. - -This works when the file extensions matches the name of the language (like Go -files, since `.go` matches `go`). However, this will fail with other files like -`.md` whose language name is `markdown`. - -```Markdown -[embedmd]:# (file.ext) -``` - -## Installation - -> You can install Go by following [these instructions](https://golang.org/doc/install). - -`embedmd` is written in Go, so if you have Go installed you can install it with -`go get`: - -``` -go get github.com/campoy/embedmd -``` - -This will download the code, compile it, and leave an `embedmd` binary -in `$GOPATH/bin`. - -Eventually, and if there's enough interest, I will provide binaries for -every OS and architecture out there ... _eventually_. - -## Usage: - -Given the two files in [sample](sample): - -*hello.go:* - -[embedmd]:# (sample/hello.go) -```go -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package main - -import ( - "fmt" - "time" -) - -func main() { - fmt.Println("Hello, there, it is", time.Now()) -} -``` - -*docs.md:* - -[embedmd]:# (sample/docs.md Markdown /./ /embedmd.*time.*/) -```Markdown -# A hello world in Go - -Go is very simple, here you can see a whole "hello, world" program. - -[embedmd]:# (hello.go) - -We can try to embed a file from a directory. - -[embedmd]:# (test/hello.go /func main/ $) - -You always start with a `package` statement like: - -[embedmd]:# (hello.go /package.*/) - -Followed by an `import` statement: - -[embedmd]:# (hello.go /import/ /\)/) - -You can also see how to get the current time: - -[embedmd]:# (hello.go /time\.[^)]*\)/) -``` - -# Flags - -* `-w`: Executing `embedmd -w docs.md` will modify `docs.md` -and add the corresponding code snippets, as shown in -[sample/result.md](sample/result.md). - -* `-d`: Executing `embedmd -d docs.md` will display the difference -between the contents of `docs.md` and the output of -`embedmd docs.md`. - -### Disclaimer - -This is not an official Google product (experimental or otherwise), it is just -code that happens to be owned by Google. diff --git a/vendor/github.com/campoy/embedmd/embedmd/command.go b/vendor/github.com/campoy/embedmd/embedmd/command.go deleted file mode 100644 index 35ff28e..0000000 --- a/vendor/github.com/campoy/embedmd/embedmd/command.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to writing, software distributed -// under the License is distributed on a "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. - -package embedmd - -import ( - "errors" - "path/filepath" - "strings" -) - -type command struct { - path, lang string - start, end *string -} - -func parseCommand(s string) (*command, error) { - s = strings.TrimSpace(s) - if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' { - return nil, errors.New("argument list should be in parenthesis") - } - - args, err := fields(s[1 : len(s)-1]) - if err != nil { - return nil, err - } - if len(args) == 0 { - return nil, errors.New("missing file name") - } - - cmd := &command{path: args[0]} - args = args[1:] - if len(args) > 0 && args[0][0] != '/' { - cmd.lang, args = args[0], args[1:] - } else { - ext := filepath.Ext(cmd.path[1:]) - if len(ext) == 0 { - return nil, errors.New("language is required when file has no extension") - } - cmd.lang = ext[1:] - } - - switch { - case len(args) == 1: - cmd.start = &args[0] - case len(args) == 2: - cmd.start, cmd.end = &args[0], &args[1] - case len(args) > 2: - return nil, errors.New("too many arguments") - } - - return cmd, nil -} - -// fields returns a list of the groups of text separated by blanks, -// keeping all text surrounded by / as a group. 
-func fields(s string) ([]string, error) { - var args []string - - for s = strings.TrimSpace(s); len(s) > 0; s = strings.TrimSpace(s) { - if s[0] == '/' { - sep := nextSlash(s[1:]) - if sep < 0 { - return nil, errors.New("unbalanced /") - } - args, s = append(args, s[:sep+2]), s[sep+2:] - } else { - sep := strings.IndexByte(s[1:], ' ') - if sep < 0 { - return append(args, s), nil - } - args, s = append(args, s[:sep+1]), s[sep+1:] - } - } - - return args, nil -} - -// nextSlash will find the index of the next unescaped slash in a string. -func nextSlash(s string) int { - for sep := 0; ; sep++ { - i := strings.IndexByte(s[sep:], '/') - if i < 0 { - return -1 - } - sep += i - if sep == 0 || s[sep-1] != '\\' { - return sep - } - } -} diff --git a/vendor/github.com/campoy/embedmd/embedmd/content.go b/vendor/github.com/campoy/embedmd/embedmd/content.go deleted file mode 100644 index b9001af..0000000 --- a/vendor/github.com/campoy/embedmd/embedmd/content.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to writing, software distributed -// under the License is distributed on a "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. - -package embedmd - -import ( - "fmt" - "io/ioutil" - "net/http" - "path/filepath" - "strings" -) - -// Fetcher provides an abstraction on a file system. -// The Fetch function is called anytime some content needs to be fetched. -// For now this includes files and URLs. -// The first parameter is the base directory that could be used to resolve -// relative paths. This base directory will be ignored for absolute paths, -// such as URLs. -type Fetcher interface { - Fetch(dir, path string) ([]byte, error) -} - -type fetcher struct{} - -func (fetcher) Fetch(dir, path string) ([]byte, error) { - if !strings.HasPrefix(path, "http://") && !strings.HasPrefix(path, "https://") { - path = filepath.Join(dir, filepath.FromSlash(path)) - return ioutil.ReadFile(path) - } - - res, err := http.Get(path) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("status %s", res.Status) - } - return ioutil.ReadAll(res.Body) -} diff --git a/vendor/github.com/campoy/embedmd/embedmd/embedmd.go b/vendor/github.com/campoy/embedmd/embedmd/embedmd.go deleted file mode 100644 index 7135f75..0000000 --- a/vendor/github.com/campoy/embedmd/embedmd/embedmd.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to writing, software distributed -// under the License is distributed on a "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package embedmd provides a single function, Process, that parses markdown -// searching for markdown comments. -// -// The format of an embedmd command is: -// -// [embedmd]:# (pathOrURL language /start regexp/ /end regexp/) -// -// The embedded code will be extracted from the file at pathOrURL, -// which can either be a relative path to a file in the local file -// system (using always forward slashes as directory separator) or -// a url starting with http:// or https://. -// If the pathOrURL is a url the tool will fetch the content in that url. -// The embedded content starts at the first line that matches /start regexp/ -// and finishes at the first line matching /end regexp/. -// -// Omitting the the second regular expression will embed only the piece of -// text that matches /regexp/: -// -// [embedmd]:# (pathOrURL language /regexp/) -// -// To embed the whole line matching a regular expression you can use: -// -// [embedmd]:# (pathOrURL language /.*regexp.*\n/) -// -// If you want to embed from a point to the end you should use: -// -// [embedmd]:# (pathOrURL language /start regexp/ $) -// -// Finally you can embed a whole file by omitting both regular expressions: -// -// [embedmd]:# (pathOrURL language) -// -// You can ommit the language in any of the previous commands, and the extension -// of the file will be used for the snippet syntax highlighting. Note that while -// this works Go files, since the file extension .go matches the name of the language -// go, this will fail with other files like .md whose language name is markdown. -// -// [embedmd]:# (file.ext) -// -package embedmd - -import ( - "fmt" - "io" - "regexp" -) - -// Process reads markdown from the given io.Reader searching for an embedmd -// command. When a command is found, it is executed and the output is written -// into the given io.Writer with the rest of standard markdown. -func Process(out io.Writer, in io.Reader, opts ...Option) error { - e := embedder{Fetcher: fetcher{}} - for _, opt := range opts { - opt.f(&e) - } - return process(out, in, e.runCommand) -} - -// An Option provides a way to adapt the Process function to your needs. -type Option struct{ f func(*embedder) } - -// WithBaseDir indicates that the given path should be used to resolve relative -// paths. -func WithBaseDir(path string) Option { - return Option{func(e *embedder) { e.baseDir = path }} -} - -// WithFetcher provides a custom Fetcher to be used whenever a path or url needs -// to be fetched. 
-func WithFetcher(c Fetcher) Option { - return Option{func(e *embedder) { e.Fetcher = c }} -} - -type embedder struct { - Fetcher - baseDir string -} - -func (e *embedder) runCommand(w io.Writer, cmd *command) error { - b, err := e.Fetch(e.baseDir, cmd.path) - if err != nil { - return fmt.Errorf("could not read %s: %v", cmd.path, err) - } - - b, err = extract(b, cmd.start, cmd.end) - if err != nil { - return fmt.Errorf("could not extract content from %s: %v", cmd.path, err) - } - - if len(b) > 0 && b[len(b)-1] != '\n' { - b = append(b, '\n') - } - - fmt.Fprintln(w, "```"+cmd.lang) - w.Write(b) - fmt.Fprintln(w, "```") - return nil -} - -func extract(b []byte, start, end *string) ([]byte, error) { - if start == nil && end == nil { - return b, nil - } - - match := func(s string) ([]int, error) { - if len(s) <= 2 || s[0] != '/' || s[len(s)-1] != '/' { - return nil, fmt.Errorf("missing slashes (/) around %q", s) - } - re, err := regexp.CompilePOSIX(s[1 : len(s)-1]) - if err != nil { - return nil, err - } - loc := re.FindIndex(b) - if loc == nil { - return nil, fmt.Errorf("could not match %q", s) - } - return loc, nil - } - - if *start != "" { - loc, err := match(*start) - if err != nil { - return nil, err - } - if end == nil { - return b[loc[0]:loc[1]], nil - } - b = b[loc[0]:] - } - - if *end != "$" { - loc, err := match(*end) - if err != nil { - return nil, err - } - b = b[:loc[1]] - } - - return b, nil -} diff --git a/vendor/github.com/campoy/embedmd/embedmd/parser.go b/vendor/github.com/campoy/embedmd/embedmd/parser.go deleted file mode 100644 index 02eccf9..0000000 --- a/vendor/github.com/campoy/embedmd/embedmd/parser.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to writing, software distributed -// under the License is distributed on a "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. - -package embedmd - -import ( - "bufio" - "fmt" - "io" - "strings" -) - -type commandRunner func(io.Writer, *command) error - -func process(out io.Writer, in io.Reader, run commandRunner) error { - s := &countingScanner{bufio.NewScanner(in), 0} - - state := parsingText - var err error - for state != nil { - state, err = state(out, s, run) - if err != nil { - return fmt.Errorf("%d: %v", s.line, err) - } - } - - if err := s.Err(); err != nil { - return fmt.Errorf("%d: %v", s.line, err) - } - return nil -} - -type countingScanner struct { - *bufio.Scanner - line int -} - -func (c *countingScanner) Scan() bool { - b := c.Scanner.Scan() - if b { - c.line++ - } - return b -} - -type textScanner interface { - Text() string - Scan() bool -} - -type state func(io.Writer, textScanner, commandRunner) (state, error) - -func parsingText(out io.Writer, s textScanner, run commandRunner) (state, error) { - if !s.Scan() { - return nil, nil // end of file, which is fine. 
- } - switch line := s.Text(); { - case strings.HasPrefix(line, "[embedmd]:#"): - return parsingCmd, nil - case strings.HasPrefix(line, "```"): - return codeParser{print: true}.parse, nil - default: - fmt.Fprintln(out, s.Text()) - return parsingText, nil - } -} - -func parsingCmd(out io.Writer, s textScanner, run commandRunner) (state, error) { - line := s.Text() - fmt.Fprintln(out, line) - args := line[strings.Index(line, "#")+1:] - cmd, err := parseCommand(args) - if err != nil { - return nil, err - } - if err := run(out, cmd); err != nil { - return nil, err - } - if !s.Scan() { - return nil, nil // end of file, which is fine. - } - if strings.HasPrefix(s.Text(), "```") { - return codeParser{print: false}.parse, nil - } - fmt.Fprintln(out, s.Text()) - return parsingText, nil -} - -type codeParser struct{ print bool } - -func (c codeParser) parse(out io.Writer, s textScanner, run commandRunner) (state, error) { - if c.print { - fmt.Fprintln(out, s.Text()) - } - if !s.Scan() { - return nil, fmt.Errorf("unbalanced code section") - } - if !strings.HasPrefix(s.Text(), "```") { - return c.parse, nil - } - - // print the end of the code section if needed and go back to parsing text. - if c.print { - fmt.Fprintln(out, s.Text()) - } - return parsingText, nil -} diff --git a/vendor/github.com/campoy/embedmd/main.go b/vendor/github.com/campoy/embedmd/main.go deleted file mode 100644 index bdbe8d4..0000000 --- a/vendor/github.com/campoy/embedmd/main.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to writing, software distributed -// under the License is distributed on a "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. -// -// See the License for the specific language governing permissions and -// limitations under the License. - -// embedmd -// -// embedmd embeds files or fractions of files into markdown files. -// It does so by searching embedmd commands, which are a subset of the -// markdown syntax for comments. This means they are invisible when -// markdown is rendered, so they can be kept in the file as pointers -// to the origin of the embedded text. -// -// The command receives a list of markdown files, if none is given it -// reads from the standard input. -// -// embedmd supports two flags: -// -d: will print the difference of the input file with what the output -// would have been if executed. -// -w: rewrites the given files rather than writing the output to the standard -// output. -// -// For more information on the format of the commands, read the documentation -// of the github.com/campoy/embedmd/embedmd package. -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/campoy/embedmd/embedmd" - "github.com/pmezard/go-difflib/difflib" -) - -// modified while building by -ldflags. 
-var version = "unkown" - -func usage() { - fmt.Fprintf(os.Stderr, "usage: embedmd [flags] [path ...]\n") - flag.PrintDefaults() -} - -func main() { - rewrite := flag.Bool("w", false, "write result to (markdown) file instead of stdout") - doDiff := flag.Bool("d", false, "display diffs instead of rewriting files") - printVersion := flag.Bool("v", false, "display embedmd version") - flag.Usage = usage - flag.Parse() - - if *printVersion { - fmt.Println("embedmd version: " + version) - return - } - - diff, err := embed(flag.Args(), *rewrite, *doDiff) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(2) - } - if diff && *doDiff { - os.Exit(2) - } -} - -var ( - stdout io.Writer = os.Stdout - stdin io.Reader = os.Stdin -) - -func embed(paths []string, rewrite, doDiff bool) (foundDiff bool, err error) { - if rewrite && doDiff { - return false, fmt.Errorf("error: cannot use -w and -d simultaneously") - } - - if len(paths) == 0 { - if rewrite { - return false, fmt.Errorf("error: cannot use -w with standard input") - } - if !doDiff { - return false, embedmd.Process(stdout, stdin) - } - - var out, in bytes.Buffer - if err := embedmd.Process(&out, io.TeeReader(stdin, &in)); err != nil { - return false, err - } - d, err := diff(in.String(), out.String()) - if err != nil || len(d) == 0 { - return false, err - } - fmt.Fprintf(stdout, "%s", d) - return true, nil - } - - for _, path := range paths { - d, err := processFile(path, rewrite, doDiff) - if err != nil { - return false, fmt.Errorf("%s:%v", path, err) - } - foundDiff = foundDiff || d - } - return foundDiff, nil -} - -type file interface { - io.ReadCloser - io.WriterAt - Truncate(int64) error -} - -// replaced by testing functions. -var openFile = func(name string) (file, error) { - return os.OpenFile(name, os.O_RDWR, 0666) -} - -func readFile(path string) ([]byte, error) { - f, err := openFile(path) - if err != nil { - return nil, err - } - defer f.Close() - return ioutil.ReadAll(f) -} - -func processFile(path string, rewrite, doDiff bool) (foundDiff bool, err error) { - if filepath.Ext(path) != ".md" { - return false, fmt.Errorf("not a markdown file") - } - - f, err := openFile(path) - if err != nil { - return false, err - } - defer f.Close() - - buf := new(bytes.Buffer) - if err := embedmd.Process(buf, f, embedmd.WithBaseDir(filepath.Dir(path))); err != nil { - return false, err - } - - if doDiff { - f, err := readFile(path) - if err != nil { - return false, fmt.Errorf("could not read %s for diff: %v", path, err) - } - data, err := diff(string(f), buf.String()) - if err != nil || len(data) == 0 { - return false, err - } - fmt.Fprintf(stdout, "%s", data) - return true, nil - } - - if rewrite { - n, err := f.WriteAt(buf.Bytes(), 0) - if err != nil { - return false, fmt.Errorf("could not write: %v", err) - } - return false, f.Truncate(int64(n)) - } - - io.Copy(stdout, buf) - return false, nil -} - -func diff(a, b string) (string, error) { - return difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(a), - B: difflib.SplitLines(b), - Context: 3, - }) -} diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md deleted file mode 100644 index 0982fd2..0000000 --- a/vendor/github.com/cespare/xxhash/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. 
This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -The API is very small, taking its cue from the other hashing packages in the -standard library: - - $ go doc github.com/cespare/xxhash ! - package xxhash // import "github.com/cespare/xxhash" - - Package xxhash implements the 64-bit variant of xxHash (XXH64) as described - at http://cyan4973.github.io/xxHash/. - - func New() hash.Hash64 - func Sum64(b []byte) uint64 - func Sum64String(s string) uint64 - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64 against another popular Go XXH64 implementation, -[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): - -| input size | OneOfOne | cespare (purego) | cespare | -| --- | --- | --- | --- | -| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | -| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | -| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | -| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | - -These numbers were generated with: - -``` -$ go test -benchtime 10s -bench '/OneOfOne,' -$ go test -tags purego -benchtime 10s -bench '/xxhash,' -$ go test -benchtime 10s -bench '/xxhash,' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go deleted file mode 100644 index f3eac5e..0000000 --- a/vendor/github.com/cespare/xxhash/rotate.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package xxhash - -// TODO(caleb): After Go 1.10 comes out, remove this fallback code. - -func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go deleted file mode 100644 index b99612b..0000000 --- a/vendor/github.com/cespare/xxhash/rotate19.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.9 - -package xxhash - -import "math/bits" - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea8..0000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... 
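The xxhash changes in this vendor update (the v1 package is dropped and the v2 copy below is refreshed) amount to a small, stable API plus seeded constructors. As a point of reference only, here is a minimal usage sketch; it assumes the `github.com/cespare/xxhash/v2` import path and the `NewWithSeed` constructor added in the xxhash.go hunk further below, and is illustrative rather than part of the vendored diff itself.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot helpers hash a whole input with a zero seed.
	fmt.Println(xxhash.Sum64([]byte("hello")))
	fmt.Println(xxhash.Sum64String("hello")) // avoids copying the string to []byte

	// A Digest hashes streamed data; the refreshed vendor copy also adds
	// seeded variants (NewWithSeed / ResetWithSeed), used here illustratively.
	d := xxhash.NewWithSeed(42)
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Println(d.Sum64())
}
```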
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 2fd8693..33c8830 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -1,10 +1,9 @@ # xxhash -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) +[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) +[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,23 +47,28 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 0000000..94b9c44 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. 
+ +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index db0b35f..78bddf1 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,21 +16,16 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array for the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -41,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } @@ -69,21 +76,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +142,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 @@ -193,7 +201,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error { b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) - b = b[len(d.mem):] d.n = int(d.total % uint64(len(d.mem))) return nil } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b80..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index d580e32..3e8b132 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. 
It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. 
- MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. 
+ SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 0000000..7e3145a --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. 
+ STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go new file mode 100644 index 0000000..78f95f2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -0,0 +1,15 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821..118e49e 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,8 +1,9 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7..05f5e7d 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,10 +1,11 @@ +//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 53bf76e..cf9d42a 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -6,41 +7,52 @@ package xxhash import ( - "reflect" "unsafe" ) -// Notes: +// In the future it's possible that compiler optimizations will make these +// XxxString functions unnecessary by realizing that calls such as +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. +// If that happens, even if we keep these functions they can be replaced with +// the trivial safe code. 
+ +// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: // -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. +// var b []byte +// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) +// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data +// bh.Len = len(s) +// bh.Cap = len(s) // -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. +// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough +// weight to this sequence of expressions that any function that uses it will +// not be inlined. Instead, the functions below use a different unsafe +// conversion designed to minimize the inliner weight and allow both to be +// inlined. There is also a test (TestInlining) which verifies that these are +// inlined. // -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. +// See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) + b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) return Sum64(b) } // WriteString adds more data to d. It always returns len(s), nil. // It may be faster than Write([]byte(s)) by avoiding a copy. func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) + d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) + // d.Write always returns len(s), nil. + // Ignoring the return output and returning these fixed values buys a + // savings of 6 in the inliner's cost model. + return len(s), nil +} + +// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout +// of the first two words is the same as the layout of a string. +type sliceHeader struct { + s string + cap int } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go deleted file mode 100644 index f896bd2..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "hash" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. 
Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -type xxh struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total int - mem [32]byte - n int // how much of mem is used -} - -// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. -func New() hash.Hash64 { - var x xxh - x.Reset() - return &x -} - -func (x *xxh) Reset() { - x.n = 0 - x.total = 0 - x.v1 = prime1v + prime2 - x.v2 = prime2 - x.v3 = 0 - x.v4 = -prime1v -} - -func (x *xxh) Size() int { return 8 } -func (x *xxh) BlockSize() int { return 32 } - -// Write adds more data to x. It always returns len(b), nil. -func (x *xxh) Write(b []byte) (n int, err error) { - n = len(b) - x.total += len(b) - - if x.n+len(b) < 32 { - // This new data doesn't even fill the current block. - copy(x.mem[x.n:], b) - x.n += len(b) - return - } - - if x.n > 0 { - // Finish off the partial block. - copy(x.mem[x.n:], b) - x.v1 = round(x.v1, u64(x.mem[0:8])) - x.v2 = round(x.v2, u64(x.mem[8:16])) - x.v3 = round(x.v3, u64(x.mem[16:24])) - x.v4 = round(x.v4, u64(x.mem[24:32])) - b = b[32-x.n:] - x.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - b = writeBlocks(x, b) - } - - // Store any remaining partial block. - copy(x.mem[:], b) - x.n = len(b) - - return -} - -func (x *xxh) Sum(b []byte) []byte { - s := x.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -func (x *xxh) Sum64() uint64 { - var h uint64 - - if x.total >= 32 { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = x.v3 + prime5 - } - - h += uint64(x.total) - - i, end := 0, x.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(x.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(x.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(x.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go deleted file mode 100644 index d617652..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s deleted file mode 100644 index 757f201..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,233 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the x pointer. - -// func writeBlocks(x *xxh, b []byte) []byte -TEXT ·writeBlocks(SB), NOSPLIT, $0-56 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. 
- MOVQ b_base+8(FP), CX - MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from x. - MOVQ x+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to x. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // Construct return slice. - // NOTE: It's important that we don't construct a slice that has a base - // pointer off the end of the original slice, as in Go 1.7+ this will - // cause runtime crashes. (See discussion in, for example, - // https://github.com/golang/go/issues/16772.) - // Therefore, we calculate the length/cap first, and if they're zero, we - // keep the old base. This is what the compiler does as well if you - // write code like - // b = b[len(b):] - - // New length is 32 - (CX - BX) -> BX+32 - CX. - ADDQ $32, BX - SUBQ CX, BX - JZ afterSetBase - - MOVQ CX, ret_base+32(FP) - -afterSetBase: - MOVQ BX, ret_len+40(FP) - MOVQ BX, ret_cap+48(FP) // set cap == len - - RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go deleted file mode 100644 index c68d13f..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_other.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // x := New() - // x.Write(b) - // return x.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(x *xxh, b []byte) []byte { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 - return b -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go deleted file mode 100644 index dfa15ab..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_safe.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. 
- -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go deleted file mode 100644 index d2b64e8..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -// -// TODO(caleb): Consider removing this if an optimization is ever added to make -// it unnecessary: https://golang.org/issue/2205. -// -// TODO(caleb): We still have a function call; we could instead write Go/asm -// copies of Sum64 for strings to squeeze out a bit more speed. -func Sum64String(s string) uint64 { - // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ - // for some discussion about this unsafe conversion. - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/vendor/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE deleted file mode 100644 index 23a0ada..0000000 --- a/vendor/github.com/coreos/go-systemd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go deleted file mode 100644 index a0f4837..0000000 --- a/vendor/github.com/coreos/go-systemd/journal/journal.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package journal provides write bindings to the local systemd journal. -// It is implemented in pure Go and connects to the journal directly over its -// unix socket. -// -// To read from the journal, see the "sdjournal" package, which wraps the -// sd-journal a C API. -// -// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -package journal - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "unsafe" -) - -// Priority of a journal message -type Priority int - -const ( - PriEmerg Priority = iota - PriAlert - PriCrit - PriErr - PriWarning - PriNotice - PriInfo - PriDebug -) - -var ( - // This can be overridden at build-time: - // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable - journalSocket = "/run/systemd/journal/socket" - - // unixConnPtr atomically holds the local unconnected Unix-domain socket. - // Concrete safe pointer type: *net.UnixConn - unixConnPtr unsafe.Pointer - // onceConn ensures that unixConnPtr is initialized exactly once. - onceConn sync.Once -) - -func init() { - onceConn.Do(initConn) -} - -// Enabled checks whether the local systemd journal is available for logging. -func Enabled() bool { - onceConn.Do(initConn) - - if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { - return false - } - - if _, err := net.Dial("unixgram", journalSocket); err != nil { - return false - } - - return true -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. -func Send(message string, priority Priority, vars map[string]string) error { - conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) - if conn == nil { - return errors.New("could not initialize socket to journald") - } - - socketAddr := &net.UnixAddr{ - Name: journalSocket, - Net: "unixgram", - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) - if err == nil { - return nil - } - if !isSocketSpaceError(err) { - return err - } - - // Large log entry, send it via tempfile and ancillary-fd. - file, err := tempFd() - if err != nil { - return err - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return err - } - rights := syscall.UnixRights(int(file.Fd())) - _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) - if err != nil { - return err - } - - return nil -} - -// Print prints a message to the local systemd journal using Send(). 
-func Print(priority Priority, format string, a ...interface{}) error { - return Send(fmt.Sprintf(format, a...), priority, nil) -} - -func appendVariable(w io.Writer, name, value string) { - if err := validVarName(name); err != nil { - fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) - } - if strings.ContainsRune(value, '\n') { - /* When the value contains a newline, we write: - * - the variable name, followed by a newline - * - the size (in 64bit little endian format) - * - the data, followed by a newline - */ - fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) - fmt.Fprintln(w, value) - } else { - /* just write the variable and value all on one line */ - fmt.Fprintf(w, "%s=%s\n", name, value) - } -} - -// validVarName validates a variable name to make sure journald will accept it. -// The variable name must be in uppercase and consist only of characters, -// numbers and underscores, and may not begin with an underscore: -// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html -func validVarName(name string) error { - if name == "" { - return errors.New("Empty variable name") - } else if name[0] == '_' { - return errors.New("Variable name begins with an underscore") - } - - for _, c := range name { - if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { - return errors.New("Variable name contains invalid characters") - } - } - return nil -} - -// isSocketSpaceError checks whether the error is signaling -// an "overlarge message" condition. -func isSocketSpaceError(err error) bool { - opErr, ok := err.(*net.OpError) - if !ok || opErr == nil { - return false - } - - sysErr, ok := opErr.Err.(*os.SyscallError) - if !ok || sysErr == nil { - return false - } - - return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS -} - -// tempFd creates a temporary, unlinked file under `/dev/shm`. -func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") - if err != nil { - return nil, err - } - err = syscall.Unlink(file.Name()) - if err != nil { - return nil, err - } - return file, nil -} - -// initConn initializes the global `unixConnPtr` socket. -// It is meant to be called exactly once, at program startup. -func initConn() { - autobind, err := net.ResolveUnixAddr("unixgram", "") - if err != nil { - return - } - - sock, err := net.ListenUnixgram("unixgram", autobind) - if err != nil { - return - } - - atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) -} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d208..0000000 --- a/vendor/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE deleted file mode 100644 index b39ddfa..0000000 --- a/vendor/github.com/coreos/pkg/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md deleted file mode 100644 index f79dbfc..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# capnslog, the CoreOS logging package - -There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). -capnslog provides a simple but consistent logging interface suitable for all kinds of projects. - -### Design Principles - -##### `package main` is the place where logging gets turned on and routed - -A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. - -##### All log options are runtime-configurable. - -Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. - -##### There is one log object per package. It is registered under its repository and package name. - -`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. - -##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. - -Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. - -Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. - -##### Log objects are an interface - -An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. - -##### Log levels have specific meanings: - - * Critical: Unrecoverable. Must fail. - * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost - * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. - * Notice: Normal, but important (uncommon) log information. - * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. - * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. - * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. 
- diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go deleted file mode 100644 index b305a84..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/formatters.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "fmt" - "io" - "log" - "runtime" - "strings" - "time" -) - -type Formatter interface { - Format(pkg string, level LogLevel, depth int, entries ...interface{}) - Flush() -} - -func NewStringFormatter(w io.Writer) Formatter { - return &StringFormatter{ - w: bufio.NewWriter(w), - } -} - -type StringFormatter struct { - w *bufio.Writer -} - -func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { - now := time.Now().UTC() - s.w.WriteString(now.Format(time.RFC3339)) - s.w.WriteByte(' ') - writeEntries(s.w, pkg, l, i, entries...) - s.Flush() -} - -func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { - if pkg != "" { - w.WriteString(pkg + ": ") - } - str := fmt.Sprint(entries...) - endsInNL := strings.HasSuffix(str, "\n") - w.WriteString(str) - if !endsInNL { - w.WriteString("\n") - } -} - -func (s *StringFormatter) Flush() { - s.w.Flush() -} - -func NewPrettyFormatter(w io.Writer, debug bool) Formatter { - return &PrettyFormatter{ - w: bufio.NewWriter(w), - debug: debug, - } -} - -type PrettyFormatter struct { - w *bufio.Writer - debug bool -} - -func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { - now := time.Now() - ts := now.Format("2006-01-02 15:04:05") - c.w.WriteString(ts) - ms := now.Nanosecond() / 1000 - c.w.WriteString(fmt.Sprintf(".%06d", ms)) - if c.debug { - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) - } - c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) - writeEntries(c.w, pkg, l, depth, entries...) - c.Flush() -} - -func (c *PrettyFormatter) Flush() { - c.w.Flush() -} - -// LogFormatter emulates the form of the traditional built-in logger. -type LogFormatter struct { - logger *log.Logger - prefix string -} - -// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the -// golang log package to actually do the logging work so that logs look similar. -func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { - return &LogFormatter{ - logger: log.New(w, "", flag), // don't use prefix here - prefix: prefix, // save it instead - } -} - -// Format builds a log message for the LogFormatter. The LogLevel is ignored. -func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { - str := fmt.Sprint(entries...) 
- prefix := lf.prefix - if pkg != "" { - prefix = fmt.Sprintf("%s%s: ", prefix, pkg) - } - lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 -} - -// Flush is included so that the interface is complete, but is a no-op. -func (lf *LogFormatter) Flush() { - // noop -} - -// NilFormatter is a no-op log formatter that does nothing. -type NilFormatter struct { -} - -// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no -// messages so that you can cause part of your logging to be silent. -func NewNilFormatter() Formatter { - return &NilFormatter{} -} - -// Format does nothing. -func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { - // noop -} - -// Flush is included so that the interface is complete, but is a no-op. -func (_ *NilFormatter) Flush() { - // noop -} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go deleted file mode 100644 index 426603e..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "bytes" - "io" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -var pid = os.Getpid() - -type GlogFormatter struct { - StringFormatter -} - -func NewGlogFormatter(w io.Writer) *GlogFormatter { - g := &GlogFormatter{} - g.w = bufio.NewWriter(w) - return g -} - -func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { - g.w.Write(GlogHeader(level, depth+1)) - g.StringFormatter.Format(pkg, level, depth+1, entries...) -} - -func GlogHeader(level LogLevel, depth int) []byte { - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - now := time.Now().UTC() - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" 
- line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - buf := &bytes.Buffer{} - buf.Grow(30) - _, month, day := now.Date() - hour, minute, second := now.Clock() - buf.WriteString(level.Char()) - twoDigits(buf, int(month)) - twoDigits(buf, day) - buf.WriteByte(' ') - twoDigits(buf, hour) - buf.WriteByte(':') - twoDigits(buf, minute) - buf.WriteByte(':') - twoDigits(buf, second) - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) - buf.WriteByte('Z') - buf.WriteByte(' ') - buf.WriteString(strconv.Itoa(pid)) - buf.WriteByte(' ') - buf.WriteString(file) - buf.WriteByte(':') - buf.WriteString(strconv.Itoa(line)) - buf.WriteByte(']') - buf.WriteByte(' ') - return buf.Bytes() -} - -const digits = "0123456789" - -func twoDigits(b *bytes.Buffer, d int) { - c2 := digits[d%10] - d /= 10 - c1 := digits[d%10] - b.WriteByte(c1) - b.WriteByte(c2) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go deleted file mode 100644 index 38ce6d2..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/init.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "io" - "os" - "syscall" -) - -// Here's where the opinionation comes in. We need some sensible defaults, -// especially after taking over the log package. Your project (whatever it may -// be) may see things differently. That's okay; there should be no defaults in -// the main package that cannot be controlled or overridden programatically, -// otherwise it's a bug. Doing so is creating your own init_log.go file much -// like this one. - -func init() { - initHijack() - - // Go `log` package uses os.Stderr. - SetFormatter(NewDefaultFormatter(os.Stderr)) - SetGlobalLogLevel(INFO) -} - -func NewDefaultFormatter(out io.Writer) Formatter { - if syscall.Getppid() == 1 { - // We're running under init, which may be systemd. - f, err := NewJournaldFormatter() - if err == nil { - return f - } - } - return NewPrettyFormatter(out, false) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go deleted file mode 100644 index 4553050..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/init_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import "os" - -func init() { - initHijack() - - // Go `log` package uses os.Stderr. - SetFormatter(NewPrettyFormatter(os.Stderr, false)) - SetGlobalLogLevel(INFO) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go deleted file mode 100644 index 72e0520..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/coreos/go-systemd/journal" -) - -func NewJournaldFormatter() (Formatter, error) { - if !journal.Enabled() { - return nil, errors.New("No systemd detected") - } - return &journaldFormatter{}, nil -} - -type journaldFormatter struct{} - -func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - var pri journal.Priority - switch l { - case CRITICAL: - pri = journal.PriCrit - case ERROR: - pri = journal.PriErr - case WARNING: - pri = journal.PriWarning - case NOTICE: - pri = journal.PriNotice - case INFO: - pri = journal.PriInfo - case DEBUG: - pri = journal.PriDebug - case TRACE: - pri = journal.PriDebug - default: - panic("Unhandled loglevel") - } - msg := fmt.Sprint(entries...) - tags := map[string]string{ - "PACKAGE": pkg, - "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), - } - err := journal.Send(msg, pri, tags) - if err != nil { - fmt.Fprintln(os.Stderr, err) - } -} - -func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go deleted file mode 100644 index 970086b..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package capnslog - -import ( - "log" -) - -func initHijack() { - pkg := NewPackageLogger("log", "") - w := packageWriter{pkg} - log.SetFlags(0) - log.SetPrefix("") - log.SetOutput(w) -} - -type packageWriter struct { - pl *PackageLogger -} - -func (p packageWriter) Write(b []byte) (int, error) { - if p.pl.level < INFO { - return 0, nil - } - p.pl.internalLog(calldepth+2, INFO, string(b)) - return len(b), nil -} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go deleted file mode 100644 index 226b60c..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/logmap.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "errors" - "strings" - "sync" -) - -// LogLevel is the set of all log levels. -type LogLevel int8 - -const ( - // CRITICAL is the lowest log level; only errors which will end the program will be propagated. - CRITICAL LogLevel = iota - 1 - // ERROR is for errors that are not fatal but lead to troubling behavior. - ERROR - // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. - WARNING - // NOTICE is for normal but significant conditions. - NOTICE - // INFO is a log level for common, everyday log updates. - INFO - // DEBUG is the default hidden level for more verbose updates about internal processes. - DEBUG - // TRACE is for (potentially) call by call tracing of programs. - TRACE -) - -// Char returns a single-character representation of the log level. -func (l LogLevel) Char() string { - switch l { - case CRITICAL: - return "C" - case ERROR: - return "E" - case WARNING: - return "W" - case NOTICE: - return "N" - case INFO: - return "I" - case DEBUG: - return "D" - case TRACE: - return "T" - default: - panic("Unhandled loglevel") - } -} - -// String returns a multi-character representation of the log level. -func (l LogLevel) String() string { - switch l { - case CRITICAL: - return "CRITICAL" - case ERROR: - return "ERROR" - case WARNING: - return "WARNING" - case NOTICE: - return "NOTICE" - case INFO: - return "INFO" - case DEBUG: - return "DEBUG" - case TRACE: - return "TRACE" - default: - panic("Unhandled loglevel") - } -} - -// Update using the given string value. Fulfills the flag.Value interface. -func (l *LogLevel) Set(s string) error { - value, err := ParseLevel(s) - if err != nil { - return err - } - - *l = value - return nil -} - -// Returns an empty string, only here to fulfill the pflag.Value interface. -func (l *LogLevel) Type() string { - return "" -} - -// ParseLevel translates some potential loglevel strings into their corresponding levels. 
-func ParseLevel(s string) (LogLevel, error) { - switch s { - case "CRITICAL", "C": - return CRITICAL, nil - case "ERROR", "0", "E": - return ERROR, nil - case "WARNING", "1", "W": - return WARNING, nil - case "NOTICE", "2", "N": - return NOTICE, nil - case "INFO", "3", "I": - return INFO, nil - case "DEBUG", "4", "D": - return DEBUG, nil - case "TRACE", "5", "T": - return TRACE, nil - } - return CRITICAL, errors.New("couldn't parse log level " + s) -} - -type RepoLogger map[string]*PackageLogger - -type loggerStruct struct { - sync.Mutex - repoMap map[string]RepoLogger - formatter Formatter -} - -// logger is the global logger -var logger = new(loggerStruct) - -// SetGlobalLogLevel sets the log level for all packages in all repositories -// registered with capnslog. -func SetGlobalLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - for _, r := range logger.repoMap { - r.setRepoLogLevelInternal(l) - } -} - -// GetRepoLogger may return the handle to the repository's set of packages' loggers. -func GetRepoLogger(repo string) (RepoLogger, error) { - logger.Lock() - defer logger.Unlock() - r, ok := logger.repoMap[repo] - if !ok { - return nil, errors.New("no packages registered for repo " + repo) - } - return r, nil -} - -// MustRepoLogger returns the handle to the repository's packages' loggers. -func MustRepoLogger(repo string) RepoLogger { - r, err := GetRepoLogger(repo) - if err != nil { - panic(err) - } - return r -} - -// SetRepoLogLevel sets the log level for all packages in the repository. -func (r RepoLogger) SetRepoLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - r.setRepoLogLevelInternal(l) -} - -func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { - for _, v := range r { - v.level = l - } -} - -// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in -// order, and returns a map of the results, for use in SetLogLevel. -func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { - setlist := strings.Split(conf, ",") - out := make(map[string]LogLevel) - for _, setstring := range setlist { - setting := strings.Split(setstring, "=") - if len(setting) != 2 { - return nil, errors.New("oddly structured `pkg=level` option: " + setstring) - } - l, err := ParseLevel(setting[1]) - if err != nil { - return nil, err - } - out[setting[0]] = l - } - return out, nil -} - -// SetLogLevel takes a map of package names within a repository to their desired -// loglevel, and sets the levels appropriately. Unknown packages are ignored. -// "*" is a special package name that corresponds to all packages, and will be -// processed first. -func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { - logger.Lock() - defer logger.Unlock() - if l, ok := m["*"]; ok { - r.setRepoLogLevelInternal(l) - } - for k, v := range m { - l, ok := r[k] - if !ok { - continue - } - l.level = v - } -} - -// SetFormatter sets the formatting function for all logs. -func SetFormatter(f Formatter) { - logger.Lock() - defer logger.Unlock() - logger.formatter = f -} - -// NewPackageLogger creates a package logger object. -// This should be defined as a global var in your package, referencing your repo. 
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { - logger.Lock() - defer logger.Unlock() - if logger.repoMap == nil { - logger.repoMap = make(map[string]RepoLogger) - } - r, rok := logger.repoMap[repo] - if !rok { - logger.repoMap[repo] = make(RepoLogger) - r = logger.repoMap[repo] - } - p, pok := r[pkg] - if !pok { - r[pkg] = &PackageLogger{ - pkg: pkg, - level: INFO, - } - p = r[pkg] - } - return -} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go deleted file mode 100644 index 00ff371..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "fmt" - "os" -) - -type PackageLogger struct { - pkg string - level LogLevel -} - -const calldepth = 2 - -func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { - logger.Lock() - defer logger.Unlock() - if inLevel != CRITICAL && p.level < inLevel { - return - } - if logger.formatter != nil { - logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) - } -} - -// SetLevel allows users to change the current logging level. -func (p *PackageLogger) SetLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - p.level = l -} - -// LevelAt checks if the given log level will be outputted under current setting. -func (p *PackageLogger) LevelAt(l LogLevel) bool { - logger.Lock() - defer logger.Unlock() - return p.level >= l -} - -// Log a formatted string at any level between ERROR and TRACE -func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) -} - -// Log a message at any level between ERROR and TRACE -func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprint(args...)) -} - -// log stdlib compatibility - -func (p *PackageLogger) Println(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) -} - -func (p *PackageLogger) Printf(format string, args ...interface{}) { - p.Logf(INFO, format, args...) -} - -func (p *PackageLogger) Print(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprint(args...)) -} - -// Panic and fatal - -func (p *PackageLogger) Panicf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Panic(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Panicln(args ...interface{}) { - s := fmt.Sprintln(args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Fatalf(format string, args ...interface{}) { - p.Logf(CRITICAL, format, args...) - os.Exit(1) -} - -func (p *PackageLogger) Fatal(args ...interface{}) { - s := fmt.Sprint(args...) 
- p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -func (p *PackageLogger) Fatalln(args ...interface{}) { - s := fmt.Sprintln(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -// Error Functions - -func (p *PackageLogger) Errorf(format string, args ...interface{}) { - p.Logf(ERROR, format, args...) -} - -func (p *PackageLogger) Error(entries ...interface{}) { - p.internalLog(calldepth, ERROR, entries...) -} - -// Warning Functions - -func (p *PackageLogger) Warningf(format string, args ...interface{}) { - p.Logf(WARNING, format, args...) -} - -func (p *PackageLogger) Warning(entries ...interface{}) { - p.internalLog(calldepth, WARNING, entries...) -} - -// Notice Functions - -func (p *PackageLogger) Noticef(format string, args ...interface{}) { - p.Logf(NOTICE, format, args...) -} - -func (p *PackageLogger) Notice(entries ...interface{}) { - p.internalLog(calldepth, NOTICE, entries...) -} - -// Info Functions - -func (p *PackageLogger) Infof(format string, args ...interface{}) { - p.Logf(INFO, format, args...) -} - -func (p *PackageLogger) Info(entries ...interface{}) { - p.internalLog(calldepth, INFO, entries...) -} - -// Debug Functions - -func (p *PackageLogger) Debugf(format string, args ...interface{}) { - if p.level < DEBUG { - return - } - p.Logf(DEBUG, format, args...) -} - -func (p *PackageLogger) Debug(entries ...interface{}) { - if p.level < DEBUG { - return - } - p.internalLog(calldepth, DEBUG, entries...) -} - -// Trace Functions - -func (p *PackageLogger) Tracef(format string, args ...interface{}) { - if p.level < TRACE { - return - } - p.Logf(TRACE, format, args...) -} - -func (p *PackageLogger) Trace(entries ...interface{}) { - if p.level < TRACE { - return - } - p.internalLog(calldepth, TRACE, entries...) -} - -func (p *PackageLogger) Flush() { - logger.Lock() - defer logger.Unlock() - logger.formatter.Flush() -} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go deleted file mode 100644 index 4be5a1f..0000000 --- a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// +build !windows - -package capnslog - -import ( - "fmt" - "log/syslog" -) - -func NewSyslogFormatter(w *syslog.Writer) Formatter { - return &syslogFormatter{w} -} - -func NewDefaultSyslogFormatter(tag string) (Formatter, error) { - w, err := syslog.New(syslog.LOG_DEBUG, tag) - if err != nil { - return nil, err - } - return NewSyslogFormatter(w), nil -} - -type syslogFormatter struct { - w *syslog.Writer -} - -func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - for _, entry := range entries { - str := fmt.Sprint(entry) - switch l { - case CRITICAL: - s.w.Crit(str) - case ERROR: - s.w.Err(str) - case WARNING: - s.w.Warning(str) - case NOTICE: - s.w.Notice(str) - case INFO: - s.w.Info(str) - case DEBUG: - s.w.Debug(str) - case TRACE: - s.w.Debug(str) - default: - panic("Unhandled loglevel") - } - } -} - -func (s *syslogFormatter) Flush() { -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md deleted file mode 100644 index 1cade6c..0000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go deleted file mode 100644 index b480056..0000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ /dev/null @@ -1,14 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday/v2" -) - -// Render converts a markdown document into a roff formatted document. -func Render(doc []byte) []byte { - renderer := NewRoffRenderer() - - return blackfriday.Run(doc, - []blackfriday.Option{blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions())}...) 
-} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go deleted file mode 100644 index 0668a66..0000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ /dev/null @@ -1,345 +0,0 @@ -package md2man - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/russross/blackfriday/v2" -) - -// roffRenderer implements the blackfriday.Renderer interface for creating -// roff format (manpages) from markdown text -type roffRenderer struct { - extensions blackfriday.Extensions - listCounters []int - firstHeader bool - defineTerm bool - listDepth int -} - -const ( - titleHeader = ".TH " - topLevelHeader = "\n\n.SH " - secondLevelHdr = "\n.SH " - otherHeader = "\n.SS " - crTag = "\n" - emphTag = "\\fI" - emphCloseTag = "\\fP" - strongTag = "\\fB" - strongCloseTag = "\\fP" - breakTag = "\n.br\n" - paraTag = "\n.PP\n" - hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" - linkTag = "\n\\[la]" - linkCloseTag = "\\[ra]" - codespanTag = "\\fB\\fC" - codespanCloseTag = "\\fR" - codeTag = "\n.PP\n.RS\n\n.nf\n" - codeCloseTag = "\n.fi\n.RE\n" - quoteTag = "\n.PP\n.RS\n" - quoteCloseTag = "\n.RE\n" - listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" - tableStart = "\n.TS\nallbox;\n" - tableEnd = ".TE\n" - tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" -) - -// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents -// from markdown -func NewRoffRenderer() *roffRenderer { // nolint: golint - var extensions blackfriday.Extensions - - extensions |= blackfriday.NoIntraEmphasis - extensions |= blackfriday.Tables - extensions |= blackfriday.FencedCode - extensions |= blackfriday.SpaceHeadings - extensions |= blackfriday.Footnotes - extensions |= blackfriday.Titleblock - extensions |= blackfriday.DefinitionLists - return &roffRenderer{ - extensions: extensions, - } -} - -// GetExtensions returns the list of extensions used by this renderer implementation -func (r *roffRenderer) GetExtensions() blackfriday.Extensions { - return r.extensions -} - -// RenderHeader handles outputting the header at document start -func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { - // disable hyphenation - out(w, ".nh\n") -} - -// RenderFooter handles outputting the footer at the document end; the roff -// renderer has no footer information -func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { -} - -// RenderNode is called for each node in a markdown document; based on the node -// type the equivalent roff output is sent to the writer -func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - - var walkAction = blackfriday.GoToNext - - switch node.Type { - case blackfriday.Text: - r.handleText(w, node, entering) - case blackfriday.Softbreak: - out(w, crTag) - case blackfriday.Hardbreak: - out(w, breakTag) - case blackfriday.Emph: - if entering { - out(w, emphTag) - } else { - out(w, emphCloseTag) - } - case blackfriday.Strong: - if entering { - out(w, strongTag) - } else { - out(w, strongCloseTag) - } - case blackfriday.Link: - if !entering { - out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) - } - case blackfriday.Image: - // ignore images - walkAction = blackfriday.SkipChildren - case blackfriday.Code: - out(w, codespanTag) - escapeSpecialChars(w, node.Literal) - out(w, codespanCloseTag) - case blackfriday.Document: - break - case blackfriday.Paragraph: - // roff .PP markers break lists - if 
r.listDepth > 0 { - return blackfriday.GoToNext - } - if entering { - out(w, paraTag) - } else { - out(w, crTag) - } - case blackfriday.BlockQuote: - if entering { - out(w, quoteTag) - } else { - out(w, quoteCloseTag) - } - case blackfriday.Heading: - r.handleHeading(w, node, entering) - case blackfriday.HorizontalRule: - out(w, hruleTag) - case blackfriday.List: - r.handleList(w, node, entering) - case blackfriday.Item: - r.handleItem(w, node, entering) - case blackfriday.CodeBlock: - out(w, codeTag) - escapeSpecialChars(w, node.Literal) - out(w, codeCloseTag) - case blackfriday.Table: - r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) - case blackfriday.TableHead: - case blackfriday.TableBody: - case blackfriday.TableRow: - // no action as cell entries do all the nroff formatting - return blackfriday.GoToNext - default: - fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) - } - return walkAction -} - -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - -func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - switch node.Level { - case 1: - if !r.firstHeader { - out(w, titleHeader) - r.firstHeader = true - break - } - out(w, topLevelHeader) - case 2: - out(w, secondLevelHdr) - default: - out(w, otherHeader) - } - } -} - -func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { - openTag := listTag - closeTag := listCloseTag - if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // tags for definition lists handled within Item node - openTag = "" - closeTag = "" - } - if entering { - r.listDepth++ - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = append(r.listCounters, 1) - } - out(w, openTag) - } else { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = r.listCounters[:len(r.listCounters)-1] - } - out(w, closeTag) - r.listDepth-- - } -} - -func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) - r.listCounters[len(r.listCounters)-1]++ - } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true - } else { - r.defineTerm = false - } - } else { - out(w, ".IP \\(bu 2\n") - } - } else { - out(w, "\n") - } -} - -func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - out(w, tableStart) - //call walker to count cells (and rows?) 
so format section can be produced - columns := countColumns(node) - out(w, strings.Repeat("l ", columns)+"\n") - out(w, strings.Repeat("l ", columns)+".\n") - } else { - out(w, tableEnd) - } -} - -func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } - if entering { - if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) - } - } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag - } - out(w, end) - } -} - -// because roff format requires knowing the column count before outputting any table -// data we need to walk a table tree and count the columns -func countColumns(node *blackfriday.Node) int { - var columns int - - node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - switch node.Type { - case blackfriday.TableRow: - if !entering { - return blackfriday.Terminate - } - case blackfriday.TableCell: - if entering { - columns++ - } - default: - } - return blackfriday.GoToNext - }) - return columns -} - -func out(w io.Writer, output string) { - io.WriteString(w, output) // nolint: errcheck -} - -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - -func escapeSpecialChars(w io.Writer, text []byte) { - for i := 0; i < len(text); i++ { - // escape initial apostrophe or period - if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { - out(w, "\\&") - } - - // directly copy normal characters - org := i - - for i < len(text) && !needsBackslash(text[i]) { - i++ - } - if i > org { - w.Write(text[org:i]) // nolint: errcheck - } - - // escape a character - if i >= len(text) { - break - } - - w.Write([]byte{'\\', text[i]}) // nolint: errcheck - } -} diff --git a/vendor/github.com/dexidp/dex/LICENSE b/vendor/github.com/dexidp/dex/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/dexidp/dex/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/dexidp/dex/NOTICE b/vendor/github.com/dexidp/dex/NOTICE deleted file mode 100644 index 23a0ada..0000000 --- a/vendor/github.com/dexidp/dex/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/dexidp/dex/api/api.pb.go b/vendor/github.com/dexidp/dex/api/api.pb.go deleted file mode 100644 index 5bac4e9..0000000 --- a/vendor/github.com/dexidp/dex/api/api.pb.go +++ /dev/null @@ -1,1758 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: api/api.proto - -package api - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Client represents an OAuth2 client. -type Client struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` - RedirectUris []string `protobuf:"bytes,3,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,4,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Public bool `protobuf:"varint,5,opt,name=public,proto3" json:"public,omitempty"` - Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,7,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Client) Reset() { *m = Client{} } -func (m *Client) String() string { return proto.CompactTextString(m) } -func (*Client) ProtoMessage() {} -func (*Client) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{0} -} - -func (m *Client) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Client.Unmarshal(m, b) -} -func (m *Client) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Client.Marshal(b, m, deterministic) -} -func (m *Client) XXX_Merge(src proto.Message) { - xxx_messageInfo_Client.Merge(m, src) -} -func (m *Client) XXX_Size() int { - return xxx_messageInfo_Client.Size(m) -} -func (m *Client) XXX_DiscardUnknown() { - xxx_messageInfo_Client.DiscardUnknown(m) -} - -var xxx_messageInfo_Client proto.InternalMessageInfo - -func (m *Client) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Client) GetSecret() string { - if m != nil { - return m.Secret - } - return "" -} - -func (m *Client) GetRedirectUris() []string { - if m != nil { - return m.RedirectUris - } - return nil -} - -func (m *Client) GetTrustedPeers() []string { - if m != nil { - return m.TrustedPeers - } - return nil -} - -func (m *Client) GetPublic() bool { - if m != nil { - return m.Public - } - return false -} - -func (m *Client) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Client) GetLogoUrl() string { - if m != nil { - return m.LogoUrl - } - return "" -} - -// CreateClientReq is a request to make a client. 
-type CreateClientReq struct { - Client *Client `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateClientReq) Reset() { *m = CreateClientReq{} } -func (m *CreateClientReq) String() string { return proto.CompactTextString(m) } -func (*CreateClientReq) ProtoMessage() {} -func (*CreateClientReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{1} -} - -func (m *CreateClientReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateClientReq.Unmarshal(m, b) -} -func (m *CreateClientReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateClientReq.Marshal(b, m, deterministic) -} -func (m *CreateClientReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateClientReq.Merge(m, src) -} -func (m *CreateClientReq) XXX_Size() int { - return xxx_messageInfo_CreateClientReq.Size(m) -} -func (m *CreateClientReq) XXX_DiscardUnknown() { - xxx_messageInfo_CreateClientReq.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateClientReq proto.InternalMessageInfo - -func (m *CreateClientReq) GetClient() *Client { - if m != nil { - return m.Client - } - return nil -} - -// CreateClientResp returns the response from creating a client. -type CreateClientResp struct { - AlreadyExists bool `protobuf:"varint,1,opt,name=already_exists,json=alreadyExists,proto3" json:"already_exists,omitempty"` - Client *Client `protobuf:"bytes,2,opt,name=client,proto3" json:"client,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateClientResp) Reset() { *m = CreateClientResp{} } -func (m *CreateClientResp) String() string { return proto.CompactTextString(m) } -func (*CreateClientResp) ProtoMessage() {} -func (*CreateClientResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{2} -} - -func (m *CreateClientResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateClientResp.Unmarshal(m, b) -} -func (m *CreateClientResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateClientResp.Marshal(b, m, deterministic) -} -func (m *CreateClientResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateClientResp.Merge(m, src) -} -func (m *CreateClientResp) XXX_Size() int { - return xxx_messageInfo_CreateClientResp.Size(m) -} -func (m *CreateClientResp) XXX_DiscardUnknown() { - xxx_messageInfo_CreateClientResp.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateClientResp proto.InternalMessageInfo - -func (m *CreateClientResp) GetAlreadyExists() bool { - if m != nil { - return m.AlreadyExists - } - return false -} - -func (m *CreateClientResp) GetClient() *Client { - if m != nil { - return m.Client - } - return nil -} - -// DeleteClientReq is a request to delete a client. -type DeleteClientReq struct { - // The ID of the client. 
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteClientReq) Reset() { *m = DeleteClientReq{} } -func (m *DeleteClientReq) String() string { return proto.CompactTextString(m) } -func (*DeleteClientReq) ProtoMessage() {} -func (*DeleteClientReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{3} -} - -func (m *DeleteClientReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteClientReq.Unmarshal(m, b) -} -func (m *DeleteClientReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteClientReq.Marshal(b, m, deterministic) -} -func (m *DeleteClientReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteClientReq.Merge(m, src) -} -func (m *DeleteClientReq) XXX_Size() int { - return xxx_messageInfo_DeleteClientReq.Size(m) -} -func (m *DeleteClientReq) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteClientReq.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteClientReq proto.InternalMessageInfo - -func (m *DeleteClientReq) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -// DeleteClientResp determines if the client is deleted successfully. -type DeleteClientResp struct { - NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteClientResp) Reset() { *m = DeleteClientResp{} } -func (m *DeleteClientResp) String() string { return proto.CompactTextString(m) } -func (*DeleteClientResp) ProtoMessage() {} -func (*DeleteClientResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{4} -} - -func (m *DeleteClientResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteClientResp.Unmarshal(m, b) -} -func (m *DeleteClientResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteClientResp.Marshal(b, m, deterministic) -} -func (m *DeleteClientResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteClientResp.Merge(m, src) -} -func (m *DeleteClientResp) XXX_Size() int { - return xxx_messageInfo_DeleteClientResp.Size(m) -} -func (m *DeleteClientResp) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteClientResp.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteClientResp proto.InternalMessageInfo - -func (m *DeleteClientResp) GetNotFound() bool { - if m != nil { - return m.NotFound - } - return false -} - -// UpdateClientReq is a request to update an exisitng client. 
-type UpdateClientReq struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - RedirectUris []string `protobuf:"bytes,2,rep,name=redirect_uris,json=redirectUris,proto3" json:"redirect_uris,omitempty"` - TrustedPeers []string `protobuf:"bytes,3,rep,name=trusted_peers,json=trustedPeers,proto3" json:"trusted_peers,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - LogoUrl string `protobuf:"bytes,5,opt,name=logo_url,json=logoUrl,proto3" json:"logo_url,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateClientReq) Reset() { *m = UpdateClientReq{} } -func (m *UpdateClientReq) String() string { return proto.CompactTextString(m) } -func (*UpdateClientReq) ProtoMessage() {} -func (*UpdateClientReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{5} -} - -func (m *UpdateClientReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdateClientReq.Unmarshal(m, b) -} -func (m *UpdateClientReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdateClientReq.Marshal(b, m, deterministic) -} -func (m *UpdateClientReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateClientReq.Merge(m, src) -} -func (m *UpdateClientReq) XXX_Size() int { - return xxx_messageInfo_UpdateClientReq.Size(m) -} -func (m *UpdateClientReq) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateClientReq.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateClientReq proto.InternalMessageInfo - -func (m *UpdateClientReq) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *UpdateClientReq) GetRedirectUris() []string { - if m != nil { - return m.RedirectUris - } - return nil -} - -func (m *UpdateClientReq) GetTrustedPeers() []string { - if m != nil { - return m.TrustedPeers - } - return nil -} - -func (m *UpdateClientReq) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *UpdateClientReq) GetLogoUrl() string { - if m != nil { - return m.LogoUrl - } - return "" -} - -// UpdateClientResp returns the reponse form updating a client. 
-type UpdateClientResp struct { - NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateClientResp) Reset() { *m = UpdateClientResp{} } -func (m *UpdateClientResp) String() string { return proto.CompactTextString(m) } -func (*UpdateClientResp) ProtoMessage() {} -func (*UpdateClientResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{6} -} - -func (m *UpdateClientResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdateClientResp.Unmarshal(m, b) -} -func (m *UpdateClientResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdateClientResp.Marshal(b, m, deterministic) -} -func (m *UpdateClientResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateClientResp.Merge(m, src) -} -func (m *UpdateClientResp) XXX_Size() int { - return xxx_messageInfo_UpdateClientResp.Size(m) -} -func (m *UpdateClientResp) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateClientResp.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateClientResp proto.InternalMessageInfo - -func (m *UpdateClientResp) GetNotFound() bool { - if m != nil { - return m.NotFound - } - return false -} - -// Password is an email for password mapping managed by the storage. -type Password struct { - Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` - // Currently we do not accept plain text passwords. Could be an option in the future. - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"` - UserId string `protobuf:"bytes,4,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Password) Reset() { *m = Password{} } -func (m *Password) String() string { return proto.CompactTextString(m) } -func (*Password) ProtoMessage() {} -func (*Password) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{7} -} - -func (m *Password) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Password.Unmarshal(m, b) -} -func (m *Password) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Password.Marshal(b, m, deterministic) -} -func (m *Password) XXX_Merge(src proto.Message) { - xxx_messageInfo_Password.Merge(m, src) -} -func (m *Password) XXX_Size() int { - return xxx_messageInfo_Password.Size(m) -} -func (m *Password) XXX_DiscardUnknown() { - xxx_messageInfo_Password.DiscardUnknown(m) -} - -var xxx_messageInfo_Password proto.InternalMessageInfo - -func (m *Password) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *Password) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *Password) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *Password) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -// CreatePasswordReq is a request to make a password. 
-type CreatePasswordReq struct { - Password *Password `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreatePasswordReq) Reset() { *m = CreatePasswordReq{} } -func (m *CreatePasswordReq) String() string { return proto.CompactTextString(m) } -func (*CreatePasswordReq) ProtoMessage() {} -func (*CreatePasswordReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{8} -} - -func (m *CreatePasswordReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreatePasswordReq.Unmarshal(m, b) -} -func (m *CreatePasswordReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreatePasswordReq.Marshal(b, m, deterministic) -} -func (m *CreatePasswordReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreatePasswordReq.Merge(m, src) -} -func (m *CreatePasswordReq) XXX_Size() int { - return xxx_messageInfo_CreatePasswordReq.Size(m) -} -func (m *CreatePasswordReq) XXX_DiscardUnknown() { - xxx_messageInfo_CreatePasswordReq.DiscardUnknown(m) -} - -var xxx_messageInfo_CreatePasswordReq proto.InternalMessageInfo - -func (m *CreatePasswordReq) GetPassword() *Password { - if m != nil { - return m.Password - } - return nil -} - -// CreatePasswordResp returns the response from creating a password. -type CreatePasswordResp struct { - AlreadyExists bool `protobuf:"varint,1,opt,name=already_exists,json=alreadyExists,proto3" json:"already_exists,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreatePasswordResp) Reset() { *m = CreatePasswordResp{} } -func (m *CreatePasswordResp) String() string { return proto.CompactTextString(m) } -func (*CreatePasswordResp) ProtoMessage() {} -func (*CreatePasswordResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{9} -} - -func (m *CreatePasswordResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreatePasswordResp.Unmarshal(m, b) -} -func (m *CreatePasswordResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreatePasswordResp.Marshal(b, m, deterministic) -} -func (m *CreatePasswordResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreatePasswordResp.Merge(m, src) -} -func (m *CreatePasswordResp) XXX_Size() int { - return xxx_messageInfo_CreatePasswordResp.Size(m) -} -func (m *CreatePasswordResp) XXX_DiscardUnknown() { - xxx_messageInfo_CreatePasswordResp.DiscardUnknown(m) -} - -var xxx_messageInfo_CreatePasswordResp proto.InternalMessageInfo - -func (m *CreatePasswordResp) GetAlreadyExists() bool { - if m != nil { - return m.AlreadyExists - } - return false -} - -// UpdatePasswordReq is a request to modify an existing password. -type UpdatePasswordReq struct { - // The email used to lookup the password. 
This field cannot be modified - Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` - NewHash []byte `protobuf:"bytes,2,opt,name=new_hash,json=newHash,proto3" json:"new_hash,omitempty"` - NewUsername string `protobuf:"bytes,3,opt,name=new_username,json=newUsername,proto3" json:"new_username,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdatePasswordReq) Reset() { *m = UpdatePasswordReq{} } -func (m *UpdatePasswordReq) String() string { return proto.CompactTextString(m) } -func (*UpdatePasswordReq) ProtoMessage() {} -func (*UpdatePasswordReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{10} -} - -func (m *UpdatePasswordReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdatePasswordReq.Unmarshal(m, b) -} -func (m *UpdatePasswordReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdatePasswordReq.Marshal(b, m, deterministic) -} -func (m *UpdatePasswordReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdatePasswordReq.Merge(m, src) -} -func (m *UpdatePasswordReq) XXX_Size() int { - return xxx_messageInfo_UpdatePasswordReq.Size(m) -} -func (m *UpdatePasswordReq) XXX_DiscardUnknown() { - xxx_messageInfo_UpdatePasswordReq.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdatePasswordReq proto.InternalMessageInfo - -func (m *UpdatePasswordReq) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *UpdatePasswordReq) GetNewHash() []byte { - if m != nil { - return m.NewHash - } - return nil -} - -func (m *UpdatePasswordReq) GetNewUsername() string { - if m != nil { - return m.NewUsername - } - return "" -} - -// UpdatePasswordResp returns the response from modifying an existing password. -type UpdatePasswordResp struct { - NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdatePasswordResp) Reset() { *m = UpdatePasswordResp{} } -func (m *UpdatePasswordResp) String() string { return proto.CompactTextString(m) } -func (*UpdatePasswordResp) ProtoMessage() {} -func (*UpdatePasswordResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{11} -} - -func (m *UpdatePasswordResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdatePasswordResp.Unmarshal(m, b) -} -func (m *UpdatePasswordResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdatePasswordResp.Marshal(b, m, deterministic) -} -func (m *UpdatePasswordResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdatePasswordResp.Merge(m, src) -} -func (m *UpdatePasswordResp) XXX_Size() int { - return xxx_messageInfo_UpdatePasswordResp.Size(m) -} -func (m *UpdatePasswordResp) XXX_DiscardUnknown() { - xxx_messageInfo_UpdatePasswordResp.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdatePasswordResp proto.InternalMessageInfo - -func (m *UpdatePasswordResp) GetNotFound() bool { - if m != nil { - return m.NotFound - } - return false -} - -// DeletePasswordReq is a request to delete a password. 
-type DeletePasswordReq struct { - Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeletePasswordReq) Reset() { *m = DeletePasswordReq{} } -func (m *DeletePasswordReq) String() string { return proto.CompactTextString(m) } -func (*DeletePasswordReq) ProtoMessage() {} -func (*DeletePasswordReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{12} -} - -func (m *DeletePasswordReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeletePasswordReq.Unmarshal(m, b) -} -func (m *DeletePasswordReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeletePasswordReq.Marshal(b, m, deterministic) -} -func (m *DeletePasswordReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeletePasswordReq.Merge(m, src) -} -func (m *DeletePasswordReq) XXX_Size() int { - return xxx_messageInfo_DeletePasswordReq.Size(m) -} -func (m *DeletePasswordReq) XXX_DiscardUnknown() { - xxx_messageInfo_DeletePasswordReq.DiscardUnknown(m) -} - -var xxx_messageInfo_DeletePasswordReq proto.InternalMessageInfo - -func (m *DeletePasswordReq) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -// DeletePasswordResp returns the response from deleting a password. -type DeletePasswordResp struct { - NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeletePasswordResp) Reset() { *m = DeletePasswordResp{} } -func (m *DeletePasswordResp) String() string { return proto.CompactTextString(m) } -func (*DeletePasswordResp) ProtoMessage() {} -func (*DeletePasswordResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{13} -} - -func (m *DeletePasswordResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeletePasswordResp.Unmarshal(m, b) -} -func (m *DeletePasswordResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeletePasswordResp.Marshal(b, m, deterministic) -} -func (m *DeletePasswordResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeletePasswordResp.Merge(m, src) -} -func (m *DeletePasswordResp) XXX_Size() int { - return xxx_messageInfo_DeletePasswordResp.Size(m) -} -func (m *DeletePasswordResp) XXX_DiscardUnknown() { - xxx_messageInfo_DeletePasswordResp.DiscardUnknown(m) -} - -var xxx_messageInfo_DeletePasswordResp proto.InternalMessageInfo - -func (m *DeletePasswordResp) GetNotFound() bool { - if m != nil { - return m.NotFound - } - return false -} - -// ListPasswordReq is a request to enumerate passwords. 
-type ListPasswordReq struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPasswordReq) Reset() { *m = ListPasswordReq{} } -func (m *ListPasswordReq) String() string { return proto.CompactTextString(m) } -func (*ListPasswordReq) ProtoMessage() {} -func (*ListPasswordReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{14} -} - -func (m *ListPasswordReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPasswordReq.Unmarshal(m, b) -} -func (m *ListPasswordReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPasswordReq.Marshal(b, m, deterministic) -} -func (m *ListPasswordReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPasswordReq.Merge(m, src) -} -func (m *ListPasswordReq) XXX_Size() int { - return xxx_messageInfo_ListPasswordReq.Size(m) -} -func (m *ListPasswordReq) XXX_DiscardUnknown() { - xxx_messageInfo_ListPasswordReq.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPasswordReq proto.InternalMessageInfo - -// ListPasswordResp returns a list of passwords. -type ListPasswordResp struct { - Passwords []*Password `protobuf:"bytes,1,rep,name=passwords,proto3" json:"passwords,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListPasswordResp) Reset() { *m = ListPasswordResp{} } -func (m *ListPasswordResp) String() string { return proto.CompactTextString(m) } -func (*ListPasswordResp) ProtoMessage() {} -func (*ListPasswordResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{15} -} - -func (m *ListPasswordResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPasswordResp.Unmarshal(m, b) -} -func (m *ListPasswordResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPasswordResp.Marshal(b, m, deterministic) -} -func (m *ListPasswordResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPasswordResp.Merge(m, src) -} -func (m *ListPasswordResp) XXX_Size() int { - return xxx_messageInfo_ListPasswordResp.Size(m) -} -func (m *ListPasswordResp) XXX_DiscardUnknown() { - xxx_messageInfo_ListPasswordResp.DiscardUnknown(m) -} - -var xxx_messageInfo_ListPasswordResp proto.InternalMessageInfo - -func (m *ListPasswordResp) GetPasswords() []*Password { - if m != nil { - return m.Passwords - } - return nil -} - -// VersionReq is a request to fetch version info. 
-type VersionReq struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VersionReq) Reset() { *m = VersionReq{} } -func (m *VersionReq) String() string { return proto.CompactTextString(m) } -func (*VersionReq) ProtoMessage() {} -func (*VersionReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{16} -} - -func (m *VersionReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VersionReq.Unmarshal(m, b) -} -func (m *VersionReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VersionReq.Marshal(b, m, deterministic) -} -func (m *VersionReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionReq.Merge(m, src) -} -func (m *VersionReq) XXX_Size() int { - return xxx_messageInfo_VersionReq.Size(m) -} -func (m *VersionReq) XXX_DiscardUnknown() { - xxx_messageInfo_VersionReq.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionReq proto.InternalMessageInfo - -// VersionResp holds the version info of components. -type VersionResp struct { - // Semantic version of the server. - Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` - // Numeric version of the API. It increases everytime a new call is added to the API. - // Clients should use this info to determine if the server supports specific features. - Api int32 `protobuf:"varint,2,opt,name=api,proto3" json:"api,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VersionResp) Reset() { *m = VersionResp{} } -func (m *VersionResp) String() string { return proto.CompactTextString(m) } -func (*VersionResp) ProtoMessage() {} -func (*VersionResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{17} -} - -func (m *VersionResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VersionResp.Unmarshal(m, b) -} -func (m *VersionResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VersionResp.Marshal(b, m, deterministic) -} -func (m *VersionResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionResp.Merge(m, src) -} -func (m *VersionResp) XXX_Size() int { - return xxx_messageInfo_VersionResp.Size(m) -} -func (m *VersionResp) XXX_DiscardUnknown() { - xxx_messageInfo_VersionResp.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionResp proto.InternalMessageInfo - -func (m *VersionResp) GetServer() string { - if m != nil { - return m.Server - } - return "" -} - -func (m *VersionResp) GetApi() int32 { - if m != nil { - return m.Api - } - return 0 -} - -// RefreshTokenRef contains the metadata for a refresh token that is managed by the storage. -type RefreshTokenRef struct { - // ID of the refresh token. 
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - CreatedAt int64 `protobuf:"varint,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - LastUsed int64 `protobuf:"varint,6,opt,name=last_used,json=lastUsed,proto3" json:"last_used,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RefreshTokenRef) Reset() { *m = RefreshTokenRef{} } -func (m *RefreshTokenRef) String() string { return proto.CompactTextString(m) } -func (*RefreshTokenRef) ProtoMessage() {} -func (*RefreshTokenRef) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{18} -} - -func (m *RefreshTokenRef) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RefreshTokenRef.Unmarshal(m, b) -} -func (m *RefreshTokenRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RefreshTokenRef.Marshal(b, m, deterministic) -} -func (m *RefreshTokenRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_RefreshTokenRef.Merge(m, src) -} -func (m *RefreshTokenRef) XXX_Size() int { - return xxx_messageInfo_RefreshTokenRef.Size(m) -} -func (m *RefreshTokenRef) XXX_DiscardUnknown() { - xxx_messageInfo_RefreshTokenRef.DiscardUnknown(m) -} - -var xxx_messageInfo_RefreshTokenRef proto.InternalMessageInfo - -func (m *RefreshTokenRef) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *RefreshTokenRef) GetClientId() string { - if m != nil { - return m.ClientId - } - return "" -} - -func (m *RefreshTokenRef) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *RefreshTokenRef) GetLastUsed() int64 { - if m != nil { - return m.LastUsed - } - return 0 -} - -// ListRefreshReq is a request to enumerate the refresh tokens of a user. -type ListRefreshReq struct { - // The "sub" claim returned in the ID Token. - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListRefreshReq) Reset() { *m = ListRefreshReq{} } -func (m *ListRefreshReq) String() string { return proto.CompactTextString(m) } -func (*ListRefreshReq) ProtoMessage() {} -func (*ListRefreshReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{19} -} - -func (m *ListRefreshReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListRefreshReq.Unmarshal(m, b) -} -func (m *ListRefreshReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListRefreshReq.Marshal(b, m, deterministic) -} -func (m *ListRefreshReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListRefreshReq.Merge(m, src) -} -func (m *ListRefreshReq) XXX_Size() int { - return xxx_messageInfo_ListRefreshReq.Size(m) -} -func (m *ListRefreshReq) XXX_DiscardUnknown() { - xxx_messageInfo_ListRefreshReq.DiscardUnknown(m) -} - -var xxx_messageInfo_ListRefreshReq proto.InternalMessageInfo - -func (m *ListRefreshReq) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -// ListRefreshResp returns a list of refresh tokens for a user. 
-type ListRefreshResp struct { - RefreshTokens []*RefreshTokenRef `protobuf:"bytes,1,rep,name=refresh_tokens,json=refreshTokens,proto3" json:"refresh_tokens,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListRefreshResp) Reset() { *m = ListRefreshResp{} } -func (m *ListRefreshResp) String() string { return proto.CompactTextString(m) } -func (*ListRefreshResp) ProtoMessage() {} -func (*ListRefreshResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{20} -} - -func (m *ListRefreshResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListRefreshResp.Unmarshal(m, b) -} -func (m *ListRefreshResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListRefreshResp.Marshal(b, m, deterministic) -} -func (m *ListRefreshResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListRefreshResp.Merge(m, src) -} -func (m *ListRefreshResp) XXX_Size() int { - return xxx_messageInfo_ListRefreshResp.Size(m) -} -func (m *ListRefreshResp) XXX_DiscardUnknown() { - xxx_messageInfo_ListRefreshResp.DiscardUnknown(m) -} - -var xxx_messageInfo_ListRefreshResp proto.InternalMessageInfo - -func (m *ListRefreshResp) GetRefreshTokens() []*RefreshTokenRef { - if m != nil { - return m.RefreshTokens - } - return nil -} - -// RevokeRefreshReq is a request to revoke the refresh token of the user-client pair. -type RevokeRefreshReq struct { - // The "sub" claim returned in the ID Token. - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RevokeRefreshReq) Reset() { *m = RevokeRefreshReq{} } -func (m *RevokeRefreshReq) String() string { return proto.CompactTextString(m) } -func (*RevokeRefreshReq) ProtoMessage() {} -func (*RevokeRefreshReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{21} -} - -func (m *RevokeRefreshReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RevokeRefreshReq.Unmarshal(m, b) -} -func (m *RevokeRefreshReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RevokeRefreshReq.Marshal(b, m, deterministic) -} -func (m *RevokeRefreshReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_RevokeRefreshReq.Merge(m, src) -} -func (m *RevokeRefreshReq) XXX_Size() int { - return xxx_messageInfo_RevokeRefreshReq.Size(m) -} -func (m *RevokeRefreshReq) XXX_DiscardUnknown() { - xxx_messageInfo_RevokeRefreshReq.DiscardUnknown(m) -} - -var xxx_messageInfo_RevokeRefreshReq proto.InternalMessageInfo - -func (m *RevokeRefreshReq) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *RevokeRefreshReq) GetClientId() string { - if m != nil { - return m.ClientId - } - return "" -} - -// RevokeRefreshResp determines if the refresh token is revoked successfully. -type RevokeRefreshResp struct { - // Set to true is refresh token was not found and token could not be revoked. 
- NotFound bool `protobuf:"varint,1,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RevokeRefreshResp) Reset() { *m = RevokeRefreshResp{} } -func (m *RevokeRefreshResp) String() string { return proto.CompactTextString(m) } -func (*RevokeRefreshResp) ProtoMessage() {} -func (*RevokeRefreshResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{22} -} - -func (m *RevokeRefreshResp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RevokeRefreshResp.Unmarshal(m, b) -} -func (m *RevokeRefreshResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RevokeRefreshResp.Marshal(b, m, deterministic) -} -func (m *RevokeRefreshResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_RevokeRefreshResp.Merge(m, src) -} -func (m *RevokeRefreshResp) XXX_Size() int { - return xxx_messageInfo_RevokeRefreshResp.Size(m) -} -func (m *RevokeRefreshResp) XXX_DiscardUnknown() { - xxx_messageInfo_RevokeRefreshResp.DiscardUnknown(m) -} - -var xxx_messageInfo_RevokeRefreshResp proto.InternalMessageInfo - -func (m *RevokeRefreshResp) GetNotFound() bool { - if m != nil { - return m.NotFound - } - return false -} - -type VerifyPasswordReq struct { - Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyPasswordReq) Reset() { *m = VerifyPasswordReq{} } -func (m *VerifyPasswordReq) String() string { return proto.CompactTextString(m) } -func (*VerifyPasswordReq) ProtoMessage() {} -func (*VerifyPasswordReq) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{23} -} - -func (m *VerifyPasswordReq) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyPasswordReq.Unmarshal(m, b) -} -func (m *VerifyPasswordReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyPasswordReq.Marshal(b, m, deterministic) -} -func (m *VerifyPasswordReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyPasswordReq.Merge(m, src) -} -func (m *VerifyPasswordReq) XXX_Size() int { - return xxx_messageInfo_VerifyPasswordReq.Size(m) -} -func (m *VerifyPasswordReq) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyPasswordReq.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyPasswordReq proto.InternalMessageInfo - -func (m *VerifyPasswordReq) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *VerifyPasswordReq) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -type VerifyPasswordResp struct { - Verified bool `protobuf:"varint,1,opt,name=verified,proto3" json:"verified,omitempty"` - NotFound bool `protobuf:"varint,2,opt,name=not_found,json=notFound,proto3" json:"not_found,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VerifyPasswordResp) Reset() { *m = VerifyPasswordResp{} } -func (m *VerifyPasswordResp) String() string { return proto.CompactTextString(m) } -func (*VerifyPasswordResp) ProtoMessage() {} -func (*VerifyPasswordResp) Descriptor() ([]byte, []int) { - return fileDescriptor_1b40cafcd4234784, []int{24} -} - -func (m *VerifyPasswordResp) XXX_Unmarshal(b []byte) 
error { - return xxx_messageInfo_VerifyPasswordResp.Unmarshal(m, b) -} -func (m *VerifyPasswordResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyPasswordResp.Marshal(b, m, deterministic) -} -func (m *VerifyPasswordResp) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyPasswordResp.Merge(m, src) -} -func (m *VerifyPasswordResp) XXX_Size() int { - return xxx_messageInfo_VerifyPasswordResp.Size(m) -} -func (m *VerifyPasswordResp) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyPasswordResp.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyPasswordResp proto.InternalMessageInfo - -func (m *VerifyPasswordResp) GetVerified() bool { - if m != nil { - return m.Verified - } - return false -} - -func (m *VerifyPasswordResp) GetNotFound() bool { - if m != nil { - return m.NotFound - } - return false -} - -func init() { - proto.RegisterType((*Client)(nil), "api.Client") - proto.RegisterType((*CreateClientReq)(nil), "api.CreateClientReq") - proto.RegisterType((*CreateClientResp)(nil), "api.CreateClientResp") - proto.RegisterType((*DeleteClientReq)(nil), "api.DeleteClientReq") - proto.RegisterType((*DeleteClientResp)(nil), "api.DeleteClientResp") - proto.RegisterType((*UpdateClientReq)(nil), "api.UpdateClientReq") - proto.RegisterType((*UpdateClientResp)(nil), "api.UpdateClientResp") - proto.RegisterType((*Password)(nil), "api.Password") - proto.RegisterType((*CreatePasswordReq)(nil), "api.CreatePasswordReq") - proto.RegisterType((*CreatePasswordResp)(nil), "api.CreatePasswordResp") - proto.RegisterType((*UpdatePasswordReq)(nil), "api.UpdatePasswordReq") - proto.RegisterType((*UpdatePasswordResp)(nil), "api.UpdatePasswordResp") - proto.RegisterType((*DeletePasswordReq)(nil), "api.DeletePasswordReq") - proto.RegisterType((*DeletePasswordResp)(nil), "api.DeletePasswordResp") - proto.RegisterType((*ListPasswordReq)(nil), "api.ListPasswordReq") - proto.RegisterType((*ListPasswordResp)(nil), "api.ListPasswordResp") - proto.RegisterType((*VersionReq)(nil), "api.VersionReq") - proto.RegisterType((*VersionResp)(nil), "api.VersionResp") - proto.RegisterType((*RefreshTokenRef)(nil), "api.RefreshTokenRef") - proto.RegisterType((*ListRefreshReq)(nil), "api.ListRefreshReq") - proto.RegisterType((*ListRefreshResp)(nil), "api.ListRefreshResp") - proto.RegisterType((*RevokeRefreshReq)(nil), "api.RevokeRefreshReq") - proto.RegisterType((*RevokeRefreshResp)(nil), "api.RevokeRefreshResp") - proto.RegisterType((*VerifyPasswordReq)(nil), "api.VerifyPasswordReq") - proto.RegisterType((*VerifyPasswordResp)(nil), "api.VerifyPasswordResp") -} - -func init() { proto.RegisterFile("api/api.proto", fileDescriptor_1b40cafcd4234784) } - -var fileDescriptor_1b40cafcd4234784 = []byte{ - // 905 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xeb, 0x6e, 0xdb, 0x36, - 0x14, 0xb6, 0xad, 0xd8, 0x96, 0x8f, 0xef, 0x9c, 0x9b, 0xba, 0x2e, 0x06, 0xa4, 0x2c, 0x06, 0xa4, - 0x18, 0xe0, 0xac, 0x1d, 0xb0, 0x01, 0x2b, 0xd6, 0x5d, 0xd2, 0x6e, 0x2d, 0xb0, 0x0d, 0x85, 0x30, - 0xe7, 0xe7, 0x04, 0xc5, 0x3a, 0x4e, 0x88, 0x28, 0x92, 0x46, 0xd2, 0x71, 0xb2, 0x47, 0xd9, 0xdb, - 0xec, 0xd7, 0x5e, 0xab, 0x20, 0x45, 0x29, 0xba, 0x38, 0x71, 0xfe, 0xf9, 0x7c, 0xe2, 0xb9, 0x7d, - 0x87, 0xe7, 0xa3, 0xa1, 0xef, 0xc5, 0xec, 0xc8, 0x8b, 0xd9, 0x3c, 0xe6, 0x91, 0x8c, 0x88, 0xe5, - 0xc5, 0x8c, 0xfe, 0x57, 0x87, 0xd6, 0x71, 0xc0, 0x30, 0x94, 0x64, 0x00, 0x0d, 0xe6, 0x4f, 0xeb, - 0x07, 0xf5, 0xc3, 0x8e, 0xd3, 0x60, 0x3e, 0xd9, 0x87, 0x96, 0xc0, 0x25, 0x47, 
0x39, 0x6d, 0x68, - 0xcc, 0x58, 0xe4, 0x39, 0xf4, 0x39, 0xfa, 0x8c, 0xe3, 0x52, 0xba, 0x6b, 0xce, 0xc4, 0xd4, 0x3a, - 0xb0, 0x0e, 0x3b, 0x4e, 0x2f, 0x05, 0x17, 0x9c, 0x09, 0x75, 0x48, 0xf2, 0xb5, 0x90, 0xe8, 0xbb, - 0x31, 0x22, 0x17, 0xd3, 0xbd, 0xe4, 0x90, 0x01, 0x3f, 0x2a, 0x4c, 0x65, 0x88, 0xd7, 0xa7, 0x01, - 0x5b, 0x4e, 0x9b, 0x07, 0xf5, 0x43, 0xdb, 0x31, 0x16, 0x21, 0xb0, 0x17, 0x7a, 0x97, 0x38, 0x6d, - 0xe9, 0xbc, 0xfa, 0x37, 0x79, 0x02, 0x76, 0x10, 0x9d, 0x45, 0xee, 0x9a, 0x07, 0xd3, 0xb6, 0xc6, - 0xdb, 0xca, 0x5e, 0xf0, 0x80, 0x7e, 0x03, 0xc3, 0x63, 0x8e, 0x9e, 0xc4, 0xa4, 0x11, 0x07, 0xff, - 0x26, 0xcf, 0xa1, 0xb5, 0xd4, 0x86, 0xee, 0xa7, 0xfb, 0xaa, 0x3b, 0x57, 0x7d, 0x9b, 0xef, 0xe6, - 0x13, 0xfd, 0x0b, 0x46, 0x45, 0x3f, 0x11, 0x93, 0x2f, 0x60, 0xe0, 0x05, 0x1c, 0x3d, 0xff, 0xc6, - 0xc5, 0x6b, 0x26, 0xa4, 0xd0, 0x01, 0x6c, 0xa7, 0x6f, 0xd0, 0x77, 0x1a, 0xcc, 0xc5, 0x6f, 0xdc, - 0x1d, 0xff, 0x19, 0x0c, 0xdf, 0x62, 0x80, 0xf9, 0xba, 0x4a, 0x1c, 0xd3, 0x23, 0x18, 0x15, 0x8f, - 0x88, 0x98, 0x3c, 0x85, 0x4e, 0x18, 0x49, 0x77, 0x15, 0xad, 0x43, 0xdf, 0x64, 0xb7, 0xc3, 0x48, - 0xfe, 0xa2, 0x6c, 0xfa, 0x6f, 0x1d, 0x86, 0x8b, 0xd8, 0xf7, 0xee, 0x09, 0x5a, 0x1d, 0x50, 0xe3, - 0x21, 0x03, 0xb2, 0xb6, 0x0c, 0x28, 0x1d, 0xc4, 0xde, 0x1d, 0x83, 0x68, 0x16, 0x07, 0x71, 0x04, - 0xa3, 0x62, 0x6d, 0xbb, 0xba, 0x61, 0x60, 0x7f, 0xf4, 0x84, 0xd8, 0x44, 0xdc, 0x27, 0x13, 0x68, - 0xe2, 0xa5, 0xc7, 0x02, 0xd3, 0x48, 0x62, 0xa8, 0x0a, 0xce, 0x3d, 0x71, 0xae, 0x69, 0xee, 0x39, - 0xfa, 0x37, 0x99, 0x81, 0xbd, 0x16, 0xc8, 0x75, 0x65, 0x96, 0x3e, 0x9c, 0xd9, 0xe4, 0x31, 0xb4, - 0xd5, 0x6f, 0x97, 0xf9, 0xa6, 0xe8, 0x96, 0x32, 0x3f, 0xf8, 0xf4, 0x0d, 0x8c, 0x93, 0x61, 0xa7, - 0x09, 0x15, 0x73, 0x2f, 0xc0, 0x8e, 0x8d, 0x69, 0x2e, 0x4a, 0x5f, 0x0f, 0x32, 0x3b, 0x93, 0x7d, - 0xa6, 0xaf, 0x81, 0x94, 0xfd, 0x1f, 0x7c, 0x5d, 0xe8, 0x19, 0x8c, 0x13, 0x62, 0xf2, 0xc9, 0xb7, - 0x37, 0xfc, 0x04, 0xec, 0x10, 0x37, 0x6e, 0xae, 0xe9, 0x76, 0x88, 0x9b, 0xf7, 0xaa, 0xef, 0x67, - 0xd0, 0x53, 0x9f, 0x4a, 0xbd, 0x77, 0x43, 0xdc, 0x2c, 0x0c, 0x44, 0x5f, 0x02, 0x29, 0x27, 0xda, - 0x35, 0x83, 0x17, 0x30, 0x4e, 0xae, 0xe0, 0xce, 0xda, 0x54, 0xf4, 0xf2, 0xd1, 0x5d, 0xd1, 0xc7, - 0x30, 0xfc, 0x8d, 0x09, 0x99, 0x8b, 0x4d, 0x7f, 0x80, 0x51, 0x11, 0x12, 0x31, 0xf9, 0x12, 0x3a, - 0x29, 0xd3, 0x8a, 0x42, 0xab, 0x3a, 0x89, 0xdb, 0xef, 0xb4, 0x07, 0x70, 0x82, 0x5c, 0xb0, 0x28, - 0x54, 0xe1, 0xbe, 0x85, 0x6e, 0x66, 0x89, 0x38, 0x51, 0x2d, 0x7e, 0x85, 0xdc, 0x94, 0x6e, 0x2c, - 0x32, 0x02, 0xa5, 0x77, 0x9a, 0xd2, 0xa6, 0xa3, 0xa5, 0xef, 0x1f, 0x18, 0x3a, 0xb8, 0xe2, 0x28, - 0xce, 0xff, 0x8c, 0x2e, 0x30, 0x74, 0x70, 0x55, 0xd9, 0xa4, 0xa7, 0xd0, 0x49, 0x76, 0x59, 0xdd, - 0xa7, 0x44, 0x05, 0xed, 0x04, 0xf8, 0xe0, 0x93, 0xcf, 0x01, 0x96, 0xfa, 0x46, 0xf8, 0xae, 0x27, - 0xf5, 0x2a, 0x58, 0x4e, 0xc7, 0x20, 0x3f, 0x49, 0xe5, 0x1b, 0x78, 0x42, 0xaa, 0x71, 0xf9, 0x5a, - 0xc9, 0x2c, 0xc7, 0x56, 0xc0, 0x42, 0xa0, 0x22, 0x7d, 0xa0, 0x38, 0x30, 0xf9, 0x15, 0xe3, 0xb9, - 0x8b, 0x5b, 0x2f, 0x5c, 0xdc, 0x3f, 0x12, 0x06, 0xb3, 0xa3, 0x22, 0x26, 0xaf, 0x61, 0xc0, 0x13, - 0xd3, 0x95, 0xaa, 0xf4, 0x94, 0xb2, 0x89, 0xa6, 0xac, 0xd4, 0x94, 0xd3, 0xe7, 0x39, 0x40, 0xd0, - 0xf7, 0x30, 0x72, 0xf0, 0x2a, 0xba, 0xc0, 0x07, 0x24, 0xbf, 0x97, 0x00, 0xfa, 0x15, 0x8c, 0x4b, - 0x91, 0x76, 0xdd, 0x86, 0x77, 0x30, 0x3e, 0x41, 0xce, 0x56, 0x37, 0xbb, 0xf7, 0x60, 0x96, 0x5b, - 0x4d, 0x93, 0x38, 0xdb, 0xc5, 0xdf, 0x81, 0x94, 0xc3, 0x88, 0x58, 0x79, 0x5c, 0x29, 0x94, 0x61, - 0x96, 0x38, 0xb5, 0x8b, 0x55, 0x35, 0x8a, 0x55, 0xbd, 0xfa, 0xbf, 0x09, 0xd6, 0x5b, 0xbc, 0x26, - 0xdf, 
0x43, 0x2f, 0xff, 0x1e, 0x90, 0x84, 0xce, 0xd2, 0xd3, 0x32, 0x7b, 0xb4, 0x05, 0x15, 0x31, - 0xad, 0x29, 0xf7, 0xbc, 0xfa, 0x19, 0xf7, 0x92, 0x58, 0x1b, 0xf7, 0xb2, 0x4c, 0x26, 0xee, 0xf9, - 0xa7, 0xc0, 0xb8, 0x97, 0x1e, 0x10, 0xe3, 0x5e, 0x7e, 0x33, 0x68, 0x8d, 0x1c, 0xc3, 0xa0, 0xa8, - 0x4f, 0x64, 0x3f, 0x57, 0x68, 0x8e, 0xef, 0xd9, 0xe3, 0xad, 0x78, 0x1a, 0xa4, 0x28, 0x1f, 0x26, - 0x48, 0x45, 0xbc, 0x4c, 0x90, 0xaa, 0xd6, 0x24, 0x41, 0x8a, 0x2a, 0x61, 0x82, 0x54, 0x54, 0xc6, - 0x04, 0xa9, 0x4a, 0x0a, 0xad, 0x91, 0x37, 0xd0, 0xcf, 0x8b, 0x84, 0x30, 0x74, 0x94, 0xb4, 0xc4, - 0xd0, 0x51, 0x96, 0x13, 0x5a, 0x23, 0x2f, 0x01, 0x7e, 0x45, 0x69, 0x84, 0x81, 0x0c, 0xf5, 0xb1, - 0x5b, 0xd1, 0x98, 0x8d, 0x8a, 0x80, 0x76, 0xf9, 0x0e, 0xba, 0xb9, 0x45, 0x23, 0x9f, 0x65, 0xa1, - 0x6f, 0x17, 0x65, 0x36, 0xa9, 0x82, 0xda, 0xf7, 0x47, 0xe8, 0x17, 0x56, 0x81, 0x3c, 0x32, 0xab, - 0x58, 0x5c, 0xb4, 0xd9, 0xfe, 0x36, 0x38, 0x65, 0xad, 0x78, 0xa7, 0x0d, 0x6b, 0x95, 0x7d, 0x31, - 0xac, 0x55, 0x17, 0x80, 0xd6, 0x7e, 0x9e, 0x00, 0x59, 0x46, 0x97, 0xf3, 0x65, 0xc4, 0x31, 0x12, - 0x73, 0x1f, 0xaf, 0xd5, 0xd1, 0xd3, 0x96, 0xfe, 0xbf, 0xf7, 0xf5, 0xa7, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x49, 0x46, 0x0e, 0xa3, 0x00, 0x0a, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DexClient is the client API for Dex service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DexClient interface { - // CreateClient creates a client. - CreateClient(ctx context.Context, in *CreateClientReq, opts ...grpc.CallOption) (*CreateClientResp, error) - // UpdateClient updates an existing client - UpdateClient(ctx context.Context, in *UpdateClientReq, opts ...grpc.CallOption) (*UpdateClientResp, error) - // DeleteClient deletes the provided client. - DeleteClient(ctx context.Context, in *DeleteClientReq, opts ...grpc.CallOption) (*DeleteClientResp, error) - // CreatePassword creates a password. - CreatePassword(ctx context.Context, in *CreatePasswordReq, opts ...grpc.CallOption) (*CreatePasswordResp, error) - // UpdatePassword modifies existing password. - UpdatePassword(ctx context.Context, in *UpdatePasswordReq, opts ...grpc.CallOption) (*UpdatePasswordResp, error) - // DeletePassword deletes the password. - DeletePassword(ctx context.Context, in *DeletePasswordReq, opts ...grpc.CallOption) (*DeletePasswordResp, error) - // ListPassword lists all password entries. - ListPasswords(ctx context.Context, in *ListPasswordReq, opts ...grpc.CallOption) (*ListPasswordResp, error) - // GetVersion returns version information of the server. - GetVersion(ctx context.Context, in *VersionReq, opts ...grpc.CallOption) (*VersionResp, error) - // ListRefresh lists all the refresh token entries for a particular user. - ListRefresh(ctx context.Context, in *ListRefreshReq, opts ...grpc.CallOption) (*ListRefreshResp, error) - // RevokeRefresh revokes the refresh token for the provided user-client pair. - // - // Note that each user-client pair can have only one refresh token at a time. - RevokeRefresh(ctx context.Context, in *RevokeRefreshReq, opts ...grpc.CallOption) (*RevokeRefreshResp, error) - // VerifyPassword returns whether a password matches a hash for a specific email or not. 
- VerifyPassword(ctx context.Context, in *VerifyPasswordReq, opts ...grpc.CallOption) (*VerifyPasswordResp, error) -} - -type dexClient struct { - cc *grpc.ClientConn -} - -func NewDexClient(cc *grpc.ClientConn) DexClient { - return &dexClient{cc} -} - -func (c *dexClient) CreateClient(ctx context.Context, in *CreateClientReq, opts ...grpc.CallOption) (*CreateClientResp, error) { - out := new(CreateClientResp) - err := c.cc.Invoke(ctx, "/api.Dex/CreateClient", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) UpdateClient(ctx context.Context, in *UpdateClientReq, opts ...grpc.CallOption) (*UpdateClientResp, error) { - out := new(UpdateClientResp) - err := c.cc.Invoke(ctx, "/api.Dex/UpdateClient", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) DeleteClient(ctx context.Context, in *DeleteClientReq, opts ...grpc.CallOption) (*DeleteClientResp, error) { - out := new(DeleteClientResp) - err := c.cc.Invoke(ctx, "/api.Dex/DeleteClient", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) CreatePassword(ctx context.Context, in *CreatePasswordReq, opts ...grpc.CallOption) (*CreatePasswordResp, error) { - out := new(CreatePasswordResp) - err := c.cc.Invoke(ctx, "/api.Dex/CreatePassword", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) UpdatePassword(ctx context.Context, in *UpdatePasswordReq, opts ...grpc.CallOption) (*UpdatePasswordResp, error) { - out := new(UpdatePasswordResp) - err := c.cc.Invoke(ctx, "/api.Dex/UpdatePassword", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) DeletePassword(ctx context.Context, in *DeletePasswordReq, opts ...grpc.CallOption) (*DeletePasswordResp, error) { - out := new(DeletePasswordResp) - err := c.cc.Invoke(ctx, "/api.Dex/DeletePassword", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) ListPasswords(ctx context.Context, in *ListPasswordReq, opts ...grpc.CallOption) (*ListPasswordResp, error) { - out := new(ListPasswordResp) - err := c.cc.Invoke(ctx, "/api.Dex/ListPasswords", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) GetVersion(ctx context.Context, in *VersionReq, opts ...grpc.CallOption) (*VersionResp, error) { - out := new(VersionResp) - err := c.cc.Invoke(ctx, "/api.Dex/GetVersion", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) ListRefresh(ctx context.Context, in *ListRefreshReq, opts ...grpc.CallOption) (*ListRefreshResp, error) { - out := new(ListRefreshResp) - err := c.cc.Invoke(ctx, "/api.Dex/ListRefresh", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) RevokeRefresh(ctx context.Context, in *RevokeRefreshReq, opts ...grpc.CallOption) (*RevokeRefreshResp, error) { - out := new(RevokeRefreshResp) - err := c.cc.Invoke(ctx, "/api.Dex/RevokeRefresh", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *dexClient) VerifyPassword(ctx context.Context, in *VerifyPasswordReq, opts ...grpc.CallOption) (*VerifyPasswordResp, error) { - out := new(VerifyPasswordResp) - err := c.cc.Invoke(ctx, "/api.Dex/VerifyPassword", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DexServer is the server API for Dex service. 
-type DexServer interface { - // CreateClient creates a client. - CreateClient(context.Context, *CreateClientReq) (*CreateClientResp, error) - // UpdateClient updates an existing client - UpdateClient(context.Context, *UpdateClientReq) (*UpdateClientResp, error) - // DeleteClient deletes the provided client. - DeleteClient(context.Context, *DeleteClientReq) (*DeleteClientResp, error) - // CreatePassword creates a password. - CreatePassword(context.Context, *CreatePasswordReq) (*CreatePasswordResp, error) - // UpdatePassword modifies existing password. - UpdatePassword(context.Context, *UpdatePasswordReq) (*UpdatePasswordResp, error) - // DeletePassword deletes the password. - DeletePassword(context.Context, *DeletePasswordReq) (*DeletePasswordResp, error) - // ListPassword lists all password entries. - ListPasswords(context.Context, *ListPasswordReq) (*ListPasswordResp, error) - // GetVersion returns version information of the server. - GetVersion(context.Context, *VersionReq) (*VersionResp, error) - // ListRefresh lists all the refresh token entries for a particular user. - ListRefresh(context.Context, *ListRefreshReq) (*ListRefreshResp, error) - // RevokeRefresh revokes the refresh token for the provided user-client pair. - // - // Note that each user-client pair can have only one refresh token at a time. - RevokeRefresh(context.Context, *RevokeRefreshReq) (*RevokeRefreshResp, error) - // VerifyPassword returns whether a password matches a hash for a specific email or not. - VerifyPassword(context.Context, *VerifyPasswordReq) (*VerifyPasswordResp, error) -} - -// UnimplementedDexServer can be embedded to have forward compatible implementations. -type UnimplementedDexServer struct { -} - -func (*UnimplementedDexServer) CreateClient(ctx context.Context, req *CreateClientReq) (*CreateClientResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateClient not implemented") -} -func (*UnimplementedDexServer) UpdateClient(ctx context.Context, req *UpdateClientReq) (*UpdateClientResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateClient not implemented") -} -func (*UnimplementedDexServer) DeleteClient(ctx context.Context, req *DeleteClientReq) (*DeleteClientResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteClient not implemented") -} -func (*UnimplementedDexServer) CreatePassword(ctx context.Context, req *CreatePasswordReq) (*CreatePasswordResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreatePassword not implemented") -} -func (*UnimplementedDexServer) UpdatePassword(ctx context.Context, req *UpdatePasswordReq) (*UpdatePasswordResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdatePassword not implemented") -} -func (*UnimplementedDexServer) DeletePassword(ctx context.Context, req *DeletePasswordReq) (*DeletePasswordResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeletePassword not implemented") -} -func (*UnimplementedDexServer) ListPasswords(ctx context.Context, req *ListPasswordReq) (*ListPasswordResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListPasswords not implemented") -} -func (*UnimplementedDexServer) GetVersion(ctx context.Context, req *VersionReq) (*VersionResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented") -} -func (*UnimplementedDexServer) ListRefresh(ctx context.Context, req *ListRefreshReq) (*ListRefreshResp, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method ListRefresh not implemented") -} -func (*UnimplementedDexServer) RevokeRefresh(ctx context.Context, req *RevokeRefreshReq) (*RevokeRefreshResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeRefresh not implemented") -} -func (*UnimplementedDexServer) VerifyPassword(ctx context.Context, req *VerifyPasswordReq) (*VerifyPasswordResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyPassword not implemented") -} - -func RegisterDexServer(s *grpc.Server, srv DexServer) { - s.RegisterService(&_Dex_serviceDesc, srv) -} - -func _Dex_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateClientReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).CreateClient(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/CreateClient", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).CreateClient(ctx, req.(*CreateClientReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateClientReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).UpdateClient(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/UpdateClient", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).UpdateClient(ctx, req.(*UpdateClientReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_DeleteClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteClientReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).DeleteClient(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/DeleteClient", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).DeleteClient(ctx, req.(*DeleteClientReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_CreatePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreatePasswordReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).CreatePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/CreatePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).CreatePassword(ctx, req.(*CreatePasswordReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_UpdatePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdatePasswordReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).UpdatePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/UpdatePassword", - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(DexServer).UpdatePassword(ctx, req.(*UpdatePasswordReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_DeletePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeletePasswordReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).DeletePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/DeletePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).DeletePassword(ctx, req.(*DeletePasswordReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_ListPasswords_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPasswordReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).ListPasswords(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/ListPasswords", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).ListPasswords(ctx, req.(*ListPasswordReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VersionReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).GetVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/GetVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).GetVersion(ctx, req.(*VersionReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_ListRefresh_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListRefreshReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).ListRefresh(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/ListRefresh", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).ListRefresh(ctx, req.(*ListRefreshReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_RevokeRefresh_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RevokeRefreshReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DexServer).RevokeRefresh(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/RevokeRefresh", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).RevokeRefresh(ctx, req.(*RevokeRefreshReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _Dex_VerifyPassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VerifyPasswordReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(DexServer).VerifyPassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/api.Dex/VerifyPassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DexServer).VerifyPassword(ctx, req.(*VerifyPasswordReq)) - } - return interceptor(ctx, in, info, handler) -} - -var _Dex_serviceDesc = grpc.ServiceDesc{ - ServiceName: "api.Dex", - HandlerType: (*DexServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateClient", - Handler: _Dex_CreateClient_Handler, - }, - { - MethodName: "UpdateClient", - Handler: _Dex_UpdateClient_Handler, - }, - { - MethodName: "DeleteClient", - Handler: _Dex_DeleteClient_Handler, - }, - { - MethodName: "CreatePassword", - Handler: _Dex_CreatePassword_Handler, - }, - { - MethodName: "UpdatePassword", - Handler: _Dex_UpdatePassword_Handler, - }, - { - MethodName: "DeletePassword", - Handler: _Dex_DeletePassword_Handler, - }, - { - MethodName: "ListPasswords", - Handler: _Dex_ListPasswords_Handler, - }, - { - MethodName: "GetVersion", - Handler: _Dex_GetVersion_Handler, - }, - { - MethodName: "ListRefresh", - Handler: _Dex_ListRefresh_Handler, - }, - { - MethodName: "RevokeRefresh", - Handler: _Dex_RevokeRefresh_Handler, - }, - { - MethodName: "VerifyPassword", - Handler: _Dex_VerifyPassword_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api/api.proto", -} diff --git a/vendor/github.com/dexidp/dex/api/api.proto b/vendor/github.com/dexidp/dex/api/api.proto deleted file mode 100644 index 5d9ce1a..0000000 --- a/vendor/github.com/dexidp/dex/api/api.proto +++ /dev/null @@ -1,187 +0,0 @@ -syntax = "proto3"; -option java_package = "com.coreos.dex.api"; - -package api; - -// Client represents an OAuth2 client. -message Client { - string id = 1; - string secret = 2; - repeated string redirect_uris = 3; - repeated string trusted_peers = 4; - bool public = 5; - string name = 6; - string logo_url = 7; -} - -// CreateClientReq is a request to make a client. -message CreateClientReq { - Client client = 1; -} - -// CreateClientResp returns the response from creating a client. -message CreateClientResp { - bool already_exists = 1; - Client client = 2; -} - -// DeleteClientReq is a request to delete a client. -message DeleteClientReq { - // The ID of the client. - string id = 1; -} - -// DeleteClientResp determines if the client is deleted successfully. -message DeleteClientResp { - bool not_found = 1; -} - -// UpdateClientReq is a request to update an exisitng client. -message UpdateClientReq { - string id = 1; - repeated string redirect_uris = 2; - repeated string trusted_peers = 3; - string name = 4; - string logo_url = 5; -} - -// UpdateClientResp returns the reponse form updating a client. -message UpdateClientResp { - bool not_found = 1; -} - -// TODO(ericchiang): expand this. - -// Password is an email for password mapping managed by the storage. -message Password { - string email = 1; - - // Currently we do not accept plain text passwords. Could be an option in the future. - bytes hash = 2; - string username = 3; - string user_id = 4; -} - -// CreatePasswordReq is a request to make a password. -message CreatePasswordReq { - Password password = 1; -} - -// CreatePasswordResp returns the response from creating a password. -message CreatePasswordResp { - bool already_exists = 1; -} - -// UpdatePasswordReq is a request to modify an existing password. -message UpdatePasswordReq { - // The email used to lookup the password. 
This field cannot be modified - string email = 1; - bytes new_hash = 2; - string new_username = 3; -} - -// UpdatePasswordResp returns the response from modifying an existing password. -message UpdatePasswordResp { - bool not_found = 1; -} - -// DeletePasswordReq is a request to delete a password. -message DeletePasswordReq { - string email = 1; -} - -// DeletePasswordResp returns the response from deleting a password. -message DeletePasswordResp { - bool not_found = 1; -} - -// ListPasswordReq is a request to enumerate passwords. -message ListPasswordReq {} - -// ListPasswordResp returns a list of passwords. -message ListPasswordResp { - repeated Password passwords = 1; -} - -// VersionReq is a request to fetch version info. -message VersionReq {} - -// VersionResp holds the version info of components. -message VersionResp { - // Semantic version of the server. - string server = 1; - // Numeric version of the API. It increases everytime a new call is added to the API. - // Clients should use this info to determine if the server supports specific features. - int32 api = 2; -} - -// RefreshTokenRef contains the metadata for a refresh token that is managed by the storage. -message RefreshTokenRef { - // ID of the refresh token. - string id = 1; - string client_id = 2; - int64 created_at = 5; - int64 last_used = 6; -} - -// ListRefreshReq is a request to enumerate the refresh tokens of a user. -message ListRefreshReq { - // The "sub" claim returned in the ID Token. - string user_id = 1; -} - -// ListRefreshResp returns a list of refresh tokens for a user. -message ListRefreshResp { - repeated RefreshTokenRef refresh_tokens = 1; -} - -// RevokeRefreshReq is a request to revoke the refresh token of the user-client pair. -message RevokeRefreshReq { - // The "sub" claim returned in the ID Token. - string user_id = 1; - string client_id = 2; -} - -// RevokeRefreshResp determines if the refresh token is revoked successfully. -message RevokeRefreshResp { - // Set to true is refresh token was not found and token could not be revoked. - bool not_found = 1; -} - -message VerifyPasswordReq { - string email = 1; - string password = 2; -} - -message VerifyPasswordResp { - bool verified = 1; - bool not_found = 2; -} - -// Dex represents the dex gRPC service. -service Dex { - // CreateClient creates a client. - rpc CreateClient(CreateClientReq) returns (CreateClientResp) {}; - // UpdateClient updates an existing client - rpc UpdateClient(UpdateClientReq) returns (UpdateClientResp) {}; - // DeleteClient deletes the provided client. - rpc DeleteClient(DeleteClientReq) returns (DeleteClientResp) {}; - // CreatePassword creates a password. - rpc CreatePassword(CreatePasswordReq) returns (CreatePasswordResp) {}; - // UpdatePassword modifies existing password. - rpc UpdatePassword(UpdatePasswordReq) returns (UpdatePasswordResp) {}; - // DeletePassword deletes the password. - rpc DeletePassword(DeletePasswordReq) returns (DeletePasswordResp) {}; - // ListPassword lists all password entries. - rpc ListPasswords(ListPasswordReq) returns (ListPasswordResp) {}; - // GetVersion returns version information of the server. - rpc GetVersion(VersionReq) returns (VersionResp) {}; - // ListRefresh lists all the refresh token entries for a particular user. - rpc ListRefresh(ListRefreshReq) returns (ListRefreshResp) {}; - // RevokeRefresh revokes the refresh token for the provided user-client pair. - // - // Note that each user-client pair can have only one refresh token at a time. 
- rpc RevokeRefresh(RevokeRefreshReq) returns (RevokeRefreshResp) {}; - // VerifyPassword returns whether a password matches a hash for a specific email or not. - rpc VerifyPassword(VerifyPasswordReq) returns (VerifyPasswordResp) {}; -} diff --git a/vendor/github.com/dexidp/dex/cmd/dex/config.go b/vendor/github.com/dexidp/dex/cmd/dex/config.go deleted file mode 100644 index 3d07f2f..0000000 --- a/vendor/github.com/dexidp/dex/cmd/dex/config.go +++ /dev/null @@ -1,291 +0,0 @@ -package main - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "os" - "strings" - - "golang.org/x/crypto/bcrypt" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/server" - "github.com/dexidp/dex/storage" - "github.com/dexidp/dex/storage/etcd" - "github.com/dexidp/dex/storage/kubernetes" - "github.com/dexidp/dex/storage/memory" - "github.com/dexidp/dex/storage/sql" -) - -// Config is the config format for the main application. -type Config struct { - Issuer string `json:"issuer"` - Storage Storage `json:"storage"` - Web Web `json:"web"` - Telemetry Telemetry `json:"telemetry"` - OAuth2 OAuth2 `json:"oauth2"` - GRPC GRPC `json:"grpc"` - Expiry Expiry `json:"expiry"` - Logger Logger `json:"logger"` - - Frontend server.WebConfig `json:"frontend"` - - // StaticConnectors are user defined connectors specified in the ConfigMap - // Write operations, like updating a connector, will fail. - StaticConnectors []Connector `json:"connectors"` - - // StaticClients cause the server to use this list of clients rather than - // querying the storage. Write operations, like creating a client, will fail. - StaticClients []storage.Client `json:"staticClients"` - - // If enabled, the server will maintain a list of passwords which can be used - // to identify a user. - EnablePasswordDB bool `json:"enablePasswordDB"` - - // StaticPasswords cause the server use this list of passwords rather than - // querying the storage. Cannot be specified without enabling a passwords - // database. - StaticPasswords []password `json:"staticPasswords"` -} - -//Validate the configuration -func (c Config) Validate() error { - // Fast checks. Perform these first for a more responsive CLI. 
- checks := []struct { - bad bool - errMsg string - }{ - {c.Issuer == "", "no issuer specified in config file"}, - {!c.EnablePasswordDB && len(c.StaticPasswords) != 0, "cannot specify static passwords without enabling password db"}, - {c.Storage.Config == nil, "no storage supplied in config file"}, - {c.Web.HTTP == "" && c.Web.HTTPS == "", "must supply a HTTP/HTTPS address to listen on"}, - {c.Web.HTTPS != "" && c.Web.TLSCert == "", "no cert specified for HTTPS"}, - {c.Web.HTTPS != "" && c.Web.TLSKey == "", "no private key specified for HTTPS"}, - {c.GRPC.TLSCert != "" && c.GRPC.Addr == "", "no address specified for gRPC"}, - {c.GRPC.TLSKey != "" && c.GRPC.Addr == "", "no address specified for gRPC"}, - {(c.GRPC.TLSCert == "") != (c.GRPC.TLSKey == ""), "must specific both a gRPC TLS cert and key"}, - {c.GRPC.TLSCert == "" && c.GRPC.TLSClientCA != "", "cannot specify gRPC TLS client CA without a gRPC TLS cert"}, - } - - var checkErrors []string - - for _, check := range checks { - if check.bad { - checkErrors = append(checkErrors, check.errMsg) - } - } - if len(checkErrors) != 0 { - return fmt.Errorf("invalid Config:\n\t-\t%s", strings.Join(checkErrors, "\n\t-\t")) - } - return nil -} - -type password storage.Password - -func (p *password) UnmarshalJSON(b []byte) error { - var data struct { - Email string `json:"email"` - Username string `json:"username"` - UserID string `json:"userID"` - Hash string `json:"hash"` - HashFromEnv string `json:"hashFromEnv"` - } - if err := json.Unmarshal(b, &data); err != nil { - return err - } - *p = password(storage.Password{ - Email: data.Email, - Username: data.Username, - UserID: data.UserID, - }) - if len(data.Hash) == 0 && len(data.HashFromEnv) > 0 { - data.Hash = os.Getenv(data.HashFromEnv) - } - if len(data.Hash) == 0 { - return fmt.Errorf("no password hash provided") - } - - // If this value is a valid bcrypt, use it. - _, bcryptErr := bcrypt.Cost([]byte(data.Hash)) - if bcryptErr == nil { - p.Hash = []byte(data.Hash) - return nil - } - - // For backwards compatibility try to base64 decode this value. - hashBytes, err := base64.StdEncoding.DecodeString(data.Hash) - if err != nil { - return fmt.Errorf("malformed bcrypt hash: %v", bcryptErr) - } - if _, err := bcrypt.Cost(hashBytes); err != nil { - return fmt.Errorf("malformed bcrypt hash: %v", err) - } - p.Hash = hashBytes - return nil -} - -// OAuth2 describes enabled OAuth2 extensions. -type OAuth2 struct { - ResponseTypes []string `json:"responseTypes"` - // If specified, do not prompt the user to approve client authorization. The - // act of logging in implies authorization. - SkipApprovalScreen bool `json:"skipApprovalScreen"` - // If specified, show the connector selection screen even if there's only one - AlwaysShowLoginScreen bool `json:"alwaysShowLoginScreen"` - // This is the connector that can be used for password grant - PasswordConnector string `json:"passwordConnector"` -} - -// Web is the config format for the HTTP server. -type Web struct { - HTTP string `json:"http"` - HTTPS string `json:"https"` - TLSCert string `json:"tlsCert"` - TLSKey string `json:"tlsKey"` - AllowedOrigins []string `json:"allowedOrigins"` -} - -// Telemetry is the config format for telemetry including the HTTP server config. -type Telemetry struct { - HTTP string `json:"http"` -} - -// GRPC is the config for the gRPC API. -type GRPC struct { - // The port to listen on. 
- Addr string `json:"addr"` - TLSCert string `json:"tlsCert"` - TLSKey string `json:"tlsKey"` - TLSClientCA string `json:"tlsClientCA"` - Reflection bool `json:"reflection"` -} - -// Storage holds app's storage configuration. -type Storage struct { - Type string `json:"type"` - Config StorageConfig `json:"config"` -} - -// StorageConfig is a configuration that can create a storage. -type StorageConfig interface { - Open(logger log.Logger) (storage.Storage, error) -} - -var storages = map[string]func() StorageConfig{ - "etcd": func() StorageConfig { return new(etcd.Etcd) }, - "kubernetes": func() StorageConfig { return new(kubernetes.Config) }, - "memory": func() StorageConfig { return new(memory.Config) }, - "sqlite3": func() StorageConfig { return new(sql.SQLite3) }, - "postgres": func() StorageConfig { return new(sql.Postgres) }, - "mysql": func() StorageConfig { return new(sql.MySQL) }, -} - -// UnmarshalJSON allows Storage to implement the unmarshaler interface to -// dynamically determine the type of the storage config. -func (s *Storage) UnmarshalJSON(b []byte) error { - var store struct { - Type string `json:"type"` - Config json.RawMessage `json:"config"` - } - if err := json.Unmarshal(b, &store); err != nil { - return fmt.Errorf("parse storage: %v", err) - } - f, ok := storages[store.Type] - if !ok { - return fmt.Errorf("unknown storage type %q", store.Type) - } - - storageConfig := f() - if len(store.Config) != 0 { - data := []byte(os.ExpandEnv(string(store.Config))) - if err := json.Unmarshal(data, storageConfig); err != nil { - return fmt.Errorf("parse storage config: %v", err) - } - } - *s = Storage{ - Type: store.Type, - Config: storageConfig, - } - return nil -} - -// Connector is a magical type that can unmarshal YAML dynamically. The -// Type field determines the connector type, which is then customized for Config. -type Connector struct { - Type string `json:"type"` - Name string `json:"name"` - ID string `json:"id"` - - Config server.ConnectorConfig `json:"config"` -} - -// UnmarshalJSON allows Connector to implement the unmarshaler interface to -// dynamically determine the type of the connector config. -func (c *Connector) UnmarshalJSON(b []byte) error { - var conn struct { - Type string `json:"type"` - Name string `json:"name"` - ID string `json:"id"` - - Config json.RawMessage `json:"config"` - } - if err := json.Unmarshal(b, &conn); err != nil { - return fmt.Errorf("parse connector: %v", err) - } - f, ok := server.ConnectorsConfig[conn.Type] - if !ok { - return fmt.Errorf("unknown connector type %q", conn.Type) - } - - connConfig := f() - if len(conn.Config) != 0 { - data := []byte(os.ExpandEnv(string(conn.Config))) - if err := json.Unmarshal(data, connConfig); err != nil { - return fmt.Errorf("parse connector config: %v", err) - } - } - *c = Connector{ - Type: conn.Type, - Name: conn.Name, - ID: conn.ID, - Config: connConfig, - } - return nil -} - -// ToStorageConnector converts an object to storage connector type. -func ToStorageConnector(c Connector) (storage.Connector, error) { - data, err := json.Marshal(c.Config) - if err != nil { - return storage.Connector{}, fmt.Errorf("failed to marshal connector config: %v", err) - } - - return storage.Connector{ - ID: c.ID, - Type: c.Type, - Name: c.Name, - Config: data, - }, nil -} - -// Expiry holds configuration for the validity period of components. -type Expiry struct { - // SigningKeys defines the duration of time after which the SigningKeys will be rotated. 
- SigningKeys string `json:"signingKeys"` - - // IdTokens defines the duration of time for which the IdTokens will be valid. - IDTokens string `json:"idTokens"` - - // AuthRequests defines the duration of time for which the AuthRequests will be valid. - AuthRequests string `json:"authRequests"` -} - -// Logger holds configuration required to customize logging for dex. -type Logger struct { - // Level sets logging level severity. - Level string `json:"level"` - - // Format specifies the format to be used for logging. - Format string `json:"format"` -} diff --git a/vendor/github.com/dexidp/dex/cmd/dex/main.go b/vendor/github.com/dexidp/dex/cmd/dex/main.go deleted file mode 100644 index be334d9..0000000 --- a/vendor/github.com/dexidp/dex/cmd/dex/main.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" -) - -func commandRoot() *cobra.Command { - rootCmd := &cobra.Command{ - Use: "dex", - Run: func(cmd *cobra.Command, args []string) { - cmd.Help() - os.Exit(2) - }, - } - rootCmd.AddCommand(commandServe()) - rootCmd.AddCommand(commandVersion()) - return rootCmd -} - -func main() { - if err := commandRoot().Execute(); err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - os.Exit(2) - } -} diff --git a/vendor/github.com/dexidp/dex/cmd/dex/serve.go b/vendor/github.com/dexidp/dex/cmd/dex/serve.go deleted file mode 100644 index 5c8732a..0000000 --- a/vendor/github.com/dexidp/dex/cmd/dex/serve.go +++ /dev/null @@ -1,379 +0,0 @@ -package main - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "strings" - "time" - - "github.com/ghodss/yaml" - grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/reflection" - - "github.com/dexidp/dex/api" - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/server" - "github.com/dexidp/dex/storage" -) - -func commandServe() *cobra.Command { - return &cobra.Command{ - Use: "serve [ config file ]", - Short: "Connect to the storage and begin serving requests.", - Long: ``, - Example: "dex serve config.yaml", - Run: func(cmd *cobra.Command, args []string) { - if err := serve(cmd, args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(2) - } - }, - } -} - -func serve(cmd *cobra.Command, args []string) error { - switch len(args) { - default: - return errors.New("surplus arguments") - case 0: - // TODO(ericchiang): Consider having a default config file location. 
- return errors.New("no arguments provided") - case 1: - } - - configFile := args[0] - configData, err := ioutil.ReadFile(configFile) - if err != nil { - return fmt.Errorf("failed to read config file %s: %v", configFile, err) - } - - var c Config - if err := yaml.Unmarshal(configData, &c); err != nil { - return fmt.Errorf("error parse config file %s: %v", configFile, err) - } - - logger, err := newLogger(c.Logger.Level, c.Logger.Format) - if err != nil { - return fmt.Errorf("invalid config: %v", err) - } - if c.Logger.Level != "" { - logger.Infof("config using log level: %s", c.Logger.Level) - } - if err := c.Validate(); err != nil { - return err - } - - logger.Infof("config issuer: %s", c.Issuer) - - prometheusRegistry := prometheus.NewRegistry() - err = prometheusRegistry.Register(prometheus.NewGoCollector()) - if err != nil { - return fmt.Errorf("failed to register Go runtime metrics: %v", err) - } - - err = prometheusRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) - if err != nil { - return fmt.Errorf("failed to register process metrics: %v", err) - } - - grpcMetrics := grpcprometheus.NewServerMetrics() - err = prometheusRegistry.Register(grpcMetrics) - if err != nil { - return fmt.Errorf("failed to register gRPC server metrics: %v", err) - } - - var grpcOptions []grpc.ServerOption - - allowedTLSCiphers := []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - } - - if c.GRPC.TLSCert != "" { - // Parse certificates from certificate file and key file for server. - cert, err := tls.LoadX509KeyPair(c.GRPC.TLSCert, c.GRPC.TLSKey) - if err != nil { - return fmt.Errorf("invalid config: error parsing gRPC certificate file: %v", err) - } - - tlsConfig := tls.Config{ - Certificates: []tls.Certificate{cert}, - MinVersion: tls.VersionTLS12, - CipherSuites: allowedTLSCiphers, - PreferServerCipherSuites: true, - } - - if c.GRPC.TLSClientCA != "" { - // Parse certificates from client CA file to a new CertPool. 
- cPool := x509.NewCertPool() - clientCert, err := ioutil.ReadFile(c.GRPC.TLSClientCA) - if err != nil { - return fmt.Errorf("invalid config: reading from client CA file: %v", err) - } - if !cPool.AppendCertsFromPEM(clientCert) { - return errors.New("invalid config: failed to parse client CA") - } - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = cPool - - // Only add metrics if client auth is enabled - grpcOptions = append(grpcOptions, - grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()), - grpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()), - ) - } - - grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(&tlsConfig))) - } - - s, err := c.Storage.Config.Open(logger) - if err != nil { - return fmt.Errorf("failed to initialize storage: %v", err) - } - logger.Infof("config storage: %s", c.Storage.Type) - - if len(c.StaticClients) > 0 { - for i, client := range c.StaticClients { - if client.Name == "" { - return fmt.Errorf("invalid config: Name field is required for a client") - } - if client.ID == "" && client.IDEnv == "" { - return fmt.Errorf("invalid config: ID or IDEnv field is required for a client") - } - if client.IDEnv != "" { - if client.ID != "" { - return fmt.Errorf("invalid config: ID and IDEnv fields are exclusive for client %q", client.ID) - } - c.StaticClients[i].ID = os.Getenv(client.IDEnv) - } - if client.Secret == "" && client.SecretEnv == "" && !client.Public { - return fmt.Errorf("invalid config: Secret or SecretEnv field is required for client %q", client.ID) - } - if client.SecretEnv != "" { - if client.Secret != "" { - return fmt.Errorf("invalid config: Secret and SecretEnv fields are exclusive for client %q", client.ID) - } - c.StaticClients[i].Secret = os.Getenv(client.SecretEnv) - } - logger.Infof("config static client: %s", client.Name) - } - s = storage.WithStaticClients(s, c.StaticClients) - } - if len(c.StaticPasswords) > 0 { - passwords := make([]storage.Password, len(c.StaticPasswords)) - for i, p := range c.StaticPasswords { - passwords[i] = storage.Password(p) - } - s = storage.WithStaticPasswords(s, passwords, logger) - } - - storageConnectors := make([]storage.Connector, len(c.StaticConnectors)) - for i, c := range c.StaticConnectors { - if c.ID == "" || c.Name == "" || c.Type == "" { - return fmt.Errorf("invalid config: ID, Type and Name fields are required for a connector") - } - if c.Config == nil { - return fmt.Errorf("invalid config: no config field for connector %q", c.ID) - } - logger.Infof("config connector: %s", c.ID) - - // convert to a storage connector object - conn, err := ToStorageConnector(c) - if err != nil { - return fmt.Errorf("failed to initialize storage connectors: %v", err) - } - storageConnectors[i] = conn - } - - if c.EnablePasswordDB { - storageConnectors = append(storageConnectors, storage.Connector{ - ID: server.LocalConnector, - Name: "Email", - Type: server.LocalConnector, - }) - logger.Infof("config connector: local passwords enabled") - } - - s = storage.WithStaticConnectors(s, storageConnectors) - - if len(c.OAuth2.ResponseTypes) > 0 { - logger.Infof("config response types accepted: %s", c.OAuth2.ResponseTypes) - } - if c.OAuth2.SkipApprovalScreen { - logger.Infof("config skipping approval screen") - } - if c.OAuth2.PasswordConnector != "" { - logger.Infof("config using password grant connector: %s", c.OAuth2.PasswordConnector) - } - if len(c.Web.AllowedOrigins) > 0 { - logger.Infof("config allowed origins: %s", c.Web.AllowedOrigins) - } - - // explicitly convert 
to UTC. - now := func() time.Time { return time.Now().UTC() } - - serverConfig := server.Config{ - SupportedResponseTypes: c.OAuth2.ResponseTypes, - SkipApprovalScreen: c.OAuth2.SkipApprovalScreen, - AlwaysShowLoginScreen: c.OAuth2.AlwaysShowLoginScreen, - PasswordConnector: c.OAuth2.PasswordConnector, - AllowedOrigins: c.Web.AllowedOrigins, - Issuer: c.Issuer, - Storage: s, - Web: c.Frontend, - Logger: logger, - Now: now, - PrometheusRegistry: prometheusRegistry, - } - if c.Expiry.SigningKeys != "" { - signingKeys, err := time.ParseDuration(c.Expiry.SigningKeys) - if err != nil { - return fmt.Errorf("invalid config value %q for signing keys expiry: %v", c.Expiry.SigningKeys, err) - } - logger.Infof("config signing keys expire after: %v", signingKeys) - serverConfig.RotateKeysAfter = signingKeys - } - if c.Expiry.IDTokens != "" { - idTokens, err := time.ParseDuration(c.Expiry.IDTokens) - if err != nil { - return fmt.Errorf("invalid config value %q for id token expiry: %v", c.Expiry.IDTokens, err) - } - logger.Infof("config id tokens valid for: %v", idTokens) - serverConfig.IDTokensValidFor = idTokens - } - if c.Expiry.AuthRequests != "" { - authRequests, err := time.ParseDuration(c.Expiry.AuthRequests) - if err != nil { - return fmt.Errorf("invalid config value %q for auth request expiry: %v", c.Expiry.AuthRequests, err) - } - logger.Infof("config auth requests valid for: %v", authRequests) - serverConfig.AuthRequestsValidFor = authRequests - } - - serv, err := server.NewServer(context.Background(), serverConfig) - if err != nil { - return fmt.Errorf("failed to initialize server: %v", err) - } - - telemetryServ := http.NewServeMux() - telemetryServ.Handle("/metrics", promhttp.HandlerFor(prometheusRegistry, promhttp.HandlerOpts{})) - - errc := make(chan error, 3) - if c.Telemetry.HTTP != "" { - logger.Infof("listening (http/telemetry) on %s", c.Telemetry.HTTP) - go func() { - err := http.ListenAndServe(c.Telemetry.HTTP, telemetryServ) - errc <- fmt.Errorf("listening on %s failed: %v", c.Telemetry.HTTP, err) - }() - } - if c.Web.HTTP != "" { - logger.Infof("listening (http) on %s", c.Web.HTTP) - go func() { - err := http.ListenAndServe(c.Web.HTTP, serv) - errc <- fmt.Errorf("listening on %s failed: %v", c.Web.HTTP, err) - }() - } - if c.Web.HTTPS != "" { - httpsSrv := &http.Server{ - Addr: c.Web.HTTPS, - Handler: serv, - TLSConfig: &tls.Config{ - CipherSuites: allowedTLSCiphers, - PreferServerCipherSuites: true, - MinVersion: tls.VersionTLS12, - }, - } - - logger.Infof("listening (https) on %s", c.Web.HTTPS) - go func() { - err = httpsSrv.ListenAndServeTLS(c.Web.TLSCert, c.Web.TLSKey) - errc <- fmt.Errorf("listening on %s failed: %v", c.Web.HTTPS, err) - }() - } - if c.GRPC.Addr != "" { - logger.Infof("listening (grpc) on %s", c.GRPC.Addr) - go func() { - errc <- func() error { - list, err := net.Listen("tcp", c.GRPC.Addr) - if err != nil { - return fmt.Errorf("listening on %s failed: %v", c.GRPC.Addr, err) - } - s := grpc.NewServer(grpcOptions...) 
- api.RegisterDexServer(s, server.NewAPI(serverConfig.Storage, logger)) - grpcMetrics.InitializeMetrics(s) - if c.GRPC.Reflection { - logger.Info("enabling reflection in grpc service") - reflection.Register(s) - } - err = s.Serve(list) - return fmt.Errorf("listening on %s failed: %v", c.GRPC.Addr, err) - }() - }() - } - - return <-errc -} - -var ( - logLevels = []string{"debug", "info", "error"} - logFormats = []string{"json", "text"} -) - -type utcFormatter struct { - f logrus.Formatter -} - -func (f *utcFormatter) Format(e *logrus.Entry) ([]byte, error) { - e.Time = e.Time.UTC() - return f.f.Format(e) -} - -func newLogger(level string, format string) (log.Logger, error) { - var logLevel logrus.Level - switch strings.ToLower(level) { - case "debug": - logLevel = logrus.DebugLevel - case "", "info": - logLevel = logrus.InfoLevel - case "error": - logLevel = logrus.ErrorLevel - default: - return nil, fmt.Errorf("log level is not one of the supported values (%s): %s", strings.Join(logLevels, ", "), level) - } - - var formatter utcFormatter - switch strings.ToLower(format) { - case "", "text": - formatter.f = &logrus.TextFormatter{DisableColors: true} - case "json": - formatter.f = &logrus.JSONFormatter{} - default: - return nil, fmt.Errorf("log format is not one of the supported values (%s): %s", strings.Join(logFormats, ", "), format) - } - - return &logrus.Logger{ - Out: os.Stderr, - Formatter: &formatter, - Level: logLevel, - }, nil -} diff --git a/vendor/github.com/dexidp/dex/cmd/dex/version.go b/vendor/github.com/dexidp/dex/cmd/dex/version.go deleted file mode 100644 index b74cb01..0000000 --- a/vendor/github.com/dexidp/dex/cmd/dex/version.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" - "runtime" - - "github.com/spf13/cobra" - - "github.com/dexidp/dex/version" -) - -func commandVersion() *cobra.Command { - return &cobra.Command{ - Use: "version", - Short: "Print the version and exit", - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf(`dex Version: %s -Go Version: %s -Go OS/ARCH: %s %s -`, version.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - }, - } -} diff --git a/vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go b/vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go deleted file mode 100644 index 928e69a..0000000 --- a/vendor/github.com/dexidp/dex/connector/atlassiancrowd/atlassiancrowd.go +++ /dev/null @@ -1,456 +0,0 @@ -// Package atlassiancrowd provides authentication strategies using Atlassian Crowd. -package atlassiancrowd - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "strings" - "time" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -// Config holds configuration options for Atlassian Crowd connector. -// Crowd connectors require executing two queries, the first to find -// the user based on the username and password given to the connector. -// The second to use the user entry to search for groups. 
-// -// An example config: -// -// type: atlassian-crowd -// config: -// baseURL: https://crowd.example.com/context -// clientID: applogin -// clientSecret: appP4$$w0rd -// # users can be restricted by a list of groups -// groups: -// - admin -// # Prompt for username field -// usernamePrompt: Login -// preferredUsernameField: name -// -type Config struct { - BaseURL string `json:"baseURL"` - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - Groups []string `json:"groups"` - - // PreferredUsernameField allows users to set the field to any of the - // following values: "key", "name" or "email". - // If unset, the preferred_username field will remain empty. - PreferredUsernameField string `json:"preferredUsernameField"` - - // UsernamePrompt allows users to override the username attribute (displayed - // in the username/password prompt). If unset, the handler will use. - // "Username". - UsernamePrompt string `json:"usernamePrompt"` -} - -type crowdUser struct { - Key string - Name string - Active bool - Email string -} - -type crowdGroups struct { - Groups []struct { - Name string - } `json:"groups"` -} - -type crowdAuthentication struct { - Token string - User struct { - Name string - } `json:"user"` - CreatedDate uint64 `json:"created-date"` - ExpiryDate uint64 `json:"expiry-date"` -} - -type crowdAuthenticationError struct { - Reason string - Message string -} - -// Open returns a strategy for logging in through Atlassian Crowd -func (c *Config) Open(_ string, logger log.Logger) (connector.Connector, error) { - if c.BaseURL == "" { - return nil, fmt.Errorf("crowd: no baseURL provided for crowd connector") - } - return &crowdConnector{Config: *c, logger: logger}, nil -} - -type crowdConnector struct { - Config - logger log.Logger -} - -var ( - _ connector.PasswordConnector = (*crowdConnector)(nil) - _ connector.RefreshConnector = (*crowdConnector)(nil) -) - -type refreshData struct { - Username string `json:"username"` -} - -func (c *crowdConnector) Login(ctx context.Context, s connector.Scopes, username, password string) (ident connector.Identity, validPass bool, err error) { - // make this check to avoid empty passwords. - if password == "" { - return connector.Identity{}, false, nil - } - - // We want to return a different error if the user's password is incorrect vs - // if there was an error. - incorrectPass := false - var user crowdUser - - client := c.crowdAPIClient() - - if incorrectPass, err = c.authenticateWithPassword(ctx, client, username, password); err != nil { - return connector.Identity{}, false, err - } - - if incorrectPass { - return connector.Identity{}, false, nil - } - - if user, err = c.user(ctx, client, username); err != nil { - return connector.Identity{}, false, err - } - - if ident, err = c.identityFromCrowdUser(user); err != nil { - return connector.Identity{}, false, err - } - - if s.Groups { - userGroups, err := c.getGroups(ctx, client, s.Groups, ident.Username) - if err != nil { - return connector.Identity{}, false, fmt.Errorf("crowd: failed to query groups: %v", err) - } - ident.Groups = userGroups - } - - if s.OfflineAccess { - refresh := refreshData{Username: username} - // Encode entry for following up requests such as the groups query and refresh attempts. 
- if ident.ConnectorData, err = json.Marshal(refresh); err != nil { - return connector.Identity{}, false, fmt.Errorf("crowd: marshal refresh data: %v", err) - } - } - - return ident, true, nil -} - -func (c *crowdConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { - var data refreshData - if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { - return ident, fmt.Errorf("crowd: failed to unmarshal internal data: %v", err) - } - - var user crowdUser - client := c.crowdAPIClient() - - user, err := c.user(ctx, client, data.Username) - if err != nil { - return ident, fmt.Errorf("crowd: get user %q: %v", data.Username, err) - } - - newIdent, err := c.identityFromCrowdUser(user) - if err != nil { - return ident, err - } - newIdent.ConnectorData = ident.ConnectorData - - // If user exists, authenticate it to prolong sso session. - err = c.authenticateUser(ctx, client, data.Username) - if err != nil { - return ident, fmt.Errorf("crowd: authenticate user: %v", err) - } - - if s.Groups { - userGroups, err := c.getGroups(ctx, client, s.Groups, newIdent.Username) - if err != nil { - return connector.Identity{}, fmt.Errorf("crowd: failed to query groups: %v", err) - } - newIdent.Groups = userGroups - } - return newIdent, nil -} - -func (c *crowdConnector) Prompt() string { - return c.UsernamePrompt -} - -func (c *crowdConnector) crowdAPIClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - }, - } -} - -// authenticateWithPassword creates a new session for user and validates a password with Crowd API -func (c *crowdConnector) authenticateWithPassword(ctx context.Context, client *http.Client, username string, password string) (invalidPass bool, err error) { - req, err := c.crowdUserManagementRequest(ctx, - "POST", - "/session", - struct { - Username string `json:"username"` - Password string `json:"password"` - }{Username: username, Password: password}, - ) - if err != nil { - return false, fmt.Errorf("crowd: new auth pass api request %v", err) - } - - resp, err := client.Do(req) - if err != nil { - return false, fmt.Errorf("crowd: api request %v", err) - } - defer resp.Body.Close() - - body, err := c.validateCrowdResponse(resp) - if err != nil { - return false, err - } - - if resp.StatusCode != http.StatusCreated { - var authError crowdAuthenticationError - if err := json.Unmarshal(body, &authError); err != nil { - return false, fmt.Errorf("unmarshal auth pass response: %d %v %q", resp.StatusCode, err, string(body)) - } - - if authError.Reason == "INVALID_USER_AUTHENTICATION" { - return true, nil - } - - return false, fmt.Errorf("%s: %s", resp.Status, authError.Message) - } - - var authResponse crowdAuthentication - - if err := json.Unmarshal(body, &authResponse); err != nil { - return false, fmt.Errorf("decode auth response: %v", err) - } - - return false, nil -} - -// authenticateUser creates a new session for user without password validations with Crowd API -func (c *crowdConnector) authenticateUser(ctx context.Context, client *http.Client, username string) error { - req, err := c.crowdUserManagementRequest(ctx, - "POST", - "/session?validate-password=false", - struct { - Username string `json:"username"` - }{Username: 
username}, - ) - if err != nil { - return fmt.Errorf("crowd: new auth api request %v", err) - } - - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("crowd: api request %v", err) - } - defer resp.Body.Close() - - body, err := c.validateCrowdResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusCreated { - return fmt.Errorf("%s: %s", resp.Status, body) - } - - var authResponse crowdAuthentication - - if err := json.Unmarshal(body, &authResponse); err != nil { - return fmt.Errorf("decode auth response: %v", err) - } - - return nil -} - -// user retrieves user info from Crowd API -func (c *crowdConnector) user(ctx context.Context, client *http.Client, username string) (crowdUser, error) { - var user crowdUser - - req, err := c.crowdUserManagementRequest(ctx, - "GET", - fmt.Sprintf("/user?username=%s", username), - nil, - ) - if err != nil { - return user, fmt.Errorf("crowd: new user api request %v", err) - } - - resp, err := client.Do(req) - if err != nil { - return user, fmt.Errorf("crowd: api request %v", err) - } - defer resp.Body.Close() - - body, err := c.validateCrowdResponse(resp) - if err != nil { - return user, err - } - - if resp.StatusCode != http.StatusOK { - return user, fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.Unmarshal(body, &user); err != nil { - return user, fmt.Errorf("failed to decode response: %v", err) - } - - return user, nil -} - -// groups retrieves groups from Crowd API -func (c *crowdConnector) groups(ctx context.Context, client *http.Client, username string) (userGroups []string, err error) { - var crowdGroups crowdGroups - - req, err := c.crowdUserManagementRequest(ctx, - "GET", - fmt.Sprintf("/user/group/nested?username=%s", username), - nil, - ) - if err != nil { - return nil, fmt.Errorf("crowd: new groups api request %v", err) - } - - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("crowd: api request %v", err) - } - defer resp.Body.Close() - - body, err := c.validateCrowdResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.Unmarshal(body, &crowdGroups); err != nil { - return nil, fmt.Errorf("failed to decode response: %v", err) - } - - for _, group := range crowdGroups.Groups { - userGroups = append(userGroups, group.Name) - } - - return userGroups, nil -} - -// identityFromCrowdUser converts crowdUser to Identity -func (c *crowdConnector) identityFromCrowdUser(user crowdUser) (connector.Identity, error) { - identity := connector.Identity{ - Username: user.Name, - UserID: user.Key, - Email: user.Email, - EmailVerified: true, - } - - switch c.PreferredUsernameField { - case "key": - identity.PreferredUsername = user.Key - case "name": - identity.PreferredUsername = user.Name - case "email": - identity.PreferredUsername = user.Email - default: - if c.PreferredUsernameField != "" { - c.logger.Warnf("preferred_username left empty. 
Invalid crowd field mapped to preferred_username: %s", c.PreferredUsernameField) - } - } - - return identity, nil -} - -// getGroups retrieves a list of user's groups and filters it -func (c *crowdConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) { - crowdGroups, err := c.groups(ctx, client, userLogin) - if err != nil { - return nil, err - } - - if len(c.Groups) > 0 { - filteredGroups := groups.Filter(crowdGroups, c.Groups) - if len(filteredGroups) == 0 { - return nil, fmt.Errorf("crowd: user %q is not in any of the required groups", userLogin) - } - return filteredGroups, nil - } else if groupScope { - return crowdGroups, nil - } - - return nil, nil -} - -// crowdUserManagementRequest create a http.Request with basic auth, json payload and Accept header -func (c *crowdConnector) crowdUserManagementRequest(ctx context.Context, method string, apiURL string, jsonPayload interface{}) (*http.Request, error) { - var body io.Reader - if jsonPayload != nil { - jsonData, err := json.Marshal(jsonPayload) - if err != nil { - return nil, fmt.Errorf("crowd: marshal API json payload: %v", err) - } - body = bytes.NewReader(jsonData) - } - - req, err := http.NewRequest(method, fmt.Sprintf("%s/rest/usermanagement/1%s", c.BaseURL, apiURL), body) - if err != nil { - return nil, fmt.Errorf("new API req: %v", err) - } - req = req.WithContext(ctx) - - // Crowd API requires a basic auth - req.SetBasicAuth(c.ClientID, c.ClientSecret) - req.Header.Set("Accept", "application/json") - if jsonPayload != nil { - req.Header.Set("Content-type", "application/json") - } - return req, nil -} - -// validateCrowdResponse validates unique not JSON responses from API -func (c *crowdConnector) validateCrowdResponse(resp *http.Response) ([]byte, error) { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("crowd: read user body: %v", err) - } - - if resp.StatusCode == http.StatusForbidden && strings.Contains(string(body), "The server understood the request but refuses to authorize it.") { - c.logger.Debugf("crowd response validation failed: %s", string(body)) - return nil, fmt.Errorf("dex is forbidden from making requests to the Atlassian Crowd application by URL %q", c.BaseURL) - } - - if resp.StatusCode == http.StatusUnauthorized && string(body) == "Application failed to authenticate" { - c.logger.Debugf("crowd response validation failed: %s", string(body)) - return nil, fmt.Errorf("dex failed to authenticate Crowd Application with ID %q", c.ClientID) - } - return body, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go b/vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go deleted file mode 100644 index f58c4b6..0000000 --- a/vendor/github.com/dexidp/dex/connector/authproxy/authproxy.go +++ /dev/null @@ -1,57 +0,0 @@ -// Package authproxy implements a connector which relies on external -// authentication (e.g. mod_auth in Apache2) and returns an identity with the -// HTTP header X-Remote-User as verified email. -package authproxy - -import ( - "fmt" - "net/http" - "net/url" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/log" -) - -// Config holds the configuration parameters for a connector which returns an -// identity with the HTTP header X-Remote-User as verified email. -type Config struct{} - -// Open returns an authentication strategy which requires no user interaction. 
-func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - return &callback{logger: logger, pathSuffix: "/" + id}, nil -} - -// Callback is a connector which returns an identity with the HTTP header -// X-Remote-User as verified email. -type callback struct { - logger log.Logger - pathSuffix string -} - -// LoginURL returns the URL to redirect the user to login with. -func (m *callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { - u, err := url.Parse(callbackURL) - if err != nil { - return "", fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err) - } - u.Path = u.Path + m.pathSuffix - v := u.Query() - v.Set("state", state) - u.RawQuery = v.Encode() - return u.String(), nil -} - -// HandleCallback parses the request and returns the user's identity -func (m *callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) { - remoteUser := r.Header.Get("X-Remote-User") - if remoteUser == "" { - return connector.Identity{}, fmt.Errorf("required HTTP header X-Remote-User is not set") - } - // TODO: add support for X-Remote-Group, see - // https://kubernetes.io/docs/admin/authentication/#authenticating-proxy - return connector.Identity{ - UserID: remoteUser, // TODO: figure out if this is a bad ID value. - Email: remoteUser, - EmailVerified: true, - }, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go b/vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go deleted file mode 100644 index d8ccf2e..0000000 --- a/vendor/github.com/dexidp/dex/connector/bitbucketcloud/bitbucketcloud.go +++ /dev/null @@ -1,427 +0,0 @@ -// Package bitbucketcloud provides authentication strategies using Bitbucket Cloud. -package bitbucketcloud - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "sync" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/bitbucket" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -const ( - apiURL = "https://api.bitbucket.org/2.0" - - // Bitbucket requires this scope to access '/user' API endpoints. - scopeAccount = "account" - // Bitbucket requires this scope to access '/user/emails' API endpoints. - scopeEmail = "email" - // Bitbucket requires this scope to access '/teams' API endpoints - // which are used when a client includes the 'groups' scope. - scopeTeams = "team" -) - -// Config holds configuration options for Bitbucket logins. -type Config struct { - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - Teams []string `json:"teams"` -} - -// Open returns a strategy for logging in through Bitbucket. 
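As an aside on the authproxy connector removed just above: it delegates authentication entirely to a reverse proxy (e.g. mod_auth) and trusts the X-Remote-User header that proxy sets. A minimal, hypothetical sketch of that header-trusting pattern in plain net/http, independent of dex (handler names and the listen address are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

// requireRemoteUser rejects any request that did not come through the
// authenticating proxy, i.e. any request missing X-Remote-User.
func requireRemoteUser(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Remote-User") == "" {
			http.Error(w, "required HTTP header X-Remote-User is not set", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	whoami := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Only the proxy, never the end user, is trusted to set this header.
		fmt.Fprintf(w, "hello, %s\n", r.Header.Get("X-Remote-User"))
	})
	// Bind to loopback so only the local proxy can reach the backend.
	http.ListenAndServe("127.0.0.1:8080", requireRemoteUser(whoami))
}
```

One operational detail worth noting with this pattern: the proxy must strip any client-supplied X-Remote-User before setting its own, otherwise the header cannot be trusted.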
-func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - b := bitbucketConnector{ - redirectURI: c.RedirectURI, - teams: c.Teams, - clientID: c.ClientID, - clientSecret: c.ClientSecret, - apiURL: apiURL, - logger: logger, - } - - return &b, nil -} - -type connectorData struct { - AccessToken string `json:"accessToken"` - RefreshToken string `json:"refreshToken"` - Expiry time.Time `json:"expiry"` -} - -var ( - _ connector.CallbackConnector = (*bitbucketConnector)(nil) - _ connector.RefreshConnector = (*bitbucketConnector)(nil) -) - -type bitbucketConnector struct { - redirectURI string - teams []string - clientID string - clientSecret string - logger log.Logger - apiURL string - - // the following are used only for tests - hostName string - httpClient *http.Client -} - -// groupsRequired returns whether dex requires Bitbucket's 'team' scope. -func (b *bitbucketConnector) groupsRequired(groupScope bool) bool { - return len(b.teams) > 0 || groupScope -} - -func (b *bitbucketConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { - bitbucketScopes := []string{scopeAccount, scopeEmail} - if b.groupsRequired(scopes.Groups) { - bitbucketScopes = append(bitbucketScopes, scopeTeams) - } - - endpoint := bitbucket.Endpoint - if b.hostName != "" { - endpoint = oauth2.Endpoint{ - AuthURL: "https://" + b.hostName + "/site/oauth2/authorize", - TokenURL: "https://" + b.hostName + "/site/oauth2/access_token", - } - } - - return &oauth2.Config{ - ClientID: b.clientID, - ClientSecret: b.clientSecret, - Endpoint: endpoint, - Scopes: bitbucketScopes, - } -} - -func (b *bitbucketConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { - if b.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, b.redirectURI) - } - - return b.oauth2Config(scopes).AuthCodeURL(state), nil -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} - -func (b *bitbucketConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - - oauth2Config := b.oauth2Config(s) - - ctx := r.Context() - if b.httpClient != nil { - ctx = context.WithValue(r.Context(), oauth2.HTTPClient, b.httpClient) - } - - token, err := oauth2Config.Exchange(ctx, q.Get("code")) - if err != nil { - return identity, fmt.Errorf("bitbucket: failed to get token: %v", err) - } - - client := oauth2Config.Client(ctx, token) - - user, err := b.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("bitbucket: get user: %v", err) - } - - identity = connector.Identity{ - UserID: user.UUID, - Username: user.Username, - Email: user.Email, - EmailVerified: true, - } - - if b.groupsRequired(s.Groups) { - groups, err := b.getGroups(ctx, client, s.Groups, user.Username) - if err != nil { - return identity, err - } - identity.Groups = groups - } - - if s.OfflineAccess { - data := connectorData{ - AccessToken: token.AccessToken, - RefreshToken: token.RefreshToken, - Expiry: token.Expiry, - } - connData, err := json.Marshal(data) - if err != nil { - return identity, fmt.Errorf("bitbucket: marshal connector data: %v", err) - } - identity.ConnectorData = connData - } - - return 
identity, nil -} - -// Refreshing tokens -// https://github.com/golang/oauth2/issues/84#issuecomment-332860871 -type tokenNotifyFunc func(*oauth2.Token) error - -// notifyRefreshTokenSource is essentially `oauth2.ReuseTokenSource` with `TokenNotifyFunc` added. -type notifyRefreshTokenSource struct { - new oauth2.TokenSource - mu sync.Mutex // guards t - t *oauth2.Token - f tokenNotifyFunc // called when token refreshed so new refresh token can be persisted -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *notifyRefreshTokenSource) Token() (*oauth2.Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, s.f(t) -} - -func (b *bitbucketConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - if len(identity.ConnectorData) == 0 { - return identity, errors.New("bitbucket: no upstream access token found") - } - - var data connectorData - if err := json.Unmarshal(identity.ConnectorData, &data); err != nil { - return identity, fmt.Errorf("bitbucket: unmarshal access token: %v", err) - } - - tok := &oauth2.Token{ - AccessToken: data.AccessToken, - RefreshToken: data.RefreshToken, - Expiry: data.Expiry, - } - - client := oauth2.NewClient(ctx, ¬ifyRefreshTokenSource{ - new: b.oauth2Config(s).TokenSource(ctx, tok), - t: tok, - f: func(tok *oauth2.Token) error { - data := connectorData{ - AccessToken: tok.AccessToken, - RefreshToken: tok.RefreshToken, - Expiry: tok.Expiry, - } - connData, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("bitbucket: marshal connector data: %v", err) - } - identity.ConnectorData = connData - return nil - }, - }) - - user, err := b.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("bitbucket: get user: %v", err) - } - - identity.Username = user.Username - identity.Email = user.Email - - if b.groupsRequired(s.Groups) { - groups, err := b.getGroups(ctx, client, s.Groups, user.Username) - if err != nil { - return identity, err - } - identity.Groups = groups - } - - return identity, nil -} - -// Bitbucket pagination wrapper -type pagedResponse struct { - Size int `json:"size"` - Page int `json:"page"` - PageLen int `json:"pagelen"` - Next *string `json:"next"` - Previous *string `json:"previous"` -} - -// user holds Bitbucket user information (relevant to dex) as defined by -// https://developer.atlassian.com/bitbucket/api/2/reference/resource/user -type user struct { - Username string `json:"username"` - UUID string `json:"uuid"` - Email string `json:"email"` -} - -// user queries the Bitbucket API for profile information using the provided client. -// -// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. 
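The pagedResponse wrapper above is what drives Bitbucket's cursor-style pagination: each page carries an optional next URL, and callers keep fetching until it disappears. A minimal, stdlib-only sketch of walking such pages (the page shape and function names are assumptions for illustration, not the removed connector's code):

```go
package paging

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

// page mirrors just enough of a Bitbucket-style paginated envelope to walk pages.
type page struct {
	Next   *string           `json:"next"`
	Values []json.RawMessage `json:"values"`
}

// collectAll fetches url and keeps following the "next" link until the API
// stops returning one, accumulating every value along the way.
func collectAll(ctx context.Context, client *http.Client, url string) ([]json.RawMessage, error) {
	var out []json.RawMessage
	for url != "" {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		resp, err := client.Do(req)
		if err != nil {
			return nil, err
		}

		var p page
		decodeErr := json.NewDecoder(resp.Body).Decode(&p)
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return nil, fmt.Errorf("fetch %s: unexpected status %s", url, resp.Status)
		}
		if decodeErr != nil {
			return nil, fmt.Errorf("decode page: %w", decodeErr)
		}

		out = append(out, p.Values...)
		if p.Next == nil {
			break // last page reached
		}
		url = *p.Next // advance to the next page; forgetting this is an easy way to loop forever
	}
	return out, nil
}
```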
-func (b *bitbucketConnector) user(ctx context.Context, client *http.Client) (user, error) { - // https://developer.atlassian.com/bitbucket/api/2/reference/resource/user - var ( - u user - err error - ) - - if err = get(ctx, client, b.apiURL+"/user", &u); err != nil { - return user{}, err - } - - if u.Email, err = b.userEmail(ctx, client); err != nil { - return user{}, err - } - - return u, nil -} - -// userEmail holds Bitbucket user email information as defined by -// https://developer.atlassian.com/bitbucket/api/2/reference/resource/user/emails -type userEmail struct { - IsPrimary bool `json:"is_primary"` - IsConfirmed bool `json:"is_confirmed"` - Email string `json:"email"` -} - -type userEmailResponse struct { - pagedResponse - Values []userEmail -} - -// userEmail returns the users primary, confirmed email -// -// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. -func (b *bitbucketConnector) userEmail(ctx context.Context, client *http.Client) (string, error) { - apiURL := b.apiURL + "/user/emails" - for { - // https://developer.atlassian.com/bitbucket/api/2/reference/resource/user/emails - var response userEmailResponse - - if err := get(ctx, client, apiURL, &response); err != nil { - return "", err - } - - for _, email := range response.Values { - if email.IsConfirmed && email.IsPrimary { - return email.Email, nil - } - } - - if response.Next == nil { - break - } - } - - return "", errors.New("bitbucket: user has no confirmed, primary email") -} - -// getGroups retrieves Bitbucket teams a user is in, if any. -func (b *bitbucketConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) { - bitbucketTeams, err := b.userTeams(ctx, client) - if err != nil { - return nil, err - } - - if len(b.teams) > 0 { - filteredTeams := groups.Filter(bitbucketTeams, b.teams) - if len(filteredTeams) == 0 { - return nil, fmt.Errorf("bitbucket: user %q is not in any of the required teams", userLogin) - } - return filteredTeams, nil - } else if groupScope { - return bitbucketTeams, nil - } - - return nil, nil -} - -type team struct { - Name string `json:"username"` // The "username" from Bitbucket Cloud is actually the team name here -} - -type userTeamsResponse struct { - pagedResponse - Values []team -} - -func (b *bitbucketConnector) userTeams(ctx context.Context, client *http.Client) ([]string, error) { - var teams []string - apiURL := b.apiURL + "/teams?role=member" - - for { - // https://developer.atlassian.com/bitbucket/api/2/reference/resource/teams - var response userTeamsResponse - - if err := get(ctx, client, apiURL, &response); err != nil { - return nil, fmt.Errorf("bitbucket: get user teams: %v", err) - } - - for _, team := range response.Values { - teams = append(teams, team.Name) - } - - if response.Next == nil { - break - } - } - - return teams, nil -} - -// get creates a "GET `apiURL`" request with context, sends the request using -// the client, and decodes the resulting response body into v. -// Any errors encountered when building requests, sending requests, and -// reading and decoding response data are returned. 
-func get(ctx context.Context, client *http.Client, apiURL string, v interface{}) error { - req, err := http.NewRequest("GET", apiURL, nil) - if err != nil { - return fmt.Errorf("bitbucket: new req: %v", err) - } - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("bitbucket: get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("bitbucket: read body: %s: %v", resp.Status, err) - } - return fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.NewDecoder(resp.Body).Decode(v); err != nil { - return fmt.Errorf("bitbucket: failed to decode response: %v", err) - } - - return nil -} diff --git a/vendor/github.com/dexidp/dex/connector/connector.go b/vendor/github.com/dexidp/dex/connector/connector.go deleted file mode 100644 index aab994b..0000000 --- a/vendor/github.com/dexidp/dex/connector/connector.go +++ /dev/null @@ -1,100 +0,0 @@ -// Package connector defines interfaces for federated identity strategies. -package connector - -import ( - "context" - "net/http" -) - -// Connector is a mechanism for federating login to a remote identity service. -// -// Implementations are expected to implement either the PasswordConnector or -// CallbackConnector interface. -type Connector interface{} - -// Scopes represents additional data requested by the clients about the end user. -type Scopes struct { - // The client has requested a refresh token from the server. - OfflineAccess bool - - // The client has requested group information about the end user. - Groups bool -} - -// Identity represents the ID Token claims supported by the server. -type Identity struct { - UserID string - Username string - PreferredUsername string - Email string - EmailVerified bool - - Groups []string - - // ConnectorData holds data used by the connector for subsequent requests after initial - // authentication, such as access tokens for upstream provides. - // - // This data is never shared with end users, OAuth clients, or through the API. - ConnectorData []byte -} - -// PasswordConnector is an interface implemented by connectors which take a -// username and password. -// Prompt() is used to inform the handler what to display in the password -// template. If this returns an empty string, it'll default to "Username". -type PasswordConnector interface { - Prompt() string - Login(ctx context.Context, s Scopes, username, password string) (identity Identity, validPassword bool, err error) -} - -// CallbackConnector is an interface implemented by connectors which use an OAuth -// style redirect flow to determine user information. -type CallbackConnector interface { - // The initial URL to redirect the user to. - // - // OAuth2 implementations should request different scopes from the upstream - // identity provider based on the scopes requested by the downstream client. - // For example, if the downstream client requests a refresh token from the - // server, the connector should also request a token from the provider. - // - // Many identity providers have arbitrary restrictions on refresh tokens. For - // example Google only allows a single refresh token per client/user/scopes - // combination, and wont return a refresh token even if offline access is - // requested if one has already been issues. 
There's no good general answer - // for these kind of restrictions, and may require this package to become more - // aware of the global set of user/connector interactions. - LoginURL(s Scopes, callbackURL, state string) (string, error) - - // Handle the callback to the server and return an identity. - HandleCallback(s Scopes, r *http.Request) (identity Identity, err error) -} - -// SAMLConnector represents SAML connectors which implement the HTTP POST binding. -// RelayState is handled by the server. -// -// See: https://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf -// "3.5 HTTP POST Binding" -type SAMLConnector interface { - // POSTData returns an encoded SAML request and SSO URL for the server to - // render a POST form with. - // - // POSTData should encode the provided request ID in the returned serialized - // SAML request. - POSTData(s Scopes, requestID string) (ssoURL, samlRequest string, err error) - - // HandlePOST decodes, verifies, and maps attributes from the SAML response. - // It passes the expected value of the "InResponseTo" response field, which - // the connector must ensure matches the response value. - // - // See: https://www.oasis-open.org/committees/download.php/35711/sstc-saml-core-errata-2.0-wd-06-diff.pdf - // "3.2.2 Complex Type StatusResponseType" - HandlePOST(s Scopes, samlResponse, inResponseTo string) (identity Identity, err error) -} - -// RefreshConnector is a connector that can update the client claims. -type RefreshConnector interface { - // Refresh is called when a client attempts to claim a refresh token. The - // connector should attempt to update the identity object to reflect any - // changes since the token was last refreshed. - Refresh(ctx context.Context, s Scopes, identity Identity) (Identity, error) -} diff --git a/vendor/github.com/dexidp/dex/connector/github/github.go b/vendor/github.com/dexidp/dex/connector/github/github.go deleted file mode 100644 index e38eb26..0000000 --- a/vendor/github.com/dexidp/dex/connector/github/github.go +++ /dev/null @@ -1,705 +0,0 @@ -// Package github provides authentication strategies using GitHub. -package github - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "regexp" - "strconv" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/github" - - "github.com/dexidp/dex/connector" - groups_pkg "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -const ( - apiURL = "https://api.github.com" - // GitHub requires this scope to access '/user' and '/user/emails' API endpoints. - scopeEmail = "user:email" - // GitHub requires this scope to access '/user/teams' and '/orgs' API endpoints - // which are used when a client includes the 'groups' scope. - scopeOrgs = "read:org" -) - -// Pagination URL patterns -// https://developer.github.com/v3/#pagination -var reNext = regexp.MustCompile("<([^>]+)>; rel=\"next\"") -var reLast = regexp.MustCompile("<([^>]+)>; rel=\"last\"") - -// Config holds configuration options for github logins. -type Config struct { - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - Org string `json:"org"` - Orgs []Org `json:"orgs"` - HostName string `json:"hostName"` - RootCA string `json:"rootCA"` - TeamNameField string `json:"teamNameField"` - LoadAllGroups bool `json:"loadAllGroups"` - UseLoginAsID bool `json:"useLoginAsID"` -} - -// Org holds org-team filters, in which teams are optional. 
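Stepping back to the Connector interfaces removed in connector.go just above: the smallest useful implementation is a PasswordConnector, which only has to provide Prompt and Login. A hypothetical, hard-coded sketch against those interfaces (purely illustrative; a real connector validates credentials against an external system, as the Crowd connector earlier in this diff does):

```go
package staticpass

import (
	"context"

	"github.com/dexidp/dex/connector"
)

// staticConn authenticates exactly one hard-coded user; it exists only to
// show the shape of the PasswordConnector contract.
type staticConn struct {
	username, password string
}

var _ connector.PasswordConnector = staticConn{}

// Prompt labels the username field on the login form; an empty string
// falls back to "Username".
func (c staticConn) Prompt() string { return "Demo user" }

// Login returns (identity, true, nil) only for the configured credentials.
// Returning a zero Identity with validPassword=false and a nil error means
// "wrong password", which is distinct from an operational failure.
func (c staticConn) Login(_ context.Context, _ connector.Scopes, username, password string) (connector.Identity, bool, error) {
	if username != c.username || password != c.password {
		return connector.Identity{}, false, nil
	}
	return connector.Identity{
		UserID:        "static-user",
		Username:      username,
		Email:         username + "@example.com",
		EmailVerified: true,
	}, true, nil
}
```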
-type Org struct { - // Organization name in github (not slug, full name). Only users in this github - // organization can authenticate. - Name string `json:"name"` - - // Names of teams in a github organization. A user will be able to - // authenticate if they are members of at least one of these teams. Users - // in the organization can authenticate if this field is omitted from the - // config file. - Teams []string `json:"teams,omitempty"` -} - -// Open returns a strategy for logging in through GitHub. -func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - if c.Org != "" { - // Return error if both 'org' and 'orgs' fields are used. - if len(c.Orgs) > 0 { - return nil, errors.New("github: cannot use both 'org' and 'orgs' fields simultaneously") - } - logger.Warn("github: legacy field 'org' being used. Switch to the newer 'orgs' field structure") - } - - g := githubConnector{ - redirectURI: c.RedirectURI, - org: c.Org, - orgs: c.Orgs, - clientID: c.ClientID, - clientSecret: c.ClientSecret, - apiURL: apiURL, - logger: logger, - useLoginAsID: c.UseLoginAsID, - } - - if c.HostName != "" { - // ensure this is a hostname and not a URL or path. - if strings.Contains(c.HostName, "/") { - return nil, errors.New("invalid hostname: hostname cannot contain `/`") - } - - g.hostName = c.HostName - g.apiURL = "https://" + c.HostName + "/api/v3" - } - - if c.RootCA != "" { - if c.HostName == "" { - return nil, errors.New("invalid connector config: Host name field required for a root certificate file") - } - g.rootCA = c.RootCA - - var err error - if g.httpClient, err = newHTTPClient(g.rootCA); err != nil { - return nil, fmt.Errorf("failed to create HTTP client: %v", err) - } - } - g.loadAllGroups = c.LoadAllGroups - - switch c.TeamNameField { - case "name", "slug", "both", "": - g.teamNameField = c.TeamNameField - default: - return nil, fmt.Errorf("invalid connector config: unsupported team name field value `%s`", c.TeamNameField) - } - - return &g, nil -} - -type connectorData struct { - // GitHub's OAuth2 tokens never expire. We don't need a refresh token. - AccessToken string `json:"accessToken"` -} - -var ( - _ connector.CallbackConnector = (*githubConnector)(nil) - _ connector.RefreshConnector = (*githubConnector)(nil) -) - -type githubConnector struct { - redirectURI string - org string - orgs []Org - clientID string - clientSecret string - logger log.Logger - // apiURL defaults to "https://api.github.com" - apiURL string - // hostName of the GitHub enterprise account. - hostName string - // Used to support untrusted/self-signed CA certs. - rootCA string - // HTTP Client that trusts the custom declared rootCA cert. - httpClient *http.Client - // optional choice between 'name' (default) or 'slug' - teamNameField string - // if set to true and no orgs are configured then connector loads all user claims (all orgs and team) - loadAllGroups bool - // if set to true will use the user's handle rather than their numeric id as the ID - useLoginAsID bool -} - -// groupsRequired returns whether dex requires GitHub's 'read:org' scope. Dex -// needs 'read:org' if 'orgs' or 'org' fields are populated in a config file. -// Clients can require 'groups' scope without setting 'orgs'/'org'. 
-func (c *githubConnector) groupsRequired(groupScope bool) bool { - return len(c.orgs) > 0 || c.org != "" || groupScope -} - -func (c *githubConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { - // 'read:org' scope is required by the GitHub API, and thus for dex to ensure - // a user is a member of orgs and teams provided in configs. - githubScopes := []string{scopeEmail} - if c.groupsRequired(scopes.Groups) { - githubScopes = append(githubScopes, scopeOrgs) - } - - endpoint := github.Endpoint - // case when it is a GitHub Enterprise account. - if c.hostName != "" { - endpoint = oauth2.Endpoint{ - AuthURL: "https://" + c.hostName + "/login/oauth/authorize", - TokenURL: "https://" + c.hostName + "/login/oauth/access_token", - } - } - - return &oauth2.Config{ - ClientID: c.clientID, - ClientSecret: c.clientSecret, - Endpoint: endpoint, - Scopes: githubScopes, - RedirectURL: c.redirectURI, - } -} - -func (c *githubConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { - if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) - } - - return c.oauth2Config(scopes).AuthCodeURL(state), nil -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} - -// newHTTPClient returns a new HTTP client that trusts the custom declared rootCA cert. -func newHTTPClient(rootCA string) (*http.Client, error) { - tlsConfig := tls.Config{RootCAs: x509.NewCertPool()} - rootCABytes, err := ioutil.ReadFile(rootCA) - if err != nil { - return nil, fmt.Errorf("failed to read root-ca: %v", err) - } - if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCABytes) { - return nil, fmt.Errorf("no certs found in root CA file %q", rootCA) - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tlsConfig, - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - }, - }, nil -} - -func (c *githubConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - - oauth2Config := c.oauth2Config(s) - - ctx := r.Context() - // GitHub Enterprise account - if c.httpClient != nil { - ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) - } - - token, err := oauth2Config.Exchange(ctx, q.Get("code")) - if err != nil { - return identity, fmt.Errorf("github: failed to get token: %v", err) - } - - client := oauth2Config.Client(ctx, token) - - user, err := c.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("github: get user: %v", err) - } - - username := user.Name - if username == "" { - username = user.Login - } - - identity = connector.Identity{ - UserID: strconv.Itoa(user.ID), - Username: username, - PreferredUsername: user.Login, - Email: user.Email, - EmailVerified: true, - } - if c.useLoginAsID { - identity.UserID = user.Login - } - - // Only set identity.Groups if 'orgs', 'org', or 'groups' scope are specified. 
- if c.groupsRequired(s.Groups) { - groups, err := c.getGroups(ctx, client, s.Groups, user.Login) - if err != nil { - return identity, err - } - identity.Groups = groups - } - - if s.OfflineAccess { - data := connectorData{AccessToken: token.AccessToken} - connData, err := json.Marshal(data) - if err != nil { - return identity, fmt.Errorf("marshal connector data: %v", err) - } - identity.ConnectorData = connData - } - - return identity, nil -} - -func (c *githubConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - if len(identity.ConnectorData) == 0 { - return identity, errors.New("no upstream access token found") - } - - var data connectorData - if err := json.Unmarshal(identity.ConnectorData, &data); err != nil { - return identity, fmt.Errorf("github: unmarshal access token: %v", err) - } - - client := c.oauth2Config(s).Client(ctx, &oauth2.Token{AccessToken: data.AccessToken}) - user, err := c.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("github: get user: %v", err) - } - - username := user.Name - if username == "" { - username = user.Login - } - identity.Username = username - identity.PreferredUsername = user.Login - identity.Email = user.Email - - // Only set identity.Groups if 'orgs', 'org', or 'groups' scope are specified. - if c.groupsRequired(s.Groups) { - groups, err := c.getGroups(ctx, client, s.Groups, user.Login) - if err != nil { - return identity, err - } - identity.Groups = groups - } - - return identity, nil -} - -// getGroups retrieves GitHub orgs and teams a user is in, if any. -func (c *githubConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) { - if len(c.orgs) > 0 { - return c.groupsForOrgs(ctx, client, userLogin) - } else if c.org != "" { - return c.teamsForOrg(ctx, client, c.org) - } else if groupScope && c.loadAllGroups { - return c.userGroups(ctx, client) - } - return nil, nil -} - -// formatTeamName returns unique team name. -// Orgs might have the same team names. To make team name unique it should be prefixed with the org name. -func formatTeamName(org string, team string) string { - return fmt.Sprintf("%s:%s", org, team) -} - -// groupsForOrgs enforces org and team constraints on user authorization -// Cases in which user is authorized: -// N orgs, no teams: user is member of at least 1 org -// N orgs, M teams per org: user is member of any team from at least 1 org -// N-1 orgs, M teams per org, 1 org with no teams: user is member of any team -// from at least 1 org, or member of org with no teams -func (c *githubConnector) groupsForOrgs(ctx context.Context, client *http.Client, userName string) ([]string, error) { - groups := make([]string, 0) - var inOrgNoTeams bool - for _, org := range c.orgs { - inOrg, err := c.userInOrg(ctx, client, userName, org.Name) - if err != nil { - return nil, err - } - if !inOrg { - continue - } - - teams, err := c.teamsForOrg(ctx, client, org.Name) - if err != nil { - return nil, err - } - // User is in at least one org. User is authorized if no teams are specified - // in config; include all teams in claim. Otherwise filter out teams not in - // 'teams' list in config. 
- if len(org.Teams) == 0 { - inOrgNoTeams = true - } else if teams = groups_pkg.Filter(teams, org.Teams); len(teams) == 0 { - c.logger.Infof("github: user %q in org %q but no teams", userName, org.Name) - } - - for _, teamName := range teams { - groups = append(groups, formatTeamName(org.Name, teamName)) - } - } - if inOrgNoTeams || len(groups) > 0 { - return groups, nil - } - return groups, fmt.Errorf("github: user %q not in required orgs or teams", userName) -} - -func (c *githubConnector) userGroups(ctx context.Context, client *http.Client) ([]string, error) { - orgs, err := c.userOrgs(ctx, client) - if err != nil { - return nil, err - } - - orgTeams, err := c.userOrgTeams(ctx, client) - if err != nil { - return nil, err - } - - groups := make([]string, 0) - for _, o := range orgs { - groups = append(groups, o) - if teams, ok := orgTeams[o]; ok { - for _, t := range teams { - groups = append(groups, formatTeamName(o, t)) - } - } - } - - return groups, nil -} - -// userOrgs retrieves list of current user orgs -func (c *githubConnector) userOrgs(ctx context.Context, client *http.Client) ([]string, error) { - groups := make([]string, 0) - apiURL := c.apiURL + "/user/orgs" - for { - // https://developer.github.com/v3/orgs/#list-your-organizations - var ( - orgs []org - err error - ) - if apiURL, err = get(ctx, client, apiURL, &orgs); err != nil { - return nil, fmt.Errorf("github: get orgs: %v", err) - } - - for _, o := range orgs { - groups = append(groups, o.Login) - } - - if apiURL == "" { - break - } - } - - return groups, nil -} - -// userOrgTeams retrieves teams which current user belongs to. -// Method returns a map where key is an org name and value list of teams under the org. -func (c *githubConnector) userOrgTeams(ctx context.Context, client *http.Client) (map[string][]string, error) { - groups := make(map[string][]string) - apiURL := c.apiURL + "/user/teams" - for { - // https://developer.github.com/v3/orgs/teams/#list-user-teams - var ( - teams []team - err error - ) - if apiURL, err = get(ctx, client, apiURL, &teams); err != nil { - return nil, fmt.Errorf("github: get teams: %v", err) - } - - for _, t := range teams { - groups[t.Org.Login] = append(groups[t.Org.Login], c.teamGroupClaims(t)...) - } - - if apiURL == "" { - break - } - } - - return groups, nil -} - -// get creates a "GET `apiURL`" request with context, sends the request using -// the client, and decodes the resulting response body into v. A pagination URL -// is returned if one exists. Any errors encountered when building requests, -// sending requests, and reading and decoding response data are returned. 
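In groupsForOrgs above, groups_pkg.Filter is what intersects the teams a user actually belongs to with the allowlist from the connector config; only the survivors become group claims. A minimal sketch of such an allowlist filter (an assumption for illustration, not dex's actual pkg/groups implementation):

```go
package groupfilter

// Filter returns the members of got that also appear in allowed, preserving
// the order of got. An empty result means the user matched none of the
// configured groups, which the callers above treat as "not authorized".
func Filter(got, allowed []string) []string {
	allow := make(map[string]struct{}, len(allowed))
	for _, g := range allowed {
		allow[g] = struct{}{}
	}
	var out []string
	for _, g := range got {
		if _, ok := allow[g]; ok {
			out = append(out, g)
		}
	}
	return out
}
```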
-func get(ctx context.Context, client *http.Client, apiURL string, v interface{}) (string, error) { - req, err := http.NewRequest("GET", apiURL, nil) - if err != nil { - return "", fmt.Errorf("github: new req: %v", err) - } - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - return "", fmt.Errorf("github: get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("github: read body: %v", err) - } - return "", fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.NewDecoder(resp.Body).Decode(v); err != nil { - return "", fmt.Errorf("failed to decode response: %v", err) - } - - return getPagination(apiURL, resp), nil -} - -// getPagination checks the "Link" header field for "next" or "last" pagination URLs, -// and returns "next" page URL or empty string to indicate that there are no more pages. -// Non empty next pages' URL is returned if both "last" and "next" URLs are found and next page -// URL is not equal to last. -// -// https://developer.github.com/v3/#pagination -func getPagination(apiURL string, resp *http.Response) string { - if resp == nil { - return "" - } - - links := resp.Header.Get("Link") - if len(reLast.FindStringSubmatch(links)) > 1 { - lastPageURL := reLast.FindStringSubmatch(links)[1] - if apiURL == lastPageURL { - return "" - } - } else { - return "" - } - - if len(reNext.FindStringSubmatch(links)) > 1 { - return reNext.FindStringSubmatch(links)[1] - } - - return "" -} - -// user holds GitHub user information (relevant to dex) as defined by -// https://developer.github.com/v3/users/#response-with-public-profile-information -type user struct { - Name string `json:"name"` - Login string `json:"login"` - ID int `json:"id"` - Email string `json:"email"` -} - -// user queries the GitHub API for profile information using the provided client. -// -// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. -func (c *githubConnector) user(ctx context.Context, client *http.Client) (user, error) { - // https://developer.github.com/v3/users/#get-the-authenticated-user - var u user - if _, err := get(ctx, client, c.apiURL+"/user", &u); err != nil { - return u, err - } - - // Only public user emails are returned by 'GET /user'. u.Email will be empty - // if a users' email is private. We must retrieve private emails explicitly. - if u.Email == "" { - var err error - if u.Email, err = c.userEmail(ctx, client); err != nil { - return u, err - } - } - return u, nil -} - -// userEmail holds GitHub user email information as defined by -// https://developer.github.com/v3/users/emails/#response -type userEmail struct { - Email string `json:"email"` - Verified bool `json:"verified"` - Primary bool `json:"primary"` - Visibility string `json:"visibility"` -} - -// userEmail queries the GitHub API for a users' email information using the -// provided client. Only returns the users' verified, primary email (private or -// public). -// -// The HTTP client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. 
-func (c *githubConnector) userEmail(ctx context.Context, client *http.Client) (string, error) { - apiURL := c.apiURL + "/user/emails" - for { - // https://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user - var ( - emails []userEmail - err error - ) - if apiURL, err = get(ctx, client, apiURL, &emails); err != nil { - return "", err - } - - for _, email := range emails { - /* - if GitHub Enterprise, set email.Verified to true - This change being made because GitHub Enterprise does not - support email verification. CircleCI indicated that GitHub - advised them not to check for verified emails - (https://circleci.com/enterprise/changelog/#1-47-1). - In addition, GitHub Enterprise support replied to a support - ticket with "There is no way to verify an email address in - GitHub Enterprise." - */ - if c.hostName != "" { - email.Verified = true - } - - if email.Verified && email.Primary { - return email.Email, nil - } - } - - if apiURL == "" { - break - } - } - - return "", errors.New("github: user has no verified, primary email") -} - -// userInOrg queries the GitHub API for a users' org membership. -// -// The HTTP passed client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. -func (c *githubConnector) userInOrg(ctx context.Context, client *http.Client, userName, orgName string) (bool, error) { - // requester == user, so GET-ing this endpoint should return 404/302 if user - // is not a member - // - // https://developer.github.com/v3/orgs/members/#check-membership - apiURL := fmt.Sprintf("%s/orgs/%s/members/%s", c.apiURL, orgName, userName) - - req, err := http.NewRequest("GET", apiURL, nil) - - if err != nil { - return false, fmt.Errorf("github: new req: %v", err) - } - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - return false, fmt.Errorf("github: get teams: %v", err) - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusNoContent: - case http.StatusFound, http.StatusNotFound: - c.logger.Infof("github: user %q not in org %q or application not authorized to read org data", userName, orgName) - default: - err = fmt.Errorf("github: unexpected return status: %q", resp.Status) - } - - // 204 if user is a member - return resp.StatusCode == http.StatusNoContent, err -} - -// teams holds GitHub a users' team information as defined by -// https://developer.github.com/v3/orgs/teams/#response-12 -type team struct { - Name string `json:"name"` - Org org `json:"organization"` - Slug string `json:"slug"` -} - -type org struct { - Login string `json:"login"` -} - -// teamsForOrg queries the GitHub API for team membership within a specific organization. -// -// The HTTP passed client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. -func (c *githubConnector) teamsForOrg(ctx context.Context, client *http.Client, orgName string) ([]string, error) { - apiURL, groups := c.apiURL+"/user/teams", []string{} - for { - // https://developer.github.com/v3/orgs/teams/#list-user-teams - var ( - teams []team - err error - ) - if apiURL, err = get(ctx, client, apiURL, &teams); err != nil { - return nil, fmt.Errorf("github: get teams: %v", err) - } - - for _, t := range teams { - if t.Org.Login == orgName { - groups = append(groups, c.teamGroupClaims(t)...) 
- } - } - - if apiURL == "" { - break - } - } - - return groups, nil -} - -// teamGroupClaims returns team slug if 'teamNameField' option is set to -// 'slug', returns the slug *and* name if set to 'both', otherwise returns team -// name. -func (c *githubConnector) teamGroupClaims(t team) []string { - switch c.teamNameField { - case "both": - return []string{t.Name, t.Slug} - case "slug": - return []string{t.Slug} - default: - return []string{t.Name} - } -} diff --git a/vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go b/vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go deleted file mode 100644 index e406014..0000000 --- a/vendor/github.com/dexidp/dex/connector/gitlab/gitlab.go +++ /dev/null @@ -1,300 +0,0 @@ -// Package gitlab provides authentication strategies using Gitlab. -package gitlab - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strconv" - - "golang.org/x/oauth2" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -const ( - // read operations of the /api/v4/user endpoint - scopeUser = "read_user" - // used to retrieve groups from /oauth/userinfo - // https://docs.gitlab.com/ee/integration/openid_connect_provider.html - scopeOpenID = "openid" -) - -// Config holds configuration options for gitlab logins. -type Config struct { - BaseURL string `json:"baseURL"` - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - Groups []string `json:"groups"` - UseLoginAsID bool `json:"useLoginAsID"` -} - -type gitlabUser struct { - ID int - Name string - Username string - State string - Email string - IsAdmin bool -} - -// Open returns a strategy for logging in through GitLab. -func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - if c.BaseURL == "" { - c.BaseURL = "https://gitlab.com" - } - return &gitlabConnector{ - baseURL: c.BaseURL, - redirectURI: c.RedirectURI, - clientID: c.ClientID, - clientSecret: c.ClientSecret, - logger: logger, - groups: c.Groups, - useLoginAsID: c.UseLoginAsID, - }, nil -} - -type connectorData struct { - // GitLab's OAuth2 tokens never expire. We don't need a refresh token. 
- AccessToken string `json:"accessToken"` -} - -var ( - _ connector.CallbackConnector = (*gitlabConnector)(nil) - _ connector.RefreshConnector = (*gitlabConnector)(nil) -) - -type gitlabConnector struct { - baseURL string - redirectURI string - groups []string - clientID string - clientSecret string - logger log.Logger - httpClient *http.Client - // if set to true will use the user's handle rather than their numeric id as the ID - useLoginAsID bool -} - -func (c *gitlabConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { - gitlabScopes := []string{scopeUser} - if c.groupsRequired(scopes.Groups) { - gitlabScopes = []string{scopeUser, scopeOpenID} - } - - gitlabEndpoint := oauth2.Endpoint{AuthURL: c.baseURL + "/oauth/authorize", TokenURL: c.baseURL + "/oauth/token"} - return &oauth2.Config{ - ClientID: c.clientID, - ClientSecret: c.clientSecret, - Endpoint: gitlabEndpoint, - Scopes: gitlabScopes, - RedirectURL: c.redirectURI, - } -} - -func (c *gitlabConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { - if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", c.redirectURI, callbackURL) - } - return c.oauth2Config(scopes).AuthCodeURL(state), nil -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} - -func (c *gitlabConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - - oauth2Config := c.oauth2Config(s) - - ctx := r.Context() - if c.httpClient != nil { - ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) - } - - token, err := oauth2Config.Exchange(ctx, q.Get("code")) - if err != nil { - return identity, fmt.Errorf("gitlab: failed to get token: %v", err) - } - - client := oauth2Config.Client(ctx, token) - - user, err := c.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("gitlab: get user: %v", err) - } - - username := user.Name - if username == "" { - username = user.Email - } - identity = connector.Identity{ - UserID: strconv.Itoa(user.ID), - Username: username, - PreferredUsername: user.Username, - Email: user.Email, - EmailVerified: true, - } - if c.useLoginAsID { - identity.UserID = user.Username - } - - if c.groupsRequired(s.Groups) { - groups, err := c.getGroups(ctx, client, s.Groups, user.Username) - if err != nil { - return identity, fmt.Errorf("gitlab: get groups: %v", err) - } - identity.Groups = groups - } - - if s.OfflineAccess { - data := connectorData{AccessToken: token.AccessToken} - connData, err := json.Marshal(data) - if err != nil { - return identity, fmt.Errorf("marshal connector data: %v", err) - } - identity.ConnectorData = connData - } - - return identity, nil -} - -func (c *gitlabConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { - if len(ident.ConnectorData) == 0 { - return ident, errors.New("no upstream access token found") - } - - var data connectorData - if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { - return ident, fmt.Errorf("gitlab: unmarshal access token: %v", err) - } - - client := c.oauth2Config(s).Client(ctx, &oauth2.Token{AccessToken: data.AccessToken}) - user, err 
:= c.user(ctx, client) - if err != nil { - return ident, fmt.Errorf("gitlab: get user: %v", err) - } - - username := user.Name - if username == "" { - username = user.Email - } - ident.Username = username - ident.PreferredUsername = user.Username - ident.Email = user.Email - - if c.groupsRequired(s.Groups) { - groups, err := c.getGroups(ctx, client, s.Groups, user.Username) - if err != nil { - return ident, fmt.Errorf("gitlab: get groups: %v", err) - } - ident.Groups = groups - } - return ident, nil -} - -func (c *gitlabConnector) groupsRequired(groupScope bool) bool { - return len(c.groups) > 0 || groupScope -} - -// user queries the GitLab API for profile information using the provided client. The HTTP -// client is expected to be constructed by the golang.org/x/oauth2 package, which inserts -// a bearer token as part of the request. -func (c *gitlabConnector) user(ctx context.Context, client *http.Client) (gitlabUser, error) { - var u gitlabUser - req, err := http.NewRequest("GET", c.baseURL+"/api/v4/user", nil) - if err != nil { - return u, fmt.Errorf("gitlab: new req: %v", err) - } - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - return u, fmt.Errorf("gitlab: get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return u, fmt.Errorf("gitlab: read body: %v", err) - } - return u, fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { - return u, fmt.Errorf("failed to decode response: %v", err) - } - return u, nil -} - -type userInfo struct { - Groups []string -} - -// userGroups queries the GitLab API for group membership. -// -// The HTTP passed client is expected to be constructed by the golang.org/x/oauth2 package, -// which inserts a bearer token as part of the request. 
-func (c *gitlabConnector) userGroups(ctx context.Context, client *http.Client) ([]string, error) { - req, err := http.NewRequest("GET", c.baseURL+"/oauth/userinfo", nil) - if err != nil { - return nil, fmt.Errorf("gitlab: new req: %v", err) - } - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("gitlab: get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("gitlab: read body: %v", err) - } - return nil, fmt.Errorf("%s: %s", resp.Status, body) - } - var u userInfo - if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { - return nil, fmt.Errorf("failed to decode response: %v", err) - } - - return u.Groups, nil -} - -func (c *gitlabConnector) getGroups(ctx context.Context, client *http.Client, groupScope bool, userLogin string) ([]string, error) { - gitlabGroups, err := c.userGroups(ctx, client) - if err != nil { - return nil, err - } - - if len(c.groups) > 0 { - filteredGroups := groups.Filter(gitlabGroups, c.groups) - if len(filteredGroups) == 0 { - return nil, fmt.Errorf("gitlab: user %q is not in any of the required groups", userLogin) - } - return filteredGroups, nil - } else if groupScope { - return gitlabGroups, nil - } - - return nil, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/google/google.go b/vendor/github.com/dexidp/dex/connector/google/google.go deleted file mode 100644 index 0e82385..0000000 --- a/vendor/github.com/dexidp/dex/connector/google/google.go +++ /dev/null @@ -1,291 +0,0 @@ -// Package google implements logging in through Google's OpenID Connect provider. -package google - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "net/http" - "time" - - "github.com/coreos/go-oidc" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - admin "google.golang.org/api/admin/directory/v1" - - "github.com/dexidp/dex/connector" - pkg_groups "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -const ( - issuerURL = "https://accounts.google.com" -) - -// Config holds configuration options for Google logins. -type Config struct { - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - - Scopes []string `json:"scopes"` // defaults to "profile" and "email" - - // Optional list of whitelisted domains - // If this field is nonempty, only users from a listed domain will be allowed to log in - HostedDomains []string `json:"hostedDomains"` - - // Optional list of whitelisted groups - // If this field is nonempty, only users from a listed group will be allowed to log in - Groups []string `json:"groups"` - - // Optional path to service account json - // If nonempty, and groups claim is made, will use authentication from file to - // check groups with the admin directory api - ServiceAccountFilePath string `json:"serviceAccountFilePath"` - - // Required if ServiceAccountFilePath - // The email of a GSuite super user which the service account will impersonate - // when listing groups - AdminEmail string -} - -// Open returns a connector which can be used to login users through Google. 
-func (c *Config) Open(id string, logger log.Logger) (conn connector.Connector, err error) { - ctx, cancel := context.WithCancel(context.Background()) - - provider, err := oidc.NewProvider(ctx, issuerURL) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to get provider: %v", err) - } - - scopes := []string{oidc.ScopeOpenID} - if len(c.Scopes) > 0 { - scopes = append(scopes, c.Scopes...) - } else { - scopes = append(scopes, "profile", "email") - } - - srv, err := createDirectoryService(c.ServiceAccountFilePath, c.AdminEmail) - if err != nil { - cancel() - return nil, fmt.Errorf("could not create directory service: %v", err) - } - - clientID := c.ClientID - return &googleConnector{ - redirectURI: c.RedirectURI, - oauth2Config: &oauth2.Config{ - ClientID: clientID, - ClientSecret: c.ClientSecret, - Endpoint: provider.Endpoint(), - Scopes: scopes, - RedirectURL: c.RedirectURI, - }, - verifier: provider.Verifier( - &oidc.Config{ClientID: clientID}, - ), - logger: logger, - cancel: cancel, - hostedDomains: c.HostedDomains, - groups: c.Groups, - serviceAccountFilePath: c.ServiceAccountFilePath, - adminEmail: c.AdminEmail, - adminSrv: srv, - }, nil -} - -var ( - _ connector.CallbackConnector = (*googleConnector)(nil) - _ connector.RefreshConnector = (*googleConnector)(nil) -) - -type googleConnector struct { - redirectURI string - oauth2Config *oauth2.Config - verifier *oidc.IDTokenVerifier - cancel context.CancelFunc - logger log.Logger - hostedDomains []string - groups []string - serviceAccountFilePath string - adminEmail string - adminSrv *admin.Service -} - -func (c *googleConnector) Close() error { - c.cancel() - return nil -} - -func (c *googleConnector) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { - if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) - } - - var opts []oauth2.AuthCodeOption - if len(c.hostedDomains) > 0 { - preferredDomain := c.hostedDomains[0] - if len(c.hostedDomains) > 1 { - preferredDomain = "*" - } - opts = append(opts, oauth2.SetAuthURLParam("hd", preferredDomain)) - } - - if s.OfflineAccess { - opts = append(opts, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "consent")) - } - return c.oauth2Config.AuthCodeURL(state, opts...), nil -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} - -func (c *googleConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - token, err := c.oauth2Config.Exchange(r.Context(), q.Get("code")) - if err != nil { - return identity, fmt.Errorf("google: failed to get token: %v", err) - } - - return c.createIdentity(r.Context(), identity, s, token) -} - -func (c *googleConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - t := &oauth2.Token{ - RefreshToken: string(identity.ConnectorData), - Expiry: time.Now().Add(-time.Hour), - } - token, err := c.oauth2Config.TokenSource(ctx, t).Token() - if err != nil { - return identity, fmt.Errorf("google: failed to get token: %v", err) - } - - return c.createIdentity(ctx, identity, s, token) -} - -func (c *googleConnector) 
createIdentity(ctx context.Context, identity connector.Identity, s connector.Scopes, token *oauth2.Token) (connector.Identity, error) { - rawIDToken, ok := token.Extra("id_token").(string) - if !ok { - return identity, errors.New("google: no id_token in token response") - } - idToken, err := c.verifier.Verify(ctx, rawIDToken) - if err != nil { - return identity, fmt.Errorf("google: failed to verify ID Token: %v", err) - } - - var claims struct { - Username string `json:"name"` - Email string `json:"email"` - EmailVerified bool `json:"email_verified"` - HostedDomain string `json:"hd"` - } - if err := idToken.Claims(&claims); err != nil { - return identity, fmt.Errorf("oidc: failed to decode claims: %v", err) - } - - if len(c.hostedDomains) > 0 { - found := false - for _, domain := range c.hostedDomains { - if claims.HostedDomain == domain { - found = true - break - } - } - - if !found { - return identity, fmt.Errorf("oidc: unexpected hd claim %v", claims.HostedDomain) - } - } - - var groups []string - if s.Groups && c.adminEmail != "" && c.serviceAccountFilePath != "" { - groups, err = c.getGroups(claims.Email) - if err != nil { - return identity, fmt.Errorf("google: could not retrieve groups: %v", err) - } - - if len(c.groups) > 0 { - groups = pkg_groups.Filter(groups, c.groups) - if len(groups) == 0 { - return identity, fmt.Errorf("google: user %q is not in any of the required groups", claims.Username) - } - } - } - - identity = connector.Identity{ - UserID: idToken.Subject, - Username: claims.Username, - Email: claims.Email, - EmailVerified: claims.EmailVerified, - ConnectorData: []byte(token.RefreshToken), - Groups: groups, - } - return identity, nil -} - -// getGroups creates a connection to the admin directory service and lists -// all groups the user is a member of -func (c *googleConnector) getGroups(email string) ([]string, error) { - var userGroups []string - var err error - groupsList := &admin.Groups{} - for { - groupsList, err = c.adminSrv.Groups.List(). - UserKey(email).PageToken(groupsList.NextPageToken).Do() - if err != nil { - return nil, fmt.Errorf("could not list groups: %v", err) - } - - for _, group := range groupsList.Groups { - // TODO (joelspeed): Make desried group key configurable - userGroups = append(userGroups, group.Email) - } - - if groupsList.NextPageToken == "" { - break - } - } - - return userGroups, nil -} - -// createDirectoryService loads a google service account credentials file, -// sets up super user impersonation and creates an admin client for calling -// the google admin api -func createDirectoryService(serviceAccountFilePath string, email string) (*admin.Service, error) { - jsonCredentials, err := ioutil.ReadFile(serviceAccountFilePath) - if err != nil { - return nil, fmt.Errorf("error reading credentials from file: %v", err) - } - - config, err := google.JWTConfigFromJSON(jsonCredentials, admin.AdminDirectoryGroupReadonlyScope) - if err != nil { - return nil, fmt.Errorf("unable to parse client secret file to config: %v", err) - } - - // Impersonate an admin. This is mandatory for the admin APIs. 
- config.Subject = email - - ctx := context.Background() - client := config.Client(ctx) - - srv, err := admin.New(client) - if err != nil { - return nil, fmt.Errorf("unable to create directory service %v", err) - } - return srv, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/keystone/keystone.go b/vendor/github.com/dexidp/dex/connector/keystone/keystone.go deleted file mode 100644 index 92682f7..0000000 --- a/vendor/github.com/dexidp/dex/connector/keystone/keystone.go +++ /dev/null @@ -1,310 +0,0 @@ -// Package keystone provides authentication strategy using Keystone. -package keystone - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/log" -) - -type conn struct { - Domain string - Host string - AdminUsername string - AdminPassword string - Logger log.Logger -} - -type userKeystone struct { - Domain domainKeystone `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -type domainKeystone struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// Config holds the configuration parameters for Keystone connector. -// Keystone should expose API v3 -// An example config: -// connectors: -// type: keystone -// id: keystone -// name: Keystone -// config: -// keystoneHost: http://example:5000 -// domain: default -// keystoneUsername: demo -// keystonePassword: DEMO_PASS -type Config struct { - Domain string `json:"domain"` - Host string `json:"keystoneHost"` - AdminUsername string `json:"keystoneUsername"` - AdminPassword string `json:"keystonePassword"` -} - -type loginRequestData struct { - auth `json:"auth"` -} - -type auth struct { - Identity identity `json:"identity"` -} - -type identity struct { - Methods []string `json:"methods"` - Password password `json:"password"` -} - -type password struct { - User user `json:"user"` -} - -type user struct { - Name string `json:"name"` - Domain domain `json:"domain"` - Password string `json:"password"` -} - -type domain struct { - ID string `json:"id"` -} - -type token struct { - User userKeystone `json:"user"` -} - -type tokenResponse struct { - Token token `json:"token"` -} - -type group struct { - ID string `json:"id"` - Name string `json:"name"` -} - -type groupsResponse struct { - Groups []group `json:"groups"` -} - -type userResponse struct { - User struct { - Name string `json:"name"` - Email string `json:"email"` - ID string `json:"id"` - } `json:"user"` -} - -var ( - _ connector.PasswordConnector = &conn{} - _ connector.RefreshConnector = &conn{} -) - -// Open returns an authentication strategy using Keystone. 
-func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - return &conn{ - c.Domain, - c.Host, - c.AdminUsername, - c.AdminPassword, - logger}, nil -} - -func (p *conn) Close() error { return nil } - -func (p *conn) Login(ctx context.Context, scopes connector.Scopes, username, password string) (identity connector.Identity, validPassword bool, err error) { - resp, err := p.getTokenResponse(ctx, username, password) - if err != nil { - return identity, false, fmt.Errorf("keystone: error %v", err) - } - if resp.StatusCode/100 != 2 { - return identity, false, fmt.Errorf("keystone login: error %v", resp.StatusCode) - } - if resp.StatusCode != 201 { - return identity, false, nil - } - token := resp.Header.Get("X-Subject-Token") - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return identity, false, err - } - defer resp.Body.Close() - var tokenResp = new(tokenResponse) - err = json.Unmarshal(data, &tokenResp) - if err != nil { - return identity, false, fmt.Errorf("keystone: invalid token response: %v", err) - } - if scopes.Groups { - groups, err := p.getUserGroups(ctx, tokenResp.Token.User.ID, token) - if err != nil { - return identity, false, err - } - identity.Groups = groups - } - identity.Username = username - identity.UserID = tokenResp.Token.User.ID - - user, err := p.getUser(ctx, tokenResp.Token.User.ID, token) - if err != nil { - return identity, false, err - } - if user.User.Email != "" { - identity.Email = user.User.Email - identity.EmailVerified = true - } - - return identity, true, nil -} - -func (p *conn) Prompt() string { return "username" } - -func (p *conn) Refresh( - ctx context.Context, scopes connector.Scopes, identity connector.Identity) (connector.Identity, error) { - token, err := p.getAdminToken(ctx) - if err != nil { - return identity, fmt.Errorf("keystone: failed to obtain admin token: %v", err) - } - ok, err := p.checkIfUserExists(ctx, identity.UserID, token) - if err != nil { - return identity, err - } - if !ok { - return identity, fmt.Errorf("keystone: user %q does not exist", identity.UserID) - } - if scopes.Groups { - groups, err := p.getUserGroups(ctx, identity.UserID, token) - if err != nil { - return identity, err - } - identity.Groups = groups - } - return identity, nil -} - -func (p *conn) getTokenResponse(ctx context.Context, username, pass string) (response *http.Response, err error) { - client := &http.Client{} - jsonData := loginRequestData{ - auth: auth{ - Identity: identity{ - Methods: []string{"password"}, - Password: password{ - User: user{ - Name: username, - Domain: domain{ID: p.Domain}, - Password: pass, - }, - }, - }, - }, - } - jsonValue, err := json.Marshal(jsonData) - if err != nil { - return nil, err - } - // https://developer.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization - authTokenURL := p.Host + "/v3/auth/tokens/" - req, err := http.NewRequest("POST", authTokenURL, bytes.NewBuffer(jsonValue)) - if err != nil { - return nil, err - } - - req.Header.Set("Content-Type", "application/json") - req = req.WithContext(ctx) - - return client.Do(req) -} - -func (p *conn) getAdminToken(ctx context.Context) (string, error) { - resp, err := p.getTokenResponse(ctx, p.AdminUsername, p.AdminPassword) - if err != nil { - return "", err - } - defer resp.Body.Close() - - token := resp.Header.Get("X-Subject-Token") - return token, nil -} - -func (p *conn) checkIfUserExists(ctx context.Context, userID string, token string) (bool, error) { - user, err := p.getUser(ctx, userID, token) - 
return user != nil, err -} - -func (p *conn) getUser(ctx context.Context, userID string, token string) (*userResponse, error) { - // https://developer.openstack.org/api-ref/identity/v3/#show-user-details - userURL := p.Host + "/v3/users/" + userID - client := &http.Client{} - req, err := http.NewRequest("GET", userURL, nil) - if err != nil { - return nil, err - } - - req.Header.Set("X-Auth-Token", token) - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return nil, err - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - user := userResponse{} - err = json.Unmarshal(data, &user) - if err != nil { - return nil, err - } - - return &user, nil -} - -func (p *conn) getUserGroups(ctx context.Context, userID string, token string) ([]string, error) { - client := &http.Client{} - // https://developer.openstack.org/api-ref/identity/v3/#list-groups-to-which-a-user-belongs - groupsURL := p.Host + "/v3/users/" + userID + "/groups" - req, err := http.NewRequest("GET", groupsURL, nil) - if err != nil { - return nil, err - } - req.Header.Set("X-Auth-Token", token) - req = req.WithContext(ctx) - resp, err := client.Do(req) - if err != nil { - p.Logger.Errorf("keystone: error while fetching user %q groups\n", userID) - return nil, err - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var groupsResp = new(groupsResponse) - - err = json.Unmarshal(data, &groupsResp) - if err != nil { - return nil, err - } - - groups := make([]string, len(groupsResp.Groups)) - for i, group := range groupsResp.Groups { - groups[i] = group.Name - } - return groups, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh b/vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh deleted file mode 100644 index 8b0ea49..0000000 --- a/vendor/github.com/dexidp/dex/connector/ldap/gen-certs.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -e - -# Stolen from the coreos/matchbox repo. - -echo " -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name - -[req_distinguished_name] - -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -subjectAltName = @alt_names - -[alt_names] -DNS.101 = localhost -" > openssl.config - -openssl genrsa -out testdata/ca.key 2048 -openssl genrsa -out testdata/server.key 2048 - -openssl req \ - -x509 -new -nodes \ - -key testdata/ca.key \ - -days 10000 -out testdata/ca.crt \ - -subj "/CN=ldap-tests" - -openssl req \ - -new \ - -key testdata/server.key \ - -out testdata/server.csr \ - -subj "/CN=localhost" \ - -config openssl.config - -openssl x509 -req \ - -in testdata/server.csr \ - -CA testdata/ca.crt \ - -CAkey testdata/ca.key \ - -CAcreateserial \ - -out testdata/server.crt \ - -days 10000 \ - -extensions v3_req \ - -extfile openssl.config - -rm testdata/server.csr -rm testdata/ca.srl -rm openssl.config diff --git a/vendor/github.com/dexidp/dex/connector/ldap/ldap.go b/vendor/github.com/dexidp/dex/connector/ldap/ldap.go deleted file mode 100644 index 53f9062..0000000 --- a/vendor/github.com/dexidp/dex/connector/ldap/ldap.go +++ /dev/null @@ -1,629 +0,0 @@ -// Package ldap implements strategies for authenticating using the LDAP protocol. 
-package ldap - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io/ioutil" - "net" - - "gopkg.in/ldap.v2" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/log" -) - -// Config holds the configuration parameters for the LDAP connector. The LDAP -// connectors require executing two queries, the first to find the user based on -// the username and password given to the connector. The second to use the user -// entry to search for groups. -// -// An example config: -// -// type: ldap -// config: -// host: ldap.example.com:636 -// # The following field is required if using port 389. -// # insecureNoSSL: true -// rootCA: /etc/dex/ldap.ca -// bindDN: uid=seviceaccount,cn=users,dc=example,dc=com -// bindPW: password -// userSearch: -// # Would translate to the query "(&(objectClass=person)(uid=))" -// baseDN: cn=users,dc=example,dc=com -// filter: "(objectClass=person)" -// username: uid -// idAttr: uid -// emailAttr: mail -// nameAttr: name -// preferredUsernameAttr: uid -// groupSearch: -// # Would translate to the separate query per user matcher pair and aggregate results into a single group list: -// # "(&(|(objectClass=posixGroup)(objectClass=groupOfNames))(memberUid=))" -// # "(&(|(objectClass=posixGroup)(objectClass=groupOfNames))(member=))" -// baseDN: cn=groups,dc=example,dc=com -// filter: "(|(objectClass=posixGroup)(objectClass=groupOfNames))" -// userMatchers: -// - userAttr: uid -// groupAttr: memberUid -// # Use if full DN is needed and not available as any other attribute -// # Will only work if "DN" attribute does not exist in the record: -// - userAttr: DN -// groupAttr: member -// nameAttr: name -// - -type UserMatcher struct { - UserAttr string `json:"userAttr"` - GroupAttr string `json:"groupAttr"` -} - -type Config struct { - // The host and optional port of the LDAP server. If port isn't supplied, it will be - // guessed based on the TLS configuration. 389 or 636. - Host string `json:"host"` - - // Required if LDAP host does not use TLS. - InsecureNoSSL bool `json:"insecureNoSSL"` - - // Don't verify the CA. - InsecureSkipVerify bool `json:"insecureSkipVerify"` - - // Connect to the insecure port then issue a StartTLS command to negotiate a - // secure connection. If unsupplied secure connections will use the LDAPS - // protocol. - StartTLS bool `json:"startTLS"` - - // Path to a trusted root certificate file. - RootCA string `json:"rootCA"` - // Path to a client cert file generated by rootCA. - ClientCert string `json:"clientCert"` - // Path to a client private key file generated by rootCA. - ClientKey string `json:"clientKey"` - // Base64 encoded PEM data containing root CAs. - RootCAData []byte `json:"rootCAData"` - - // BindDN and BindPW for an application service account. The connector uses these - // credentials to search for users and groups. - BindDN string `json:"bindDN"` - BindPW string `json:"bindPW"` - - // UsernamePrompt allows users to override the username attribute (displayed - // in the username/password prompt). If unset, the handler will use - // "Username". - UsernamePrompt string `json:"usernamePrompt"` - - // User entry search configuration. - UserSearch struct { - // BaseDN to start the search from. For example "cn=users,dc=example,dc=com" - BaseDN string `json:"baseDN"` - - // Optional filter to apply when searching the directory. For example "(objectClass=person)" - Filter string `json:"filter"` - - // Attribute to match against the inputted username. 
This will be translated and combined - // with the other filter as "(=)". - Username string `json:"username"` - - // Can either be: - // * "sub" - search the whole sub tree - // * "one" - only search one level - Scope string `json:"scope"` - - // A mapping of attributes on the user entry to claims. - IDAttr string `json:"idAttr"` // Defaults to "uid" - EmailAttr string `json:"emailAttr"` // Defaults to "mail" - NameAttr string `json:"nameAttr"` // No default. - PreferredUsernameAttrAttr string `json:"preferredUsernameAttr"` // No default. - - // If this is set, the email claim of the id token will be constructed from the idAttr and - // value of emailSuffix. This should not include the @ character. - EmailSuffix string `json:"emailSuffix"` // No default. - } `json:"userSearch"` - - // Group search configuration. - GroupSearch struct { - // BaseDN to start the search from. For example "cn=groups,dc=example,dc=com" - BaseDN string `json:"baseDN"` - - // Optional filter to apply when searching the directory. For example "(objectClass=posixGroup)" - Filter string `json:"filter"` - - Scope string `json:"scope"` // Defaults to "sub" - - // DEPRECATED config options. Those are left for backward compatibility. - // See "UserMatchers" below for the current group to user matching implementation - // TODO: should be eventually removed from the code - UserAttr string `json:"userAttr"` - GroupAttr string `json:"groupAttr"` - - // Array of the field pairs used to match a user to a group. - // See the "UserMatcher" struct for the exact field names - // - // Each pair adds an additional requirement to the filter that an attribute in the group - // match the user's attribute value. For example that the "members" attribute of - // a group matches the "uid" of the user. The exact filter being added is: - // - // (userMatchers[n].=userMatchers[n].) - // - UserMatchers []UserMatcher `json:"userMatchers"` - - // The attribute of the group that represents its name. - NameAttr string `json:"nameAttr"` - } `json:"groupSearch"` -} - -func scopeString(i int) string { - switch i { - case ldap.ScopeBaseObject: - return "base" - case ldap.ScopeSingleLevel: - return "one" - case ldap.ScopeWholeSubtree: - return "sub" - default: - return "" - } -} - -func parseScope(s string) (int, bool) { - // NOTE(ericchiang): ScopeBaseObject doesn't really make sense for us because we - // never know the user's or group's DN. - switch s { - case "", "sub": - return ldap.ScopeWholeSubtree, true - case "one": - return ldap.ScopeSingleLevel, true - } - return 0, false -} - -// Build a list of group attr name to user attr value matchers. -// Function exists here to allow backward compatibility between old and new -// group to user matching implementations. -// See "Config.GroupSearch.UserMatchers" comments for the details -func (c *ldapConnector) userMatchers() []UserMatcher { - if len(c.GroupSearch.UserMatchers) > 0 && c.GroupSearch.UserMatchers[0].UserAttr != "" { - return c.GroupSearch.UserMatchers[:] - } else { - return []UserMatcher{ - { - UserAttr: c.GroupSearch.UserAttr, - GroupAttr: c.GroupSearch.GroupAttr, - }, - } - } -} - -// Open returns an authentication strategy using LDAP. 
-func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - conn, err := c.OpenConnector(logger) - if err != nil { - return nil, err - } - return connector.Connector(conn), nil -} - -type refreshData struct { - Username string `json:"username"` - Entry ldap.Entry `json:"entry"` -} - -// OpenConnector is the same as Open but returns a type with all implemented connector interfaces. -func (c *Config) OpenConnector(logger log.Logger) (interface { - connector.Connector - connector.PasswordConnector - connector.RefreshConnector -}, error) { - return c.openConnector(logger) -} - -func (c *Config) openConnector(logger log.Logger) (*ldapConnector, error) { - requiredFields := []struct { - name string - val string - }{ - {"host", c.Host}, - {"userSearch.baseDN", c.UserSearch.BaseDN}, - {"userSearch.username", c.UserSearch.Username}, - } - - for _, field := range requiredFields { - if field.val == "" { - return nil, fmt.Errorf("ldap: missing required field %q", field.name) - } - } - - var ( - host string - err error - ) - if host, _, err = net.SplitHostPort(c.Host); err != nil { - host = c.Host - if c.InsecureNoSSL { - c.Host = c.Host + ":389" - } else { - c.Host = c.Host + ":636" - } - } - - tlsConfig := &tls.Config{ServerName: host, InsecureSkipVerify: c.InsecureSkipVerify} - if c.RootCA != "" || len(c.RootCAData) != 0 { - data := c.RootCAData - if len(data) == 0 { - var err error - if data, err = ioutil.ReadFile(c.RootCA); err != nil { - return nil, fmt.Errorf("ldap: read ca file: %v", err) - } - } - rootCAs := x509.NewCertPool() - if !rootCAs.AppendCertsFromPEM(data) { - return nil, fmt.Errorf("ldap: no certs found in ca file") - } - tlsConfig.RootCAs = rootCAs - } - - if c.ClientKey != "" && c.ClientCert != "" { - cert, err := tls.LoadX509KeyPair(c.ClientCert, c.ClientKey) - if err != nil { - return nil, fmt.Errorf("ldap: load client cert failed: %v", err) - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - userSearchScope, ok := parseScope(c.UserSearch.Scope) - if !ok { - return nil, fmt.Errorf("userSearch.Scope unknown value %q", c.UserSearch.Scope) - } - groupSearchScope, ok := parseScope(c.GroupSearch.Scope) - if !ok { - return nil, fmt.Errorf("groupSearch.Scope unknown value %q", c.GroupSearch.Scope) - } - return &ldapConnector{*c, userSearchScope, groupSearchScope, tlsConfig, logger}, nil -} - -type ldapConnector struct { - Config - - userSearchScope int - groupSearchScope int - - tlsConfig *tls.Config - - logger log.Logger -} - -var ( - _ connector.PasswordConnector = (*ldapConnector)(nil) - _ connector.RefreshConnector = (*ldapConnector)(nil) -) - -// do initializes a connection to the LDAP directory and passes it to the -// provided function. It then performs appropriate teardown or reuse before -// returning. -func (c *ldapConnector) do(ctx context.Context, f func(c *ldap.Conn) error) error { - // TODO(ericchiang): support context here - var ( - conn *ldap.Conn - err error - ) - switch { - case c.InsecureNoSSL: - conn, err = ldap.Dial("tcp", c.Host) - case c.StartTLS: - conn, err = ldap.Dial("tcp", c.Host) - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - if err := conn.StartTLS(c.tlsConfig); err != nil { - return fmt.Errorf("start TLS failed: %v", err) - } - default: - conn, err = ldap.DialTLS("tcp", c.Host, c.tlsConfig) - } - if err != nil { - return fmt.Errorf("failed to connect: %v", err) - } - defer conn.Close() - - // If bindDN and bindPW are empty this will default to an anonymous bind. 
- if err := conn.Bind(c.BindDN, c.BindPW); err != nil { - if c.BindDN == "" && c.BindPW == "" { - return fmt.Errorf("ldap: initial anonymous bind failed: %v", err) - } - return fmt.Errorf("ldap: initial bind for user %q failed: %v", c.BindDN, err) - } - - return f(conn) -} - -func getAttrs(e ldap.Entry, name string) []string { - for _, a := range e.Attributes { - if a.Name != name { - continue - } - return a.Values - } - if name == "DN" { - return []string{e.DN} - } - return nil -} - -func getAttr(e ldap.Entry, name string) string { - if a := getAttrs(e, name); len(a) > 0 { - return a[0] - } - return "" -} - -func (c *ldapConnector) identityFromEntry(user ldap.Entry) (ident connector.Identity, err error) { - // If we're missing any attributes, such as email or ID, we want to report - // an error rather than continuing. - missing := []string{} - - // Fill the identity struct using the attributes from the user entry. - if ident.UserID = getAttr(user, c.UserSearch.IDAttr); ident.UserID == "" { - missing = append(missing, c.UserSearch.IDAttr) - } - - if c.UserSearch.NameAttr != "" { - if ident.Username = getAttr(user, c.UserSearch.NameAttr); ident.Username == "" { - missing = append(missing, c.UserSearch.NameAttr) - } - } - - if c.UserSearch.PreferredUsernameAttrAttr != "" { - if ident.PreferredUsername = getAttr(user, c.UserSearch.PreferredUsernameAttrAttr); ident.PreferredUsername == "" { - missing = append(missing, c.UserSearch.PreferredUsernameAttrAttr) - } - } - - if c.UserSearch.EmailSuffix != "" { - ident.Email = ident.Username + "@" + c.UserSearch.EmailSuffix - } else if ident.Email = getAttr(user, c.UserSearch.EmailAttr); ident.Email == "" { - missing = append(missing, c.UserSearch.EmailAttr) - } - // TODO(ericchiang): Let this value be set from an attribute. - ident.EmailVerified = true - - if len(missing) != 0 { - err := fmt.Errorf("ldap: entry %q missing following required attribute(s): %q", user.DN, missing) - return connector.Identity{}, err - } - return ident, nil -} - -func (c *ldapConnector) userEntry(conn *ldap.Conn, username string) (user ldap.Entry, found bool, err error) { - filter := fmt.Sprintf("(%s=%s)", c.UserSearch.Username, ldap.EscapeFilter(username)) - if c.UserSearch.Filter != "" { - filter = fmt.Sprintf("(&%s%s)", c.UserSearch.Filter, filter) - } - - // Initial search. - req := &ldap.SearchRequest{ - BaseDN: c.UserSearch.BaseDN, - Filter: filter, - Scope: c.userSearchScope, - // We only need to search for these specific requests. - Attributes: []string{ - c.UserSearch.IDAttr, - c.UserSearch.EmailAttr, - // TODO(ericchiang): what if this contains duplicate values? 
- }, - } - - for _, matcher := range c.userMatchers() { - req.Attributes = append(req.Attributes, matcher.UserAttr) - } - - if c.UserSearch.NameAttr != "" { - req.Attributes = append(req.Attributes, c.UserSearch.NameAttr) - } - - if c.UserSearch.PreferredUsernameAttrAttr != "" { - req.Attributes = append(req.Attributes, c.UserSearch.PreferredUsernameAttrAttr) - } - - c.logger.Infof("performing ldap search %s %s %s", - req.BaseDN, scopeString(req.Scope), req.Filter) - resp, err := conn.Search(req) - if err != nil { - return ldap.Entry{}, false, fmt.Errorf("ldap: search with filter %q failed: %v", req.Filter, err) - } - - switch n := len(resp.Entries); n { - case 0: - c.logger.Errorf("ldap: no results returned for filter: %q", filter) - return ldap.Entry{}, false, nil - case 1: - user = *resp.Entries[0] - c.logger.Infof("username %q mapped to entry %s", username, user.DN) - return user, true, nil - default: - return ldap.Entry{}, false, fmt.Errorf("ldap: filter returned multiple (%d) results: %q", n, filter) - } -} - -func (c *ldapConnector) Login(ctx context.Context, s connector.Scopes, username, password string) (ident connector.Identity, validPass bool, err error) { - // make this check to avoid unauthenticated bind to the LDAP server. - if password == "" { - return connector.Identity{}, false, nil - } - - var ( - // We want to return a different error if the user's password is incorrect vs - // if there was an error. - incorrectPass = false - user ldap.Entry - ) - - err = c.do(ctx, func(conn *ldap.Conn) error { - entry, found, err := c.userEntry(conn, username) - if err != nil { - return err - } - if !found { - incorrectPass = true - return nil - } - user = entry - - // Try to authenticate as the distinguished name. - if err := conn.Bind(user.DN, password); err != nil { - // Detect a bad password through the LDAP error code. - if ldapErr, ok := err.(*ldap.Error); ok { - switch ldapErr.ResultCode { - case ldap.LDAPResultInvalidCredentials: - c.logger.Errorf("ldap: invalid password for user %q", user.DN) - incorrectPass = true - return nil - case ldap.LDAPResultConstraintViolation: - c.logger.Errorf("ldap: constraint violation for user %q: %s", user.DN, ldapErr.Error()) - incorrectPass = true - return nil - } - } // will also catch all ldap.Error without a case statement above - return fmt.Errorf("ldap: failed to bind as dn %q: %v", user.DN, err) - } - return nil - }) - if err != nil { - return connector.Identity{}, false, err - } - if incorrectPass { - return connector.Identity{}, false, nil - } - - if ident, err = c.identityFromEntry(user); err != nil { - return connector.Identity{}, false, err - } - - if s.Groups { - groups, err := c.groups(ctx, user) - if err != nil { - return connector.Identity{}, false, fmt.Errorf("ldap: failed to query groups: %v", err) - } - ident.Groups = groups - } - - if s.OfflineAccess { - refresh := refreshData{ - Username: username, - Entry: user, - } - // Encode entry for follow up requests such as the groups query and - // refresh attempts. 
- if ident.ConnectorData, err = json.Marshal(refresh); err != nil { - return connector.Identity{}, false, fmt.Errorf("ldap: marshal entry: %v", err) - } - } - - return ident, true, nil -} - -func (c *ldapConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { - var data refreshData - if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { - return ident, fmt.Errorf("ldap: failed to unmarshal internal data: %v", err) - } - - var user ldap.Entry - err := c.do(ctx, func(conn *ldap.Conn) error { - entry, found, err := c.userEntry(conn, data.Username) - if err != nil { - return err - } - if !found { - return fmt.Errorf("ldap: user not found %q", data.Username) - } - user = entry - return nil - }) - if err != nil { - return ident, err - } - if user.DN != data.Entry.DN { - return ident, fmt.Errorf("ldap: refresh for username %q expected DN %q got %q", data.Username, data.Entry.DN, user.DN) - } - - newIdent, err := c.identityFromEntry(user) - if err != nil { - return ident, err - } - newIdent.ConnectorData = ident.ConnectorData - - if s.Groups { - groups, err := c.groups(ctx, user) - if err != nil { - return connector.Identity{}, fmt.Errorf("ldap: failed to query groups: %v", err) - } - newIdent.Groups = groups - } - return newIdent, nil -} - -func (c *ldapConnector) groups(ctx context.Context, user ldap.Entry) ([]string, error) { - if c.GroupSearch.BaseDN == "" { - c.logger.Debugf("No groups returned for %q because no groups baseDN has been configured.", getAttr(user, c.UserSearch.NameAttr)) - return nil, nil - } - - var groups []*ldap.Entry - for _, matcher := range c.userMatchers() { - for _, attr := range getAttrs(user, matcher.UserAttr) { - filter := fmt.Sprintf("(%s=%s)", matcher.GroupAttr, ldap.EscapeFilter(attr)) - if c.GroupSearch.Filter != "" { - filter = fmt.Sprintf("(&%s%s)", c.GroupSearch.Filter, filter) - } - - req := &ldap.SearchRequest{ - BaseDN: c.GroupSearch.BaseDN, - Filter: filter, - Scope: c.groupSearchScope, - Attributes: []string{c.GroupSearch.NameAttr}, - } - - gotGroups := false - if err := c.do(ctx, func(conn *ldap.Conn) error { - c.logger.Infof("performing ldap search %s %s %s", - req.BaseDN, scopeString(req.Scope), req.Filter) - resp, err := conn.Search(req) - if err != nil { - return fmt.Errorf("ldap: search failed: %v", err) - } - gotGroups = len(resp.Entries) != 0 - groups = append(groups, resp.Entries...) - return nil - }); err != nil { - return nil, err - } - if !gotGroups { - // TODO(ericchiang): Is this going to spam the logs? - c.logger.Errorf("ldap: groups search with filter %q returned no groups", filter) - } - } - } - - var groupNames []string - for _, group := range groups { - name := getAttr(*group, c.GroupSearch.NameAttr) - if name == "" { - // Be obnoxious about missing missing attributes. If the group entry is - // missing its name attribute, that indicates a misconfiguration. - // - // In the future we can add configuration options to just log these errors. 
- return nil, fmt.Errorf("ldap: group entity %q missing required attribute %q", - group.DN, c.GroupSearch.NameAttr) - } - - groupNames = append(groupNames, name) - } - return groupNames, nil -} - -func (c *ldapConnector) Prompt() string { - return c.UsernamePrompt -} diff --git a/vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go b/vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go deleted file mode 100644 index 1c8312c..0000000 --- a/vendor/github.com/dexidp/dex/connector/linkedin/linkedin.go +++ /dev/null @@ -1,242 +0,0 @@ -// Package linkedin provides authentication strategies using LinkedIn -package linkedin - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "golang.org/x/oauth2" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/log" -) - -const ( - apiURL = "https://api.linkedin.com/v2" - authURL = "https://www.linkedin.com/oauth/v2/authorization" - tokenURL = "https://www.linkedin.com/oauth/v2/accessToken" -) - -// Config holds configuration options for LinkedIn logins. -type Config struct { - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` -} - -// Open returns a strategy for logging in through LinkedIn -func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - return &linkedInConnector{ - oauth2Config: &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - Endpoint: oauth2.Endpoint{ - AuthURL: authURL, - TokenURL: tokenURL, - }, - Scopes: []string{"r_liteprofile", "r_emailaddress"}, - RedirectURL: c.RedirectURI, - }, - logger: logger, - }, nil -} - -type connectorData struct { - AccessToken string `json:"accessToken"` -} - -type linkedInConnector struct { - oauth2Config *oauth2.Config - logger log.Logger -} - -// LinkedIn doesn't provide refresh tokens, so refresh tokens issued by Dex -// will expire in 60 days (default LinkedIn token lifetime). 
-var ( - _ connector.CallbackConnector = (*linkedInConnector)(nil) - _ connector.RefreshConnector = (*linkedInConnector)(nil) -) - -// LoginURL returns an access token request URL -func (c *linkedInConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { - if c.oauth2Config.RedirectURL != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", - callbackURL, c.oauth2Config.RedirectURL) - } - - return c.oauth2Config.AuthCodeURL(state), nil -} - -// HandleCallback handles HTTP redirect from LinkedIn -func (c *linkedInConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - - ctx := r.Context() - token, err := c.oauth2Config.Exchange(ctx, q.Get("code")) - if err != nil { - return identity, fmt.Errorf("linkedin: get token: %v", err) - } - - client := c.oauth2Config.Client(ctx, token) - profile, err := c.profile(ctx, client) - if err != nil { - return identity, fmt.Errorf("linkedin: get profile: %v", err) - } - - identity = connector.Identity{ - UserID: profile.ID, - Username: profile.fullname(), - Email: profile.Email, - EmailVerified: true, - } - - if s.OfflineAccess { - data := connectorData{AccessToken: token.AccessToken} - connData, err := json.Marshal(data) - if err != nil { - return identity, fmt.Errorf("linkedin: marshal connector data: %v", err) - } - identity.ConnectorData = connData - } - - return identity, nil -} - -func (c *linkedInConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) { - if len(ident.ConnectorData) == 0 { - return ident, fmt.Errorf("linkedin: no upstream access token found") - } - - var data connectorData - if err := json.Unmarshal(ident.ConnectorData, &data); err != nil { - return ident, fmt.Errorf("linkedin: unmarshal access token: %v", err) - } - - client := c.oauth2Config.Client(ctx, &oauth2.Token{AccessToken: data.AccessToken}) - profile, err := c.profile(ctx, client) - if err != nil { - return ident, fmt.Errorf("linkedin: get profile: %v", err) - } - - ident.Username = profile.fullname() - ident.Email = profile.Email - - return ident, nil -} - -type profile struct { - ID string `json:"id"` - FirstName string `json:"localizedFirstName"` - LastName string `json:"localizedLastName"` - Email string `json:"emailAddress"` -} - -type emailresp struct { - Elements []struct { - Handle struct { - EmailAddress string `json:"emailAddress"` - } `json:"handle~"` - } `json:"elements"` -} - -// fullname returns a full name of a person, or email if the resulting name is -// empty -func (p profile) fullname() string { - fname := strings.TrimSpace(p.FirstName + " " + p.LastName) - if fname == "" { - return p.Email - } - - return fname -} - -func (c *linkedInConnector) primaryEmail(ctx context.Context, client *http.Client) (email string, err error) { - req, err := http.NewRequest("GET", apiURL+"/emailAddress?q=members&projection=(elements*(handle~))", nil) - if err != nil { - return email, fmt.Errorf("new req: %v", err) - } - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return email, fmt.Errorf("get URL %v", err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return email, fmt.Errorf("read body: %v", err) - } - - if resp.StatusCode != http.StatusOK { - return email, fmt.Errorf("%s: 
%s", resp.Status, body) - } - - var parsedResp emailresp - err = json.Unmarshal(body, &parsedResp) - if err == nil { - for _, elem := range parsedResp.Elements { - email = elem.Handle.EmailAddress - } - } - - if email == "" { - err = fmt.Errorf("email is not set") - } - - return email, err -} - -func (c *linkedInConnector) profile(ctx context.Context, client *http.Client) (p profile, err error) { - // https://docs.microsoft.com/en-us/linkedin/shared/integrations/people/profile-api - // https://docs.microsoft.com/en-us/linkedin/shared/integrations/people/primary-contact-api - // https://docs.microsoft.com/en-us/linkedin/consumer/integrations/self-serve/migration-faq#how-do-i-retrieve-the-members-email-address - req, err := http.NewRequest("GET", apiURL+"/me", nil) - if err != nil { - return p, fmt.Errorf("new req: %v", err) - } - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return p, fmt.Errorf("get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return p, fmt.Errorf("read body: %v", err) - } - return p, fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.NewDecoder(resp.Body).Decode(&p); err != nil { - return p, fmt.Errorf("JSON decode: %v", err) - } - - email, err := c.primaryEmail(ctx, client) - if err != nil { - return p, fmt.Errorf("fetching email: %v", err) - } - p.Email = email - - return p, err -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} diff --git a/vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go b/vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go deleted file mode 100644 index 142a7c6..0000000 --- a/vendor/github.com/dexidp/dex/connector/microsoft/microsoft.go +++ /dev/null @@ -1,474 +0,0 @@ -// Package microsoft provides authentication strategies using Microsoft. -package microsoft - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "sync" - "time" - - "golang.org/x/oauth2" - - "github.com/dexidp/dex/connector" - groups_pkg "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -// GroupNameFormat represents the format of the group identifier -// we use type of string instead of int because it's easier to -// marshall/unmarshall -type GroupNameFormat string - -// Possible values for GroupNameFormat -const ( - GroupID GroupNameFormat = "id" - GroupName GroupNameFormat = "name" -) - -const ( - // Microsoft requires this scope to access user's profile - scopeUser = "user.read" - // Microsoft requires this scope to list groups the user is a member of - // and resolve their ids to groups names. - scopeGroups = "directory.read.all" -) - -// Config holds configuration options for microsoft logins. -type Config struct { - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - Tenant string `json:"tenant"` - OnlySecurityGroups bool `json:"onlySecurityGroups"` - Groups []string `json:"groups"` - GroupNameFormat GroupNameFormat `json:"groupNameFormat"` - UseGroupsAsWhitelist bool `json:"useGroupsAsWhitelist"` -} - -// Open returns a strategy for logging in through Microsoft. 
-func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - m := microsoftConnector{ - apiURL: "https://login.microsoftonline.com", - graphURL: "https://graph.microsoft.com", - redirectURI: c.RedirectURI, - clientID: c.ClientID, - clientSecret: c.ClientSecret, - tenant: c.Tenant, - onlySecurityGroups: c.OnlySecurityGroups, - groups: c.Groups, - groupNameFormat: c.GroupNameFormat, - useGroupsAsWhitelist: c.UseGroupsAsWhitelist, - logger: logger, - } - // By default allow logins from both personal and business/school - // accounts. - if m.tenant == "" { - m.tenant = "common" - } - - // By default, use group names - switch m.groupNameFormat { - case "": - m.groupNameFormat = GroupName - case GroupID, GroupName: - default: - return nil, fmt.Errorf("invalid groupNameFormat: %s", m.groupNameFormat) - } - - return &m, nil -} - -type connectorData struct { - AccessToken string `json:"accessToken"` - RefreshToken string `json:"refreshToken"` - Expiry time.Time `json:"expiry"` -} - -var ( - _ connector.CallbackConnector = (*microsoftConnector)(nil) - _ connector.RefreshConnector = (*microsoftConnector)(nil) -) - -type microsoftConnector struct { - apiURL string - graphURL string - redirectURI string - clientID string - clientSecret string - tenant string - onlySecurityGroups bool - groupNameFormat GroupNameFormat - groups []string - useGroupsAsWhitelist bool - logger log.Logger -} - -func (c *microsoftConnector) isOrgTenant() bool { - return c.tenant != "common" && c.tenant != "consumers" && c.tenant != "organizations" -} - -func (c *microsoftConnector) groupsRequired(groupScope bool) bool { - return (len(c.groups) > 0 || groupScope) && c.isOrgTenant() -} - -func (c *microsoftConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config { - microsoftScopes := []string{scopeUser} - if c.groupsRequired(scopes.Groups) { - microsoftScopes = append(microsoftScopes, scopeGroups) - } - - return &oauth2.Config{ - ClientID: c.clientID, - ClientSecret: c.clientSecret, - Endpoint: oauth2.Endpoint{ - AuthURL: c.apiURL + "/" + c.tenant + "/oauth2/v2.0/authorize", - TokenURL: c.apiURL + "/" + c.tenant + "/oauth2/v2.0/token", - }, - Scopes: microsoftScopes, - RedirectURL: c.redirectURI, - } -} - -func (c *microsoftConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { - if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) - } - - return c.oauth2Config(scopes).AuthCodeURL(state), nil -} - -func (c *microsoftConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - - oauth2Config := c.oauth2Config(s) - - ctx := r.Context() - - token, err := oauth2Config.Exchange(ctx, q.Get("code")) - if err != nil { - return identity, fmt.Errorf("microsoft: failed to get token: %v", err) - } - - client := oauth2Config.Client(ctx, token) - - user, err := c.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("microsoft: get user: %v", err) - } - - identity = connector.Identity{ - UserID: user.ID, - Username: user.Name, - Email: user.Email, - EmailVerified: true, - } - - if c.groupsRequired(s.Groups) { - groups, err := c.getGroups(ctx, client, user.ID) - if err != nil { - return identity, fmt.Errorf("microsoft: get groups: %v", err) - } - identity.Groups = groups - } - - if 
s.OfflineAccess { - data := connectorData{ - AccessToken: token.AccessToken, - RefreshToken: token.RefreshToken, - Expiry: token.Expiry, - } - connData, err := json.Marshal(data) - if err != nil { - return identity, fmt.Errorf("microsoft: marshal connector data: %v", err) - } - identity.ConnectorData = connData - } - - return identity, nil -} - -type tokenNotifyFunc func(*oauth2.Token) error - -// notifyRefreshTokenSource is essentially `oauth2.ResuseTokenSource` with `TokenNotifyFunc` added. -type notifyRefreshTokenSource struct { - new oauth2.TokenSource - mu sync.Mutex // guards t - t *oauth2.Token - f tokenNotifyFunc // called when token refreshed so new refresh token can be persisted -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *notifyRefreshTokenSource) Token() (*oauth2.Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, s.f(t) -} - -func (c *microsoftConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - if len(identity.ConnectorData) == 0 { - return identity, errors.New("microsoft: no upstream access token found") - } - - var data connectorData - if err := json.Unmarshal(identity.ConnectorData, &data); err != nil { - return identity, fmt.Errorf("microsoft: unmarshal access token: %v", err) - } - tok := &oauth2.Token{ - AccessToken: data.AccessToken, - RefreshToken: data.RefreshToken, - Expiry: data.Expiry, - } - - client := oauth2.NewClient(ctx, ¬ifyRefreshTokenSource{ - new: c.oauth2Config(s).TokenSource(ctx, tok), - t: tok, - f: func(tok *oauth2.Token) error { - data := connectorData{ - AccessToken: tok.AccessToken, - RefreshToken: tok.RefreshToken, - Expiry: tok.Expiry, - } - connData, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("microsoft: marshal connector data: %v", err) - } - identity.ConnectorData = connData - return nil - }, - }) - user, err := c.user(ctx, client) - if err != nil { - return identity, fmt.Errorf("microsoft: get user: %v", err) - } - - identity.Username = user.Name - identity.Email = user.Email - - if c.groupsRequired(s.Groups) { - groups, err := c.getGroups(ctx, client, user.ID) - if err != nil { - return identity, fmt.Errorf("microsoft: get groups: %v", err) - } - identity.Groups = groups - } - - return identity, nil -} - -// https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/user -// id - The unique identifier for the user. Inherited from -// directoryObject. Key. Not nullable. Read-only. -// displayName - The name displayed in the address book for the user. -// This is usually the combination of the user's first name, -// middle initial and last name. This property is required -// when a user is created and it cannot be cleared during -// updates. Supports $filter and $orderby. -// userPrincipalName - The user principal name (UPN) of the user. -// The UPN is an Internet-style login name for the user -// based on the Internet standard RFC 822. By convention, -// this should map to the user's email name. The general -// format is alias@domain, where domain must be present in -// the tenant’s collection of verified domains. This -// property is required when a user is created. 
The -// verified domains for the tenant can be accessed from the -// verifiedDomains property of organization. Supports -// $filter and $orderby. -type user struct { - ID string `json:"id"` - Name string `json:"displayName"` - Email string `json:"userPrincipalName"` -} - -func (c *microsoftConnector) user(ctx context.Context, client *http.Client) (u user, err error) { - // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/user_get - req, err := http.NewRequest("GET", c.graphURL+"/v1.0/me?$select=id,displayName,userPrincipalName", nil) - if err != nil { - return u, fmt.Errorf("new req: %v", err) - } - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return u, fmt.Errorf("get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return u, newGraphError(resp.Body) - } - - if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { - return u, fmt.Errorf("JSON decode: %v", err) - } - - return u, err -} - -// https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/group -// displayName - The display name for the group. This property is required when -// a group is created and it cannot be cleared during updates. -// Supports $filter and $orderby. -type group struct { - Name string `json:"displayName"` -} - -func (c *microsoftConnector) getGroups(ctx context.Context, client *http.Client, userID string) ([]string, error) { - userGroups, err := c.getGroupIDs(ctx, client) - if err != nil { - return nil, err - } - - if c.groupNameFormat == GroupName { - userGroups, err = c.getGroupNames(ctx, client, userGroups) - if err != nil { - return nil, err - } - } - - // ensure that the user is in at least one required group - filteredGroups := groups_pkg.Filter(userGroups, c.groups) - if len(c.groups) > 0 && len(filteredGroups) == 0 { - return nil, fmt.Errorf("microsoft: user %v not in any of the required groups", userID) - } else if c.useGroupsAsWhitelist { - return filteredGroups, nil - } - - return userGroups, nil -} - -func (c *microsoftConnector) getGroupIDs(ctx context.Context, client *http.Client) (ids []string, err error) { - // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/user_getmembergroups - in := &struct { - SecurityEnabledOnly bool `json:"securityEnabledOnly"` - }{c.onlySecurityGroups} - reqURL := c.graphURL + "/v1.0/me/getMemberGroups" - for { - var out []string - var next string - - next, err = c.post(ctx, client, reqURL, in, &out) - if err != nil { - return ids, err - } - - ids = append(ids, out...) 
- if next == "" { - return - } - reqURL = next - } -} - -func (c *microsoftConnector) getGroupNames(ctx context.Context, client *http.Client, ids []string) (groups []string, err error) { - if len(ids) == 0 { - return - } - - // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/api/directoryobject_getbyids - in := &struct { - IDs []string `json:"ids"` - Types []string `json:"types"` - }{ids, []string{"group"}} - reqURL := c.graphURL + "/v1.0/directoryObjects/getByIds" - for { - var out []group - var next string - - next, err = c.post(ctx, client, reqURL, in, &out) - if err != nil { - return groups, err - } - - for _, g := range out { - groups = append(groups, g.Name) - } - if next == "" { - return - } - reqURL = next - } -} - -func (c *microsoftConnector) post(ctx context.Context, client *http.Client, reqURL string, in interface{}, out interface{}) (string, error) { - var payload bytes.Buffer - - err := json.NewEncoder(&payload).Encode(in) - if err != nil { - return "", fmt.Errorf("microsoft: JSON encode: %v", err) - } - - req, err := http.NewRequest("POST", reqURL, &payload) - if err != nil { - return "", fmt.Errorf("new req: %v", err) - } - req.Header.Set("Content-Type", "application/json") - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return "", fmt.Errorf("post URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return "", newGraphError(resp.Body) - } - - var next string - if err = json.NewDecoder(resp.Body).Decode(&struct { - NextLink *string `json:"@odata.nextLink"` - Value interface{} `json:"value"` - }{&next, out}); err != nil { - return "", fmt.Errorf("JSON decode: %v", err) - } - - return next, nil -} - -type graphError struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (e *graphError) Error() string { - return e.Code + ": " + e.Message -} - -func newGraphError(r io.Reader) error { - // https://developer.microsoft.com/en-us/graph/docs/concepts/errors - var ge graphError - if err := json.NewDecoder(r).Decode(&struct { - Error *graphError `json:"error"` - }{&ge}); err != nil { - return fmt.Errorf("JSON error decode: %v", err) - } - return &ge -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} diff --git a/vendor/github.com/dexidp/dex/connector/mock/connectortest.go b/vendor/github.com/dexidp/dex/connector/mock/connectortest.go deleted file mode 100644 index 5089914..0000000 --- a/vendor/github.com/dexidp/dex/connector/mock/connectortest.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package mock implements connectors which help test various server components. -package mock - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/log" -) - -// NewCallbackConnector returns a mock connector which requires no user interaction. It always returns -// the same (fake) identity. 
-func NewCallbackConnector(logger log.Logger) connector.Connector { - return &Callback{ - Identity: connector.Identity{ - UserID: "0-385-28089-0", - Username: "Kilgore Trout", - Email: "kilgore@kilgore.trout", - EmailVerified: true, - Groups: []string{"authors"}, - ConnectorData: connectorData, - }, - Logger: logger, - } -} - -var ( - _ connector.CallbackConnector = &Callback{} - - _ connector.PasswordConnector = passwordConnector{} - _ connector.RefreshConnector = passwordConnector{} -) - -// Callback is a connector that requires no user interaction and always returns the same identity. -type Callback struct { - // The returned identity. - Identity connector.Identity - Logger log.Logger -} - -// LoginURL returns the URL to redirect the user to login with. -func (m *Callback) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { - u, err := url.Parse(callbackURL) - if err != nil { - return "", fmt.Errorf("failed to parse callbackURL %q: %v", callbackURL, err) - } - v := u.Query() - v.Set("state", state) - u.RawQuery = v.Encode() - return u.String(), nil -} - -var connectorData = []byte("foobar") - -// HandleCallback parses the request and returns the user's identity -func (m *Callback) HandleCallback(s connector.Scopes, r *http.Request) (connector.Identity, error) { - return m.Identity, nil -} - -// Refresh updates the identity during a refresh token request. -func (m *Callback) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - return m.Identity, nil -} - -// CallbackConfig holds the configuration parameters for a connector which requires no interaction. -type CallbackConfig struct{} - -// Open returns an authentication strategy which requires no user interaction. -func (c *CallbackConfig) Open(id string, logger log.Logger) (connector.Connector, error) { - return NewCallbackConnector(logger), nil -} - -// PasswordConfig holds the configuration for a mock connector which prompts for the supplied -// username and password. -type PasswordConfig struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// Open returns an authentication strategy which prompts for a predefined username and password. 
-func (c *PasswordConfig) Open(id string, logger log.Logger) (connector.Connector, error) { - if c.Username == "" { - return nil, errors.New("no username supplied") - } - if c.Password == "" { - return nil, errors.New("no password supplied") - } - return &passwordConnector{c.Username, c.Password, logger}, nil -} - -type passwordConnector struct { - username string - password string - logger log.Logger -} - -func (p passwordConnector) Close() error { return nil } - -func (p passwordConnector) Login(ctx context.Context, s connector.Scopes, username, password string) (identity connector.Identity, validPassword bool, err error) { - if username == p.username && password == p.password { - return connector.Identity{ - UserID: "0-385-28089-0", - Username: "Kilgore Trout", - Email: "kilgore@kilgore.trout", - EmailVerified: true, - }, true, nil - } - return identity, false, nil -} - -func (p passwordConnector) Prompt() string { return "" } - -func (p passwordConnector) Refresh(_ context.Context, _ connector.Scopes, identity connector.Identity) (connector.Identity, error) { - return identity, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/oidc/oidc.go b/vendor/github.com/dexidp/dex/connector/oidc/oidc.go deleted file mode 100644 index 675e4b9..0000000 --- a/vendor/github.com/dexidp/dex/connector/oidc/oidc.go +++ /dev/null @@ -1,352 +0,0 @@ -// Package oidc implements logging in through OpenID Connect providers. -package oidc - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/coreos/go-oidc" - "golang.org/x/oauth2" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/log" -) - -// Config holds configuration options for OpenID Connect logins. -type Config struct { - Issuer string `json:"issuer"` - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - - // Causes client_secret to be passed as POST parameters instead of basic - // auth. This is specifically "NOT RECOMMENDED" by the OAuth2 RFC, but some - // providers require it. - // - // https://tools.ietf.org/html/rfc6749#section-2.3.1 - BasicAuthUnsupported *bool `json:"basicAuthUnsupported"` - - Scopes []string `json:"scopes"` // defaults to "profile" and "email" - - // Optional list of whitelisted domains when using Google - // If this field is nonempty, only users from a listed domain will be allowed to log in - HostedDomains []string `json:"hostedDomains"` - - // Override the value of email_verifed to true in the returned claims - InsecureSkipEmailVerified bool `json:"insecureSkipEmailVerified"` - - // InsecureEnableGroups enables groups claims. This is disabled by default until https://github.com/dexidp/dex/issues/1065 is resolved - InsecureEnableGroups bool `json:"insecureEnableGroups"` - - // GetUserInfo uses the userinfo endpoint to get additional claims for - // the token. This is especially useful where upstreams return "thin" - // id tokens - GetUserInfo bool `json:"getUserInfo"` - - // Configurable key which contains the user id claim - UserIDKey string `json:"userIDKey"` - - // Configurable key which contains the user name claim - UserNameKey string `json:"userNameKey"` - - // PromptType will be used fot the prompt parameter (when offline_access, by default prompt=consent) - PromptType string `json:"promptType"` -} - -// Domains that don't support basic auth. golang.org/x/oauth2 has an internal -// list, but it only matches specific URLs, not top level domains. 
-var brokenAuthHeaderDomains = []string{ - // See: https://github.com/dexidp/dex/issues/859 - "okta.com", - "oktapreview.com", -} - -// connectorData stores information for sessions authenticated by this connector -type connectorData struct { - RefreshToken []byte -} - -// Detect auth header provider issues for known providers. This lets users -// avoid having to explicitly set "basicAuthUnsupported" in their config. -// -// Setting the config field always overrides values returned by this function. -func knownBrokenAuthHeaderProvider(issuerURL string) bool { - if u, err := url.Parse(issuerURL); err == nil { - for _, host := range brokenAuthHeaderDomains { - if u.Host == host || strings.HasSuffix(u.Host, "."+host) { - return true - } - } - } - return false -} - -// Open returns a connector which can be used to login users through an upstream -// OpenID Connect provider. -func (c *Config) Open(id string, logger log.Logger) (conn connector.Connector, err error) { - ctx, cancel := context.WithCancel(context.Background()) - - provider, err := oidc.NewProvider(ctx, c.Issuer) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to get provider: %v", err) - } - - endpoint := provider.Endpoint() - - if c.BasicAuthUnsupported != nil { - // Setting "basicAuthUnsupported" always overrides our detection. - if *c.BasicAuthUnsupported { - endpoint.AuthStyle = oauth2.AuthStyleInParams - } - } else if knownBrokenAuthHeaderProvider(c.Issuer) { - endpoint.AuthStyle = oauth2.AuthStyleInParams - } - - scopes := []string{oidc.ScopeOpenID} - if len(c.Scopes) > 0 { - scopes = append(scopes, c.Scopes...) - } else { - scopes = append(scopes, "profile", "email") - } - - // PromptType should be "consent" by default, if not set - if c.PromptType == "" { - c.PromptType = "consent" - } - - clientID := c.ClientID - return &oidcConnector{ - provider: provider, - redirectURI: c.RedirectURI, - oauth2Config: &oauth2.Config{ - ClientID: clientID, - ClientSecret: c.ClientSecret, - Endpoint: endpoint, - Scopes: scopes, - RedirectURL: c.RedirectURI, - }, - verifier: provider.Verifier( - &oidc.Config{ClientID: clientID}, - ), - logger: logger, - cancel: cancel, - hostedDomains: c.HostedDomains, - insecureSkipEmailVerified: c.InsecureSkipEmailVerified, - insecureEnableGroups: c.InsecureEnableGroups, - getUserInfo: c.GetUserInfo, - userIDKey: c.UserIDKey, - userNameKey: c.UserNameKey, - promptType: c.PromptType, - }, nil -} - -var ( - _ connector.CallbackConnector = (*oidcConnector)(nil) - _ connector.RefreshConnector = (*oidcConnector)(nil) -) - -type oidcConnector struct { - provider *oidc.Provider - redirectURI string - oauth2Config *oauth2.Config - verifier *oidc.IDTokenVerifier - cancel context.CancelFunc - logger log.Logger - hostedDomains []string - insecureSkipEmailVerified bool - insecureEnableGroups bool - getUserInfo bool - userIDKey string - userNameKey string - promptType string -} - -func (c *oidcConnector) Close() error { - c.cancel() - return nil -} - -func (c *oidcConnector) LoginURL(s connector.Scopes, callbackURL, state string) (string, error) { - if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) - } - - var opts []oauth2.AuthCodeOption - if len(c.hostedDomains) > 0 { - preferredDomain := c.hostedDomains[0] - if len(c.hostedDomains) > 1 { - preferredDomain = "*" - } - opts = append(opts, oauth2.SetAuthURLParam("hd", preferredDomain)) - } - - if s.OfflineAccess { - opts = append(opts, 
oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", c.promptType)) - } - return c.oauth2Config.AuthCodeURL(state, opts...), nil -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} - -func (c *oidcConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - token, err := c.oauth2Config.Exchange(r.Context(), q.Get("code")) - if err != nil { - return identity, fmt.Errorf("oidc: failed to get token: %v", err) - } - - return c.createIdentity(r.Context(), identity, token) -} - -// Refresh is used to refresh a session with the refresh token provided by the IdP -func (c *oidcConnector) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - cd := connectorData{} - err := json.Unmarshal(identity.ConnectorData, &cd) - if err != nil { - return identity, fmt.Errorf("oidc: failed to unmarshal connector data: %v", err) - } - - t := &oauth2.Token{ - RefreshToken: string(cd.RefreshToken), - Expiry: time.Now().Add(-time.Hour), - } - token, err := c.oauth2Config.TokenSource(ctx, t).Token() - if err != nil { - return identity, fmt.Errorf("oidc: failed to get refresh token: %v", err) - } - - return c.createIdentity(ctx, identity, token) -} - -func (c *oidcConnector) createIdentity(ctx context.Context, identity connector.Identity, token *oauth2.Token) (connector.Identity, error) { - rawIDToken, ok := token.Extra("id_token").(string) - if !ok { - return identity, errors.New("oidc: no id_token in token response") - } - idToken, err := c.verifier.Verify(ctx, rawIDToken) - if err != nil { - return identity, fmt.Errorf("oidc: failed to verify ID Token: %v", err) - } - - var claims map[string]interface{} - if err := idToken.Claims(&claims); err != nil { - return identity, fmt.Errorf("oidc: failed to decode claims: %v", err) - } - - // We immediately want to run getUserInfo if configured before we validate the claims - if c.getUserInfo { - userInfo, err := c.provider.UserInfo(ctx, oauth2.StaticTokenSource(token)) - if err != nil { - return identity, fmt.Errorf("oidc: error loading userinfo: %v", err) - } - if err := userInfo.Claims(&claims); err != nil { - return identity, fmt.Errorf("oidc: failed to decode userinfo claims: %v", err) - } - } - - userNameKey := "name" - if c.userNameKey != "" { - userNameKey = c.userNameKey - } - name, found := claims[userNameKey].(string) - if !found { - return identity, fmt.Errorf("missing \"%s\" claim", userNameKey) - } - - hasEmailScope := false - for _, s := range c.oauth2Config.Scopes { - if s == "email" { - hasEmailScope = true - break - } - } - - email, found := claims["email"].(string) - if !found && hasEmailScope { - return identity, errors.New("missing \"email\" claim") - } - - emailVerified, found := claims["email_verified"].(bool) - if !found { - if c.insecureSkipEmailVerified { - emailVerified = true - } else if hasEmailScope { - return identity, errors.New("missing \"email_verified\" claim") - } - } - hostedDomain, _ := claims["hd"].(string) - - if len(c.hostedDomains) > 0 { - found := false - for _, domain := range c.hostedDomains { - if hostedDomain == domain { - found = true - break - } - } - - if !found { - return identity, fmt.Errorf("oidc: unexpected hd 
claim %v", hostedDomain) - } - } - - cd := connectorData{ - RefreshToken: []byte(token.RefreshToken), - } - - connData, err := json.Marshal(&cd) - if err != nil { - return identity, fmt.Errorf("oidc: failed to encode connector data: %v", err) - } - - identity = connector.Identity{ - UserID: idToken.Subject, - Username: name, - Email: email, - EmailVerified: emailVerified, - ConnectorData: connData, - } - - if c.userIDKey != "" { - userID, found := claims[c.userIDKey].(string) - if !found { - return identity, fmt.Errorf("oidc: not found %v claim", c.userIDKey) - } - identity.UserID = userID - } - - if c.insecureEnableGroups { - vs, ok := claims["groups"].([]interface{}) - if ok { - for _, v := range vs { - if s, ok := v.(string); ok { - identity.Groups = append(identity.Groups, s) - } else { - return identity, errors.New("malformed \"groups\" claim") - } - } - } - } - - return identity, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/openshift/openshift.go b/vendor/github.com/dexidp/dex/connector/openshift/openshift.go deleted file mode 100644 index 0666935..0000000 --- a/vendor/github.com/dexidp/dex/connector/openshift/openshift.go +++ /dev/null @@ -1,255 +0,0 @@ -package openshift - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "strings" - "time" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" - - "github.com/dexidp/dex/storage/kubernetes/k8sapi" - - "golang.org/x/oauth2" -) - -// Config holds configuration options for OpenShift login -type Config struct { - Issuer string `json:"issuer"` - ClientID string `json:"clientID"` - ClientSecret string `json:"clientSecret"` - RedirectURI string `json:"redirectURI"` - Groups []string `json:"groups"` - InsecureCA bool `json:"insecureCA"` - RootCA string `json:"rootCA"` -} - -var ( - _ connector.CallbackConnector = (*openshiftConnector)(nil) -) - -type openshiftConnector struct { - apiURL string - redirectURI string - clientID string - clientSecret string - cancel context.CancelFunc - logger log.Logger - httpClient *http.Client - oauth2Config *oauth2.Config - insecureCA bool - rootCA string - groups []string -} - -type user struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - Identities []string `json:"identities" protobuf:"bytes,3,rep,name=identities"` - FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` - Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` -} - -// Open returns a connector which can be used to login users through an upstream -// OpenShift OAuth2 provider. 
-func (c *Config) Open(id string, logger log.Logger) (conn connector.Connector, err error) { - ctx, cancel := context.WithCancel(context.Background()) - - wellKnownURL := strings.TrimSuffix(c.Issuer, "/") + "/.well-known/oauth-authorization-server" - req, err := http.NewRequest(http.MethodGet, wellKnownURL, nil) - - openshiftConnector := openshiftConnector{ - apiURL: c.Issuer, - cancel: cancel, - clientID: c.ClientID, - clientSecret: c.ClientSecret, - insecureCA: c.InsecureCA, - logger: logger, - redirectURI: c.RedirectURI, - rootCA: c.RootCA, - groups: c.Groups, - } - - if openshiftConnector.httpClient, err = newHTTPClient(c.InsecureCA, c.RootCA); err != nil { - cancel() - return nil, fmt.Errorf("failed to create HTTP client: %v", err) - } - - var metadata struct { - Auth string `json:"authorization_endpoint"` - Token string `json:"token_endpoint"` - } - - resp, err := openshiftConnector.httpClient.Do(req.WithContext(ctx)) - - if err != nil { - cancel() - return nil, fmt.Errorf("failed to query OpenShift endpoint %v", err) - } - - defer resp.Body.Close() - - if err := json.NewDecoder(resp.Body).Decode(&metadata); err != nil { - cancel() - return nil, fmt.Errorf("discovery through endpoint %s failed to decode body: %v", - wellKnownURL, err) - } - - openshiftConnector.oauth2Config = &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - Endpoint: oauth2.Endpoint{ - AuthURL: metadata.Auth, TokenURL: metadata.Token, - }, - Scopes: []string{"user:info"}, - RedirectURL: c.RedirectURI, - } - return &openshiftConnector, nil -} - -func (c *openshiftConnector) Close() error { - c.cancel() - return nil -} - -// LoginURL returns the URL to redirect the user to login with. -func (c *openshiftConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) { - if c.redirectURI != callbackURL { - return "", fmt.Errorf("expected callback URL %q did not match the URL in the config %q", callbackURL, c.redirectURI) - } - return c.oauth2Config.AuthCodeURL(state), nil -} - -type oauth2Error struct { - error string - errorDescription string -} - -func (e *oauth2Error) Error() string { - if e.errorDescription == "" { - return e.error - } - return e.error + ": " + e.errorDescription -} - -// HandleCallback parses the request and returns the user's identity -func (c *openshiftConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) { - q := r.URL.Query() - if errType := q.Get("error"); errType != "" { - return identity, &oauth2Error{errType, q.Get("error_description")} - } - - ctx := r.Context() - if c.httpClient != nil { - ctx = context.WithValue(r.Context(), oauth2.HTTPClient, c.httpClient) - } - - token, err := c.oauth2Config.Exchange(ctx, q.Get("code")) - if err != nil { - return identity, fmt.Errorf("oidc: failed to get token: %v", err) - } - - client := c.oauth2Config.Client(ctx, token) - - user, err := c.user(ctx, client) - - if err != nil { - return identity, fmt.Errorf("openshift: get user: %v", err) - } - - if len(c.groups) > 0 { - validGroups := validateAllowedGroups(user.Groups, c.groups) - - if !validGroups { - return identity, fmt.Errorf("openshift: user %q is not in any of the required groups", user.Name) - } - } - - identity = connector.Identity{ - UserID: user.UID, - Username: user.Name, - PreferredUsername: user.Name, - Email: user.Name, - Groups: user.Groups, - } - - return identity, nil -} - -// user function returns the OpenShift user associated with the authenticated user -func (c *openshiftConnector) 
user(ctx context.Context, client *http.Client) (u user, err error) { - url := c.apiURL + "/apis/user.openshift.io/v1/users/~" - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return u, fmt.Errorf("new req: %v", err) - } - - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return u, fmt.Errorf("get URL %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return u, fmt.Errorf("read body: %v", err) - } - return u, fmt.Errorf("%s: %s", resp.Status, body) - } - - if err := json.NewDecoder(resp.Body).Decode(&u); err != nil { - return u, fmt.Errorf("JSON decode: %v", err) - } - - return u, err -} - -func validateAllowedGroups(userGroups, allowedGroups []string) bool { - matchingGroups := groups.Filter(userGroups, allowedGroups) - - return len(matchingGroups) != 0 -} - -// newHTTPClient returns a new HTTP client -func newHTTPClient(insecureCA bool, rootCA string) (*http.Client, error) { - tlsConfig := tls.Config{} - - if insecureCA { - tlsConfig = tls.Config{InsecureSkipVerify: true} - } else if rootCA != "" { - tlsConfig = tls.Config{RootCAs: x509.NewCertPool()} - rootCABytes, err := ioutil.ReadFile(rootCA) - if err != nil { - return nil, fmt.Errorf("failed to read root-ca: %v", err) - } - if !tlsConfig.RootCAs.AppendCertsFromPEM(rootCABytes) { - return nil, fmt.Errorf("no certs found in root CA file %q", rootCA) - } - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tlsConfig, - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - }, - }, nil -} diff --git a/vendor/github.com/dexidp/dex/connector/saml/saml.go b/vendor/github.com/dexidp/dex/connector/saml/saml.go deleted file mode 100644 index e0f3f93..0000000 --- a/vendor/github.com/dexidp/dex/connector/saml/saml.go +++ /dev/null @@ -1,635 +0,0 @@ -// Package saml contains login methods for SAML. 
-package saml - -import ( - "bytes" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/beevik/etree" - dsig "github.com/russellhaering/goxmldsig" - "github.com/russellhaering/goxmldsig/etreeutils" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/pkg/groups" - "github.com/dexidp/dex/pkg/log" -) - -// nolint -const ( - bindingRedirect = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" - bindingPOST = "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" - - nameIDFormatEmailAddress = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress" - nameIDFormatUnspecified = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" - nameIDFormatX509Subject = "urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName" - nameIDFormatWindowsDN = "urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName" - nameIDFormatEncrypted = "urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted" - nameIDFormatEntity = "urn:oasis:names:tc:SAML:2.0:nameid-format:entity" - nameIDFormatKerberos = "urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos" - nameIDFormatPersistent = "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" - nameIDformatTransient = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient" - - // top level status codes - statusCodeSuccess = "urn:oasis:names:tc:SAML:2.0:status:Success" - - // subject confirmation methods - subjectConfirmationMethodBearer = "urn:oasis:names:tc:SAML:2.0:cm:bearer" - - // allowed clock drift for timestamp validation - allowedClockDrift = time.Duration(30) * time.Second -) - -var ( - nameIDFormats = []string{ - nameIDFormatEmailAddress, - nameIDFormatUnspecified, - nameIDFormatX509Subject, - nameIDFormatWindowsDN, - nameIDFormatEncrypted, - nameIDFormatEntity, - nameIDFormatKerberos, - nameIDFormatPersistent, - nameIDformatTransient, - } - nameIDFormatLookup = make(map[string]string) -) - -func init() { - suffix := func(s, sep string) string { - if i := strings.LastIndex(s, sep); i > 0 { - return s[i+1:] - } - return s - } - for _, format := range nameIDFormats { - nameIDFormatLookup[suffix(format, ":")] = format - nameIDFormatLookup[format] = format - } -} - -// Config represents configuration options for the SAML provider. -type Config struct { - // TODO(ericchiang): A bunch of these fields could be auto-filled if - // we supported SAML metadata discovery. - // - // https://www.oasis-open.org/committees/download.php/35391/sstc-saml-metadata-errata-2.0-wd-04-diff.pdf - - EntityIssuer string `json:"entityIssuer"` - SSOIssuer string `json:"ssoIssuer"` - SSOURL string `json:"ssoURL"` - - // X509 CA file or raw data to verify XML signatures. - CA string `json:"ca"` - CAData []byte `json:"caData"` - - InsecureSkipSignatureValidation bool `json:"insecureSkipSignatureValidation"` - - // Assertion attribute names to lookup various claims with. - UsernameAttr string `json:"usernameAttr"` - EmailAttr string `json:"emailAttr"` - GroupsAttr string `json:"groupsAttr"` - // If GroupsDelim is supplied the connector assumes groups are returned as a - // single string instead of multiple attribute values. This delimiter will be - // used split the groups string. - GroupsDelim string `json:"groupsDelim"` - AllowedGroups []string `json:"allowedGroups"` - FilterGroups bool `json:"filterGroups"` - RedirectURI string `json:"redirectURI"` - - // Requested format of the NameID. The NameID value is is mapped to the ID Token - // 'sub' claim. 
- // - // This can be an abbreviated form of the full URI with just the last component. For - // example, if this value is set to "emailAddress" the format will resolve to: - // - // urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress - // - // If no value is specified, this value defaults to: - // - // urn:oasis:names:tc:SAML:2.0:nameid-format:persistent - // - NameIDPolicyFormat string `json:"nameIDPolicyFormat"` -} - -type certStore struct { - certs []*x509.Certificate -} - -func (c certStore) Certificates() (roots []*x509.Certificate, err error) { - return c.certs, nil -} - -// Open validates the config and returns a connector. It does not actually -// validate connectivity with the provider. -func (c *Config) Open(id string, logger log.Logger) (connector.Connector, error) { - return c.openConnector(logger) -} - -func (c *Config) openConnector(logger log.Logger) (*provider, error) { - requiredFields := []struct { - name, val string - }{ - {"ssoURL", c.SSOURL}, - {"usernameAttr", c.UsernameAttr}, - {"emailAttr", c.EmailAttr}, - {"redirectURI", c.RedirectURI}, - } - var missing []string - for _, f := range requiredFields { - if f.val == "" { - missing = append(missing, f.name) - } - } - switch len(missing) { - case 0: - case 1: - return nil, fmt.Errorf("missing required field %q", missing[0]) - default: - return nil, fmt.Errorf("missing required fields %q", missing) - } - - p := &provider{ - entityIssuer: c.EntityIssuer, - ssoIssuer: c.SSOIssuer, - ssoURL: c.SSOURL, - now: time.Now, - usernameAttr: c.UsernameAttr, - emailAttr: c.EmailAttr, - groupsAttr: c.GroupsAttr, - groupsDelim: c.GroupsDelim, - allowedGroups: c.AllowedGroups, - filterGroups: c.FilterGroups, - redirectURI: c.RedirectURI, - logger: logger, - - nameIDPolicyFormat: c.NameIDPolicyFormat, - } - - if p.nameIDPolicyFormat == "" { - p.nameIDPolicyFormat = nameIDFormatPersistent - } else { - if format, ok := nameIDFormatLookup[p.nameIDPolicyFormat]; ok { - p.nameIDPolicyFormat = format - } else { - return nil, fmt.Errorf("invalid nameIDPolicyFormat: %q", p.nameIDPolicyFormat) - } - } - - if !c.InsecureSkipSignatureValidation { - if (c.CA == "") == (c.CAData == nil) { - return nil, errors.New("must provide either 'ca' or 'caData'") - } - - var caData []byte - if c.CA != "" { - data, err := ioutil.ReadFile(c.CA) - if err != nil { - return nil, fmt.Errorf("read ca file: %v", err) - } - caData = data - } else { - caData = c.CAData - } - - var ( - certs []*x509.Certificate - block *pem.Block - ) - for { - block, caData = pem.Decode(caData) - if block == nil { - caData = bytes.TrimSpace(caData) - if len(caData) > 0 { // if there's some left, we've been given bad caData - return nil, fmt.Errorf("parse cert: trailing data: %q", string(caData)) - } - break - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parse cert: %v", err) - } - certs = append(certs, cert) - } - if len(certs) == 0 { - return nil, errors.New("no certificates found in ca data") - } - p.validator = dsig.NewDefaultValidationContext(certStore{certs}) - } - return p, nil -} - -type provider struct { - entityIssuer string - ssoIssuer string - ssoURL string - - now func() time.Time - - // If nil, don't do signature validation. 
- validator *dsig.ValidationContext - - // Attribute mappings - usernameAttr string - emailAttr string - groupsAttr string - groupsDelim string - allowedGroups []string - filterGroups bool - - redirectURI string - - nameIDPolicyFormat string - - logger log.Logger -} - -func (p *provider) POSTData(s connector.Scopes, id string) (action, value string, err error) { - r := &authnRequest{ - ProtocolBinding: bindingPOST, - ID: id, - IssueInstant: xmlTime(p.now()), - Destination: p.ssoURL, - NameIDPolicy: &nameIDPolicy{ - AllowCreate: true, - Format: p.nameIDPolicyFormat, - }, - AssertionConsumerServiceURL: p.redirectURI, - } - if p.entityIssuer != "" { - // Issuer for the request is optional. For example, okta always ignores - // this value. - r.Issuer = &issuer{Issuer: p.entityIssuer} - } - - data, err := xml.MarshalIndent(r, "", " ") - if err != nil { - return "", "", fmt.Errorf("marshal authn request: %v", err) - } - - // See: https://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf - // "3.5.4 Message Encoding" - return p.ssoURL, base64.StdEncoding.EncodeToString(data), nil -} - -// HandlePOST interprets a request from a SAML provider attempting to verify a -// user's identity. -// -// The steps taken are: -// -// * Verify signature on XML document (or verify sig on assertion elements). -// * Verify various parts of the Assertion element. Conditions, audience, etc. -// * Map the Assertion's attribute elements to user info. -// -func (p *provider) HandlePOST(s connector.Scopes, samlResponse, inResponseTo string) (ident connector.Identity, err error) { - rawResp, err := base64.StdEncoding.DecodeString(samlResponse) - if err != nil { - return ident, fmt.Errorf("decode response: %v", err) - } - - // Root element is allowed to not be signed if the Assertion element is. - rootElementSigned := true - if p.validator != nil { - rawResp, rootElementSigned, err = verifyResponseSig(p.validator, rawResp) - if err != nil { - return ident, fmt.Errorf("verify signature: %v", err) - } - } - - var resp response - if err := xml.Unmarshal(rawResp, &resp); err != nil { - return ident, fmt.Errorf("unmarshal response: %v", err) - } - - // If the root element isn't signed, there's no reason to inspect these - // elements. They're not verified. - if rootElementSigned { - if p.ssoIssuer != "" && resp.Issuer != nil && resp.Issuer.Issuer != p.ssoIssuer { - return ident, fmt.Errorf("expected Issuer value %s, got %s", p.ssoIssuer, resp.Issuer.Issuer) - } - - // Verify InResponseTo value matches the expected ID associated with - // the RelayState. - if resp.InResponseTo != inResponseTo { - return ident, fmt.Errorf("expected InResponseTo value %s, got %s", inResponseTo, resp.InResponseTo) - } - - // Destination is optional. - if resp.Destination != "" && resp.Destination != p.redirectURI { - return ident, fmt.Errorf("expected destination %q got %q", p.redirectURI, resp.Destination) - } - - // Status is a required element. - if resp.Status == nil { - return ident, fmt.Errorf("response did not contain a Status element") - } - - if err = p.validateStatus(resp.Status); err != nil { - return ident, err - } - } - - assertion := resp.Assertion - if assertion == nil { - return ident, fmt.Errorf("response did not contain an assertion") - } - - // Subject is usually optional, but we need it for the user ID, so complain - // if it's not present. 
- subject := assertion.Subject - if subject == nil { - return ident, fmt.Errorf("response did not contain a subject") - } - - // Validate that the response is to the request we originally sent. - if err = p.validateSubject(subject, inResponseTo); err != nil { - return ident, err - } - - // Conditions element is optional, but must be validated if present. - if assertion.Conditions != nil { - // Validate that dex is the intended audience of this response. - if err = p.validateConditions(assertion.Conditions); err != nil { - return ident, err - } - } - - switch { - case subject.NameID != nil: - if ident.UserID = subject.NameID.Value; ident.UserID == "" { - return ident, fmt.Errorf("NameID element does not contain a value") - } - default: - return ident, fmt.Errorf("subject does not contain an NameID element") - } - - // After verifying the assertion, map data in the attribute statements to - // various user info. - attributes := assertion.AttributeStatement - if attributes == nil { - return ident, fmt.Errorf("response did not contain a AttributeStatement") - } - - // Log the actual attributes we got back from the server. This helps debug - // configuration errors on the server side, where the SAML server doesn't - // send us the correct attributes. - p.logger.Infof("parsed and verified saml response attributes %s", attributes) - - // Grab the email. - if ident.Email, _ = attributes.get(p.emailAttr); ident.Email == "" { - return ident, fmt.Errorf("no attribute with name %q: %s", p.emailAttr, attributes.names()) - } - // TODO(ericchiang): Does SAML have an email_verified equivalent? - ident.EmailVerified = true - - // Grab the username. - if ident.Username, _ = attributes.get(p.usernameAttr); ident.Username == "" { - return ident, fmt.Errorf("no attribute with name %q: %s", p.usernameAttr, attributes.names()) - } - - if len(p.allowedGroups) == 0 && (!s.Groups || p.groupsAttr == "") { - // Groups not requested or not configured. We're done. - return ident, nil - } - - if len(p.allowedGroups) > 0 && (!s.Groups || p.groupsAttr == "") { - // allowedGroups set but no groups or groupsAttr. Disallowing. - return ident, fmt.Errorf("user not a member of allowed groups") - } - - // Grab the groups. - if p.groupsDelim != "" { - groupsStr, ok := attributes.get(p.groupsAttr) - if !ok { - return ident, fmt.Errorf("no attribute with name %q: %s", p.groupsAttr, attributes.names()) - } - // TODO(ericchiang): Do we need to further trim whitespace? - ident.Groups = strings.Split(groupsStr, p.groupsDelim) - } else { - groups, ok := attributes.all(p.groupsAttr) - if !ok { - return ident, fmt.Errorf("no attribute with name %q: %s", p.groupsAttr, attributes.names()) - } - ident.Groups = groups - } - - if len(p.allowedGroups) == 0 { - // No allowed groups set, just return the ident - return ident, nil - } - - // Look for membership in one of the allowed groups - groupMatches := groups.Filter(ident.Groups, p.allowedGroups) - - if len(groupMatches) == 0 { - // No group membership matches found, disallowing - return ident, fmt.Errorf("user not a member of allowed groups") - } - - if p.filterGroups { - ident.Groups = groupMatches - } - - // Otherwise, we're good - return ident, nil -} - -// validateStatus verifies that the response has a good status code or -// formats a human readble error based on the bad status. 
-func (p *provider) validateStatus(status *status) error { - // StatusCode is mandatory in the Status type - statusCode := status.StatusCode - if statusCode == nil { - return fmt.Errorf("response did not contain a StatusCode") - } - - if statusCode.Value != statusCodeSuccess { - parts := strings.Split(statusCode.Value, ":") - lastPart := parts[len(parts)-1] - errorMessage := fmt.Sprintf("status code of the Response was not Success, was %q", lastPart) - statusMessage := status.StatusMessage - if statusMessage != nil && statusMessage.Value != "" { - errorMessage += " -> " + statusMessage.Value - } - return fmt.Errorf(errorMessage) - } - return nil -} - -// validateSubject ensures the response is to the request we expect. -// -// This is described in the spec "Profiles for the OASIS Security -// Assertion Markup Language" in section 3.3 Bearer. -// see https://www.oasis-open.org/committees/download.php/35389/sstc-saml-profiles-errata-2.0-wd-06-diff.pdf -// -// Some of these fields are optional, but we're going to be strict here since -// we have no other way of guarenteeing that this is actually the response to -// the request we expect. -func (p *provider) validateSubject(subject *subject, inResponseTo string) error { - // Optional according to the spec, but again, we're going to be strict here. - if len(subject.SubjectConfirmations) == 0 { - return fmt.Errorf("subject contained no SubjectConfirmations") - } - - var errs []error - // One of these must match our assumptions, not all. - for _, c := range subject.SubjectConfirmations { - err := func() error { - if c.Method != subjectConfirmationMethodBearer { - return fmt.Errorf("unexpected subject confirmation method: %v", c.Method) - } - - data := c.SubjectConfirmationData - if data == nil { - return fmt.Errorf("SubjectConfirmation contained no SubjectConfirmationData") - } - if data.InResponseTo != inResponseTo { - return fmt.Errorf("expected SubjectConfirmationData InResponseTo value %q, got %q", inResponseTo, data.InResponseTo) - } - - notBefore := time.Time(data.NotBefore) - notOnOrAfter := time.Time(data.NotOnOrAfter) - now := p.now() - if !notBefore.IsZero() && before(now, notBefore) { - return fmt.Errorf("at %s got response that cannot be processed before %s", now, notBefore) - } - if !notOnOrAfter.IsZero() && after(now, notOnOrAfter) { - return fmt.Errorf("at %s got response that cannot be processed because it expired at %s", now, notOnOrAfter) - } - if r := data.Recipient; r != "" && r != p.redirectURI { - return fmt.Errorf("expected Recipient %q got %q", p.redirectURI, r) - } - return nil - }() - if err == nil { - // Subject is valid. - return nil - } - errs = append(errs, err) - } - - if len(errs) == 1 { - return fmt.Errorf("failed to validate subject confirmation: %v", errs[0]) - } - return fmt.Errorf("failed to validate subject confirmation: %v", errs) -} - -// validationConditions ensures that dex is the intended audience -// for the request, and not another service provider. -// -// See: https://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf -// "2.3.3 Element " -func (p *provider) validateConditions(conditions *conditions) error { - // Ensure the conditions haven't expired. 
- now := p.now() - notBefore := time.Time(conditions.NotBefore) - if !notBefore.IsZero() && before(now, notBefore) { - return fmt.Errorf("at %s got response that cannot be processed before %s", now, notBefore) - } - - notOnOrAfter := time.Time(conditions.NotOnOrAfter) - if !notOnOrAfter.IsZero() && after(now, notOnOrAfter) { - return fmt.Errorf("at %s got response that cannot be processed because it expired at %s", now, notOnOrAfter) - } - - // Sometimes, dex's issuer string can be different than the redirect URI, - // but if dex's issuer isn't explicitly provided assume the redirect URI. - expAud := p.entityIssuer - if expAud == "" { - expAud = p.redirectURI - } - - // AudienceRestriction elements indicate the intended audience(s) of an - // assertion. If dex isn't in these audiences, reject the assertion. - // - // Note that if there are multiple AudienceRestriction elements, each must - // individually contain dex in their audience list. - for _, r := range conditions.AudienceRestriction { - values := make([]string, len(r.Audiences)) - issuerInAudiences := false - for i, aud := range r.Audiences { - if aud.Value == expAud { - issuerInAudiences = true - break - } - values[i] = aud.Value - } - - if !issuerInAudiences { - return fmt.Errorf("required audience %s was not in Response audiences %s", expAud, values) - } - } - return nil -} - -// verifyResponseSig attempts to verify the signature of a SAML response or -// the assertion. -// -// If the root element is properly signed, this method returns it. -// -// The SAML spec requires supporting responses where the root element is -// unverified, but the sub elements are signed. In these cases, -// this method returns rootVerified=false to indicate that the -// elements should be trusted, but all other elements MUST be ignored. -// -// Note: we still don't support multiple tags. If there are -// multiple present this code will only process the first. -func verifyResponseSig(validator *dsig.ValidationContext, data []byte) (signed []byte, rootVerified bool, err error) { - doc := etree.NewDocument() - if err = doc.ReadFromBytes(data); err != nil { - return nil, false, fmt.Errorf("parse document: %v", err) - } - - response := doc.Root() - transformedResponse, err := validator.Validate(response) - if err == nil { - // Root element is verified, return it. - doc.SetRoot(transformedResponse) - signed, err = doc.WriteToBytes() - return signed, true, err - } - - // Ensures xmlns are copied down to the assertion element when they are defined in the root - // - // TODO: Only select from child elements of the root. - assertion, err := etreeutils.NSSelectOne(response, "urn:oasis:names:tc:SAML:2.0:assertion", "Assertion") - if err != nil { - return nil, false, fmt.Errorf("response does not contain an Assertion element") - } - transformedAssertion, err := validator.Validate(assertion) - if err != nil { - return nil, false, fmt.Errorf("response does not contain a valid signature element: %v", err) - } - - // Verified an assertion but not the response. Can't trust any child elements, - // except the assertion. Remove them all. - for _, el := range response.ChildElements() { - response.RemoveChild(el) - } - - // We still return the full element, even though it's unverified - // because the element is not a valid XML document on its own. - // It still requires the root element to define things like namespaces. 
- response.AddChild(transformedAssertion) - signed, err = doc.WriteToBytes() - return signed, false, err -} - -// before determines if a given time is before the current time, with an -// allowed clock drift. -func before(now, notBefore time.Time) bool { - return now.Add(allowedClockDrift).Before(notBefore) -} - -// after determines if a given time is after the current time, with an -// allowed clock drift. -func after(now, notOnOrAfter time.Time) bool { - return now.After(notOnOrAfter.Add(allowedClockDrift)) -} diff --git a/vendor/github.com/dexidp/dex/connector/saml/types.go b/vendor/github.com/dexidp/dex/connector/saml/types.go deleted file mode 100644 index c8d7e7f..0000000 --- a/vendor/github.com/dexidp/dex/connector/saml/types.go +++ /dev/null @@ -1,277 +0,0 @@ -package saml - -import ( - "bytes" - "encoding/xml" - "fmt" - "time" -) - -const timeFormat = "2006-01-02T15:04:05Z" - -type xmlTime time.Time - -func (t xmlTime) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { - return xml.Attr{ - Name: name, - Value: time.Time(t).UTC().Format(timeFormat), - }, nil -} - -func (t *xmlTime) UnmarshalXMLAttr(attr xml.Attr) error { - got, err := time.Parse(timeFormat, attr.Value) - if err != nil { - return err - } - *t = xmlTime(got) - return nil -} - -type samlVersion struct{} - -func (s samlVersion) MarshalXMLAttr(name xml.Name) (xml.Attr, error) { - return xml.Attr{ - Name: name, - Value: "2.0", - }, nil -} - -func (s *samlVersion) UnmarshalXMLAttr(attr xml.Attr) error { - if attr.Value != "2.0" { - return fmt.Errorf(`saml version expected "2.0" got %q`, attr.Value) - } - return nil -} - -type authnRequest struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnRequest"` - - ID string `xml:"ID,attr"` - Version samlVersion `xml:"Version,attr"` - - ProviderName string `xml:"ProviderName,attr,omitempty"` - IssueInstant xmlTime `xml:"IssueInstant,attr,omitempty"` - Consent bool `xml:"Consent,attr,omitempty"` - Destination string `xml:"Destination,attr,omitempty"` - - ForceAuthn bool `xml:"ForceAuthn,attr,omitempty"` - IsPassive bool `xml:"IsPassive,attr,omitempty"` - ProtocolBinding string `xml:"ProtocolBinding,attr,omitempty"` - - AssertionConsumerServiceURL string `xml:"AssertionConsumerServiceURL,attr,omitempty"` - - Subject *subject `xml:"Subject,omitempty"` - Issuer *issuer `xml:"Issuer,omitempty"` - NameIDPolicy *nameIDPolicy `xml:"NameIDPolicy,omitempty"` - - // TODO(ericchiang): Make this configurable and determine appropriate default values. - RequestAuthnContext *requestAuthnContext `xml:"RequestAuthnContext,omitempty"` -} - -type subject struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Subject"` - - NameID *nameID `xml:"NameID,omitempty"` - SubjectConfirmations []subjectConfirmation `xml:"SubjectConfirmation"` - - // TODO(ericchiang): Do we need to deal with baseID? 
-} - -type nameID struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion NameID"` - - Format string `xml:"Format,omitempty"` - Value string `xml:",chardata"` -} - -type subjectConfirmationData struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion SubjectConfirmationData"` - - NotBefore xmlTime `xml:"NotBefore,attr,omitempty"` - NotOnOrAfter xmlTime `xml:"NotOnOrAfter,attr,omitempty"` - Recipient string `xml:"Recipient,attr,omitempty"` - InResponseTo string `xml:"InResponseTo,attr,omitempty"` -} - -type subjectConfirmation struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion SubjectConfirmation"` - - Method string `xml:"Method,attr,omitempty"` - SubjectConfirmationData *subjectConfirmationData `xml:"SubjectConfirmationData,omitempty"` -} - -type audience struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Audience"` - Value string `xml:",chardata"` -} - -type audienceRestriction struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion AudienceRestriction"` - - Audiences []audience `xml:"Audience"` -} - -type conditions struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Conditions"` - - NotBefore xmlTime `xml:"NotBefore,attr,omitempty"` - NotOnOrAfter xmlTime `xml:"NotOnOrAfter,attr,omitempty"` - - AudienceRestriction []audienceRestriction `xml:"AudienceRestriction,omitempty"` -} - -type statusCode struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol StatusCode"` - - Value string `xml:"Value,attr,omitempty"` -} - -type statusMessage struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol StatusMessage"` - - Value string `xml:",chardata"` -} - -type status struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol Status"` - - StatusCode *statusCode `xml:"StatusCode"` - StatusMessage *statusMessage `xml:"StatusMessage,omitempty"` -} - -type issuer struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Issuer"` - Issuer string `xml:",chardata"` -} - -type nameIDPolicy struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol NameIDPolicy"` - AllowCreate bool `xml:"AllowCreate,attr,omitempty"` - Format string `xml:"Format,attr,omitempty"` -} - -type requestAuthnContext struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol RequestAuthnContext"` - - AuthnContextClassRefs []authnContextClassRef -} - -type authnContextClassRef struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol AuthnContextClassRef"` - Value string `xml:",chardata"` -} - -type response struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:protocol Response"` - - ID string `xml:"ID,attr"` - InResponseTo string `xml:"InResponseTo,attr"` - Version samlVersion `xml:"Version,attr"` - - Destination string `xml:"Destination,attr,omitempty"` - - Issuer *issuer `xml:"Issuer,omitempty"` - - Status *status `xml:"Status"` - - // TODO(ericchiang): How do deal with multiple assertions? 
- Assertion *assertion `xml:"Assertion,omitempty"` -} - -type assertion struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Assertion"` - - Version samlVersion `xml:"Version,attr"` - ID string `xml:"ID,attr"` - IssueInstance xmlTime `xml:"IssueInstance,attr"` - - Issuer issuer `xml:"Issuer"` - - Subject *subject `xml:"Subject,omitempty"` - - Conditions *conditions `xml:"Conditions"` - - AttributeStatement *attributeStatement `xml:"AttributeStatement,omitempty"` -} - -type attributeStatement struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion AttributeStatement"` - - Attributes []attribute `xml:"Attribute"` -} - -func (a *attributeStatement) get(name string) (s string, ok bool) { - for _, attr := range a.Attributes { - if attr.Name == name { - ok = true - if len(attr.AttributeValues) > 0 { - return attr.AttributeValues[0].Value, true - } - } - } - return -} - -func (a *attributeStatement) all(name string) (s []string, ok bool) { - for _, attr := range a.Attributes { - if attr.Name == name { - ok = true - for _, val := range attr.AttributeValues { - s = append(s, val.Value) - } - } - } - return -} - -// names list the names of all attributes in the attribute statement. -func (a *attributeStatement) names() []string { - s := make([]string, len(a.Attributes)) - - for i, attr := range a.Attributes { - s[i] = attr.Name - } - return s -} - -// String is a formatter for logging an attribute statement's sub statements. -func (a *attributeStatement) String() string { - buff := new(bytes.Buffer) - for i, attr := range a.Attributes { - if i != 0 { - buff.WriteString(", ") - } - buff.WriteString(attr.String()) - } - return buff.String() -} - -type attribute struct { - XMLName xml.Name `xml:"urn:oasis:names:tc:SAML:2.0:assertion Attribute"` - - Name string `xml:"Name,attr"` - - NameFormat string `xml:"NameFormat,attr,omitempty"` - FriendlyName string `xml:"FriendlyName,attr,omitempty"` - - AttributeValues []attributeValue `xml:"AttributeValue,omitempty"` -} - -type attributeValue struct { - XMLName xml.Name `xml:"AttributeValue"` - Value string `xml:",chardata"` -} - -func (a attribute) String() string { - if len(a.AttributeValues) == 1 { - // "email" = "jane.doe@coreos.com" - return fmt.Sprintf("%q = %q", a.Name, a.AttributeValues[0].Value) - } - values := make([]string, len(a.AttributeValues)) - for i, av := range a.AttributeValues { - values[i] = av.Value - } - - // "groups" = ["engineering", "docs"] - return fmt.Sprintf("%q = %q", a.Name, values) -} diff --git a/vendor/github.com/dexidp/dex/pkg/groups/groups.go b/vendor/github.com/dexidp/dex/pkg/groups/groups.go deleted file mode 100644 index 5dde65a..0000000 --- a/vendor/github.com/dexidp/dex/pkg/groups/groups.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package groups contains helper functions related to groups -package groups - -// Filter filters out any groups of given that are not in required. Thus it may -// happen that the resulting slice is empty. 
-func Filter(given, required []string) []string { - groups := []string{} - groupFilter := make(map[string]struct{}) - for _, group := range required { - groupFilter[group] = struct{}{} - } - for _, group := range given { - if _, ok := groupFilter[group]; ok { - groups = append(groups, group) - } - } - return groups -} diff --git a/vendor/github.com/dexidp/dex/pkg/log/logger.go b/vendor/github.com/dexidp/dex/pkg/log/logger.go deleted file mode 100644 index 4f3cdd3..0000000 --- a/vendor/github.com/dexidp/dex/pkg/log/logger.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package log provides a logger interface for logger libraries -// so that dex does not depend on any of them directly. -// It also includes a default implementation using Logrus (used by dex previously). -package log - -// Logger serves as an adapter interface for logger libraries -// so that dex does not depend on any of them directly. -type Logger interface { - Debug(args ...interface{}) - Info(args ...interface{}) - Warn(args ...interface{}) - Error(args ...interface{}) - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) -} diff --git a/vendor/github.com/dexidp/dex/server/api.go b/vendor/github.com/dexidp/dex/server/api.go deleted file mode 100644 index 55784b9..0000000 --- a/vendor/github.com/dexidp/dex/server/api.go +++ /dev/null @@ -1,368 +0,0 @@ -package server - -import ( - "context" - "errors" - "fmt" - - "golang.org/x/crypto/bcrypt" - - "github.com/dexidp/dex/api" - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/server/internal" - "github.com/dexidp/dex/storage" - "github.com/dexidp/dex/version" -) - -// apiVersion increases every time a new call is added to the API. Clients should use this info -// to determine if the server supports specific features. -const apiVersion = 2 - -const ( - // recCost is the recommended bcrypt cost, which balances hash strength and - // efficiency. - recCost = 12 - - // upBoundCost is a sane upper bound on bcrypt cost determined by benchmarking: - // high enough to ensure secure encryption, low enough to not put unnecessary - // load on a dex server. - upBoundCost = 16 -) - -// NewAPI returns a server which implements the gRPC API interface. 
-func NewAPI(s storage.Storage, logger log.Logger) api.DexServer { - return dexAPI{ - s: s, - logger: logger, - } -} - -type dexAPI struct { - s storage.Storage - logger log.Logger -} - -func (d dexAPI) CreateClient(ctx context.Context, req *api.CreateClientReq) (*api.CreateClientResp, error) { - if req.Client == nil { - return nil, errors.New("no client supplied") - } - - if req.Client.Id == "" { - req.Client.Id = storage.NewID() - } - if req.Client.Secret == "" { - req.Client.Secret = storage.NewID() + storage.NewID() - } - - c := storage.Client{ - ID: req.Client.Id, - Secret: req.Client.Secret, - RedirectURIs: req.Client.RedirectUris, - TrustedPeers: req.Client.TrustedPeers, - Public: req.Client.Public, - Name: req.Client.Name, - LogoURL: req.Client.LogoUrl, - } - if err := d.s.CreateClient(c); err != nil { - if err == storage.ErrAlreadyExists { - return &api.CreateClientResp{AlreadyExists: true}, nil - } - d.logger.Errorf("api: failed to create client: %v", err) - return nil, fmt.Errorf("create client: %v", err) - } - - return &api.CreateClientResp{ - Client: req.Client, - }, nil -} - -func (d dexAPI) UpdateClient(ctx context.Context, req *api.UpdateClientReq) (*api.UpdateClientResp, error) { - if req.Id == "" { - return nil, errors.New("update client: no client ID supplied") - } - - err := d.s.UpdateClient(req.Id, func(old storage.Client) (storage.Client, error) { - if req.RedirectUris != nil { - old.RedirectURIs = req.RedirectUris - } - if req.TrustedPeers != nil { - old.TrustedPeers = req.TrustedPeers - } - if req.Name != "" { - old.Name = req.Name - } - if req.LogoUrl != "" { - old.LogoURL = req.LogoUrl - } - return old, nil - }) - - if err != nil { - if err == storage.ErrNotFound { - return &api.UpdateClientResp{NotFound: true}, nil - } - d.logger.Errorf("api: failed to update the client: %v", err) - return nil, fmt.Errorf("update client: %v", err) - } - return &api.UpdateClientResp{}, nil -} - -func (d dexAPI) DeleteClient(ctx context.Context, req *api.DeleteClientReq) (*api.DeleteClientResp, error) { - err := d.s.DeleteClient(req.Id) - if err != nil { - if err == storage.ErrNotFound { - return &api.DeleteClientResp{NotFound: true}, nil - } - d.logger.Errorf("api: failed to delete client: %v", err) - return nil, fmt.Errorf("delete client: %v", err) - } - return &api.DeleteClientResp{}, nil -} - -// checkCost returns an error if the hash provided does not meet lower or upper -// bound cost requirements. 
-func checkCost(hash []byte) error { - actual, err := bcrypt.Cost(hash) - if err != nil { - return fmt.Errorf("parsing bcrypt hash: %v", err) - } - if actual < bcrypt.DefaultCost { - return fmt.Errorf("given hash cost = %d does not meet minimum cost requirement = %d", actual, bcrypt.DefaultCost) - } - if actual > upBoundCost { - return fmt.Errorf("given hash cost = %d is above upper bound cost = %d, recommended cost = %d", actual, upBoundCost, recCost) - } - return nil -} - -func (d dexAPI) CreatePassword(ctx context.Context, req *api.CreatePasswordReq) (*api.CreatePasswordResp, error) { - if req.Password == nil { - return nil, errors.New("no password supplied") - } - if req.Password.UserId == "" { - return nil, errors.New("no user ID supplied") - } - if req.Password.Hash != nil { - if err := checkCost(req.Password.Hash); err != nil { - return nil, err - } - } else { - return nil, errors.New("no hash of password supplied") - } - - p := storage.Password{ - Email: req.Password.Email, - Hash: req.Password.Hash, - Username: req.Password.Username, - UserID: req.Password.UserId, - } - if err := d.s.CreatePassword(p); err != nil { - if err == storage.ErrAlreadyExists { - return &api.CreatePasswordResp{AlreadyExists: true}, nil - } - d.logger.Errorf("api: failed to create password: %v", err) - return nil, fmt.Errorf("create password: %v", err) - } - - return &api.CreatePasswordResp{}, nil -} - -func (d dexAPI) UpdatePassword(ctx context.Context, req *api.UpdatePasswordReq) (*api.UpdatePasswordResp, error) { - if req.Email == "" { - return nil, errors.New("no email supplied") - } - if req.NewHash == nil && req.NewUsername == "" { - return nil, errors.New("nothing to update") - } - - if req.NewHash != nil { - if err := checkCost(req.NewHash); err != nil { - return nil, err - } - } - - updater := func(old storage.Password) (storage.Password, error) { - if req.NewHash != nil { - old.Hash = req.NewHash - } - - if req.NewUsername != "" { - old.Username = req.NewUsername - } - - return old, nil - } - - if err := d.s.UpdatePassword(req.Email, updater); err != nil { - if err == storage.ErrNotFound { - return &api.UpdatePasswordResp{NotFound: true}, nil - } - d.logger.Errorf("api: failed to update password: %v", err) - return nil, fmt.Errorf("update password: %v", err) - } - - return &api.UpdatePasswordResp{}, nil -} - -func (d dexAPI) DeletePassword(ctx context.Context, req *api.DeletePasswordReq) (*api.DeletePasswordResp, error) { - if req.Email == "" { - return nil, errors.New("no email supplied") - } - - err := d.s.DeletePassword(req.Email) - if err != nil { - if err == storage.ErrNotFound { - return &api.DeletePasswordResp{NotFound: true}, nil - } - d.logger.Errorf("api: failed to delete password: %v", err) - return nil, fmt.Errorf("delete password: %v", err) - } - return &api.DeletePasswordResp{}, nil -} - -func (d dexAPI) GetVersion(ctx context.Context, req *api.VersionReq) (*api.VersionResp, error) { - return &api.VersionResp{ - Server: version.Version, - Api: apiVersion, - }, nil -} - -func (d dexAPI) ListPasswords(ctx context.Context, req *api.ListPasswordReq) (*api.ListPasswordResp, error) { - passwordList, err := d.s.ListPasswords() - if err != nil { - d.logger.Errorf("api: failed to list passwords: %v", err) - return nil, fmt.Errorf("list passwords: %v", err) - } - - var passwords []*api.Password - for _, password := range passwordList { - p := api.Password{ - Email: password.Email, - Username: password.Username, - UserId: password.UserID, - } - passwords = append(passwords, &p) - } - - return 
&api.ListPasswordResp{ - Passwords: passwords, - }, nil -} - -func (d dexAPI) VerifyPassword(ctx context.Context, req *api.VerifyPasswordReq) (*api.VerifyPasswordResp, error) { - if req.Email == "" { - return nil, errors.New("no email supplied") - } - - if req.Password == "" { - return nil, errors.New("no password to verify supplied") - } - - password, err := d.s.GetPassword(req.Email) - if err != nil { - if err == storage.ErrNotFound { - return &api.VerifyPasswordResp{ - NotFound: true, - }, nil - } - d.logger.Errorf("api: there was an error retrieving the password: %v", err) - return nil, fmt.Errorf("verify password: %v", err) - } - - if err := bcrypt.CompareHashAndPassword(password.Hash, []byte(req.Password)); err != nil { - d.logger.Infof("api: password check failed: %v", err) - return &api.VerifyPasswordResp{ - Verified: false, - }, nil - } - return &api.VerifyPasswordResp{ - Verified: true, - }, nil -} - -func (d dexAPI) ListRefresh(ctx context.Context, req *api.ListRefreshReq) (*api.ListRefreshResp, error) { - id := new(internal.IDTokenSubject) - if err := internal.Unmarshal(req.UserId, id); err != nil { - d.logger.Errorf("api: failed to unmarshal ID Token subject: %v", err) - return nil, err - } - - var refreshTokenRefs []*api.RefreshTokenRef - offlineSessions, err := d.s.GetOfflineSessions(id.UserId, id.ConnId) - if err != nil { - if err == storage.ErrNotFound { - // This means that this user-client pair does not have a refresh token yet. - // An empty list should be returned instead of an error. - return &api.ListRefreshResp{ - RefreshTokens: refreshTokenRefs, - }, nil - } - d.logger.Errorf("api: failed to list refresh tokens %t here : %v", err == storage.ErrNotFound, err) - return nil, err - } - - for _, session := range offlineSessions.Refresh { - r := api.RefreshTokenRef{ - Id: session.ID, - ClientId: session.ClientID, - CreatedAt: session.CreatedAt.Unix(), - LastUsed: session.LastUsed.Unix(), - } - refreshTokenRefs = append(refreshTokenRefs, &r) - } - - return &api.ListRefreshResp{ - RefreshTokens: refreshTokenRefs, - }, nil -} - -func (d dexAPI) RevokeRefresh(ctx context.Context, req *api.RevokeRefreshReq) (*api.RevokeRefreshResp, error) { - id := new(internal.IDTokenSubject) - if err := internal.Unmarshal(req.UserId, id); err != nil { - d.logger.Errorf("api: failed to unmarshal ID Token subject: %v", err) - return nil, err - } - - var ( - refreshID string - notFound bool - ) - updater := func(old storage.OfflineSessions) (storage.OfflineSessions, error) { - refreshRef := old.Refresh[req.ClientId] - if refreshRef == nil || refreshRef.ID == "" { - d.logger.Errorf("api: refresh token issued to client %q for user %q not found for deletion", req.ClientId, id.UserId) - notFound = true - return old, storage.ErrNotFound - } - - refreshID = refreshRef.ID - - // Remove entry from Refresh list of the OfflineSession object. - delete(old.Refresh, req.ClientId) - - return old, nil - } - - if err := d.s.UpdateOfflineSessions(id.UserId, id.ConnId, updater); err != nil { - if err == storage.ErrNotFound { - return &api.RevokeRefreshResp{NotFound: true}, nil - } - d.logger.Errorf("api: failed to update offline session object: %v", err) - return nil, err - } - - if notFound { - return &api.RevokeRefreshResp{NotFound: true}, nil - } - - // Delete the refresh token from the storage - // - // TODO(ericchiang): we don't have any good recourse if this call fails. - // Consider garbage collection of refresh tokens with no associated ref. 
- if err := d.s.DeleteRefresh(refreshID); err != nil { - d.logger.Errorf("failed to delete refresh token: %v", err) - return nil, err - } - - return &api.RevokeRefreshResp{}, nil -} diff --git a/vendor/github.com/dexidp/dex/server/doc.go b/vendor/github.com/dexidp/dex/server/doc.go deleted file mode 100644 index 0f662e5..0000000 --- a/vendor/github.com/dexidp/dex/server/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package server implements an OpenID Connect server with federated logins. -package server diff --git a/vendor/github.com/dexidp/dex/server/handlers.go b/vendor/github.com/dexidp/dex/server/handlers.go deleted file mode 100644 index 5512d87..0000000 --- a/vendor/github.com/dexidp/dex/server/handlers.go +++ /dev/null @@ -1,1417 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "path" - "sort" - "strconv" - "strings" - "sync" - "time" - - oidc "github.com/coreos/go-oidc" - "github.com/gorilla/mux" - jose "gopkg.in/square/go-jose.v2" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/server/internal" - "github.com/dexidp/dex/storage" -) - -// newHealthChecker returns the healthz handler. The handler runs until the -// provided context is canceled. -func (s *Server) newHealthChecker(ctx context.Context) http.Handler { - h := &healthChecker{s: s} - - // Perform one health check synchronously so the returned handler returns - // valid data immediately. - h.runHealthCheck() - - go func() { - for { - select { - case <-ctx.Done(): - return - case <-time.After(time.Second * 15): - } - h.runHealthCheck() - } - }() - return h -} - -// healthChecker periodically performs health checks on server dependencies. -// Currently, it only checks that the storage layer is available. -type healthChecker struct { - s *Server - - // Result of the last health check: any error and the amount of time it took - // to query the storage. - mu sync.RWMutex - // Guarded by the mutex - err error - passed time.Duration -} - -// runHealthCheck performs a single health check and makes the result available -// for any clients performing and HTTP request against the healthChecker. -func (h *healthChecker) runHealthCheck() { - t := h.s.now() - err := checkStorageHealth(h.s.storage, h.s.now) - passed := h.s.now().Sub(t) - if err != nil { - h.s.logger.Errorf("Storage health check failed: %v", err) - } - - // Make sure to only hold the mutex to access the fields, and not while - // we're querying the storage object. - h.mu.Lock() - h.err = err - h.passed = passed - h.mu.Unlock() -} - -func checkStorageHealth(s storage.Storage, now func() time.Time) error { - a := storage.AuthRequest{ - ID: storage.NewID(), - ClientID: storage.NewID(), - - // Set a short expiry so if the delete fails this will be cleaned up quickly by garbage collection. - Expiry: now().Add(time.Minute), - } - - if err := s.CreateAuthRequest(a); err != nil { - return fmt.Errorf("create auth request: %v", err) - } - if err := s.DeleteAuthRequest(a.ID); err != nil { - return fmt.Errorf("delete auth request: %v", err) - } - return nil -} - -func (h *healthChecker) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.mu.RLock() - err := h.err - t := h.passed - h.mu.RUnlock() - - if err != nil { - h.s.renderError(r, w, http.StatusInternalServerError, "Health check failed.") - return - } - fmt.Fprintf(w, "Health check passed in %s", t) -} - -func (s *Server) handlePublicKeys(w http.ResponseWriter, r *http.Request) { - // TODO(ericchiang): Cache this. 
- keys, err := s.storage.GetKeys() - if err != nil { - s.logger.Errorf("failed to get keys: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") - return - } - - if keys.SigningKeyPub == nil { - s.logger.Errorf("No public keys found.") - s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") - return - } - - jwks := jose.JSONWebKeySet{ - Keys: make([]jose.JSONWebKey, len(keys.VerificationKeys)+1), - } - jwks.Keys[0] = *keys.SigningKeyPub - for i, verificationKey := range keys.VerificationKeys { - jwks.Keys[i+1] = *verificationKey.PublicKey - } - - data, err := json.MarshalIndent(jwks, "", " ") - if err != nil { - s.logger.Errorf("failed to marshal discovery data: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") - return - } - maxAge := keys.NextRotation.Sub(s.now()) - if maxAge < (time.Minute * 2) { - maxAge = time.Minute * 2 - } - - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d, must-revalidate", int(maxAge.Seconds()))) - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(len(data))) - w.Write(data) -} - -type discovery struct { - Issuer string `json:"issuer"` - Auth string `json:"authorization_endpoint"` - Token string `json:"token_endpoint"` - Keys string `json:"jwks_uri"` - UserInfo string `json:"userinfo_endpoint"` - ResponseTypes []string `json:"response_types_supported"` - Subjects []string `json:"subject_types_supported"` - IDTokenAlgs []string `json:"id_token_signing_alg_values_supported"` - Scopes []string `json:"scopes_supported"` - AuthMethods []string `json:"token_endpoint_auth_methods_supported"` - Claims []string `json:"claims_supported"` -} - -func (s *Server) discoveryHandler() (http.HandlerFunc, error) { - d := discovery{ - Issuer: s.issuerURL.String(), - Auth: s.absURL("/auth"), - Token: s.absURL("/token"), - Keys: s.absURL("/keys"), - UserInfo: s.absURL("/userinfo"), - Subjects: []string{"public"}, - IDTokenAlgs: []string{string(jose.RS256)}, - Scopes: []string{"openid", "email", "groups", "profile", "offline_access"}, - AuthMethods: []string{"client_secret_basic"}, - Claims: []string{ - "aud", "email", "email_verified", "exp", - "iat", "iss", "locale", "name", "sub", - }, - } - - for responseType := range s.supportedResponseTypes { - d.ResponseTypes = append(d.ResponseTypes, responseType) - } - sort.Strings(d.ResponseTypes) - - data, err := json.MarshalIndent(d, "", " ") - if err != nil { - return nil, fmt.Errorf("failed to marshal discovery data: %v", err) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(len(data))) - w.Write(data) - }), nil -} - -// handleAuthorization handles the OAuth2 auth endpoint. -func (s *Server) handleAuthorization(w http.ResponseWriter, r *http.Request) { - authReq, err := s.parseAuthorizationRequest(r) - if err != nil { - s.logger.Errorf("Failed to parse authorization request: %v", err) - status := http.StatusInternalServerError - - // If this is an authErr, let's let it handle the error, or update the HTTP - // status code - if err, ok := err.(*authErr); ok { - if handler, ok := err.Handle(); ok { - // client_id and redirect_uri checked out and we can redirect back to - // the client with the error. 
- handler.ServeHTTP(w, r) - return - } - status = err.Status() - } - - s.renderError(r, w, status, err.Error()) - return - } - - // TODO(ericchiang): Create this authorization request later in the login flow - // so users don't hit "not found" database errors if they wait at the login - // screen too long. - // - // See: https://github.com/dexidp/dex/issues/646 - authReq.Expiry = s.now().Add(s.authRequestsValidFor) - if err := s.storage.CreateAuthRequest(*authReq); err != nil { - s.logger.Errorf("Failed to create authorization request: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Failed to connect to the database.") - return - } - - connectors, err := s.storage.ListConnectors() - if err != nil { - s.logger.Errorf("Failed to get list of connectors: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve connector list.") - return - } - - // Redirect if a client chooses a specific connector_id - if authReq.ConnectorID != "" { - for _, c := range connectors { - if c.ID == authReq.ConnectorID { - http.Redirect(w, r, s.absPath("/auth", c.ID)+"?req="+authReq.ID, http.StatusFound) - return - } - } - s.tokenErrHelper(w, errInvalidConnectorID, "Connector ID does not match a valid Connector", http.StatusNotFound) - return - } - - if len(connectors) == 1 && !s.alwaysShowLogin { - for _, c := range connectors { - // TODO(ericchiang): Make this pass on r.URL.RawQuery and let something latter - // on create the auth request. - http.Redirect(w, r, s.absPath("/auth", c.ID)+"?req="+authReq.ID, http.StatusFound) - return - } - } - - connectorInfos := make([]connectorInfo, len(connectors)) - for index, conn := range connectors { - connectorInfos[index] = connectorInfo{ - ID: conn.ID, - Name: conn.Name, - Type: conn.Type, - // TODO(ericchiang): Make this pass on r.URL.RawQuery and let something latter - // on create the auth request. - URL: s.absPath("/auth", conn.ID) + "?req=" + authReq.ID, - } - } - - if err := s.templates.login(r, w, connectorInfos, r.URL.Path); err != nil { - s.logger.Errorf("Server template error: %v", err) - } -} - -func (s *Server) handleConnectorLogin(w http.ResponseWriter, r *http.Request) { - connID := mux.Vars(r)["connector"] - conn, err := s.getConnector(connID) - if err != nil { - s.logger.Errorf("Failed to create authorization request: %v", err) - s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist") - return - } - - authReqID := r.FormValue("req") - - authReq, err := s.storage.GetAuthRequest(authReqID) - if err != nil { - s.logger.Errorf("Failed to get auth request: %v", err) - if err == storage.ErrNotFound { - s.renderError(r, w, http.StatusBadRequest, "Login session expired.") - } else { - s.renderError(r, w, http.StatusInternalServerError, "Database error.") - } - return - } - - // Set the connector being used for the login. - if authReq.ConnectorID != connID { - updater := func(a storage.AuthRequest) (storage.AuthRequest, error) { - a.ConnectorID = connID - return a, nil - } - if err := s.storage.UpdateAuthRequest(authReqID, updater); err != nil { - s.logger.Errorf("Failed to set connector ID on auth request: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Database error.") - return - } - } - - scopes := parseScopes(authReq.Scopes) - showBacklink := len(s.connectors) > 1 - - switch r.Method { - case http.MethodGet: - switch conn := conn.Connector.(type) { - case connector.CallbackConnector: - // Use the auth request ID as the "state" token. 
- // - // TODO(ericchiang): Is this appropriate or should we also be using a nonce? - callbackURL, err := conn.LoginURL(scopes, s.absURL("/callback"), authReqID) - if err != nil { - s.logger.Errorf("Connector %q returned error when creating callback: %v", connID, err) - s.renderError(r, w, http.StatusInternalServerError, "Login error.") - return - } - http.Redirect(w, r, callbackURL, http.StatusFound) - case connector.PasswordConnector: - if err := s.templates.password(r, w, r.URL.String(), "", usernamePrompt(conn), false, showBacklink, r.URL.Path); err != nil { - s.logger.Errorf("Server template error: %v", err) - } - case connector.SAMLConnector: - action, value, err := conn.POSTData(scopes, authReqID) - if err != nil { - s.logger.Errorf("Creating SAML data: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Connector Login Error") - return - } - - // TODO(ericchiang): Don't inline this. - fmt.Fprintf(w, ` - - - - SAML login - - -
- - -
- - - `, action, value, authReqID) - default: - s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") - } - case http.MethodPost: - passwordConnector, ok := conn.Connector.(connector.PasswordConnector) - if !ok { - s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") - return - } - - username := r.FormValue("login") - password := r.FormValue("password") - - identity, ok, err := passwordConnector.Login(r.Context(), scopes, username, password) - if err != nil { - s.logger.Errorf("Failed to login user: %v", err) - s.renderError(r, w, http.StatusInternalServerError, fmt.Sprintf("Login error: %v", err)) - return - } - if !ok { - if err := s.templates.password(r, w, r.URL.String(), username, usernamePrompt(passwordConnector), true, showBacklink, r.URL.Path); err != nil { - s.logger.Errorf("Server template error: %v", err) - } - return - } - redirectURL, err := s.finalizeLogin(identity, authReq, conn.Connector) - if err != nil { - s.logger.Errorf("Failed to finalize login: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Login error.") - return - } - - http.Redirect(w, r, redirectURL, http.StatusSeeOther) - default: - s.renderError(r, w, http.StatusBadRequest, "Unsupported request method.") - } -} - -func (s *Server) handleConnectorCallback(w http.ResponseWriter, r *http.Request) { - var authID string - switch r.Method { - case http.MethodGet: // OAuth2 callback - if authID = r.URL.Query().Get("state"); authID == "" { - s.renderError(r, w, http.StatusBadRequest, "User session error.") - return - } - case http.MethodPost: // SAML POST binding - if authID = r.PostFormValue("RelayState"); authID == "" { - s.renderError(r, w, http.StatusBadRequest, "User session error.") - return - } - default: - s.renderError(r, w, http.StatusBadRequest, "Method not supported") - return - } - - authReq, err := s.storage.GetAuthRequest(authID) - if err != nil { - if err == storage.ErrNotFound { - s.logger.Errorf("Invalid 'state' parameter provided: %v", err) - s.renderError(r, w, http.StatusBadRequest, "Requested resource does not exist.") - return - } - s.logger.Errorf("Failed to get auth request: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Database error.") - return - } - - if connID := mux.Vars(r)["connector"]; connID != "" && connID != authReq.ConnectorID { - s.logger.Errorf("Connector mismatch: authentication started with id %q, but callback for id %q was triggered", authReq.ConnectorID, connID) - s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") - return - } - - conn, err := s.getConnector(authReq.ConnectorID) - if err != nil { - s.logger.Errorf("Failed to get connector with id %q : %v", authReq.ConnectorID, err) - s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") - return - } - - var identity connector.Identity - switch conn := conn.Connector.(type) { - case connector.CallbackConnector: - if r.Method != http.MethodGet { - s.logger.Errorf("SAML request mapped to OAuth2 connector") - s.renderError(r, w, http.StatusBadRequest, "Invalid request") - return - } - identity, err = conn.HandleCallback(parseScopes(authReq.Scopes), r) - case connector.SAMLConnector: - if r.Method != http.MethodPost { - s.logger.Errorf("OAuth2 request mapped to SAML connector") - s.renderError(r, w, http.StatusBadRequest, "Invalid request") - return - } - identity, err = conn.HandlePOST(parseScopes(authReq.Scopes), r.PostFormValue("SAMLResponse"), authReq.ID) - default: 
- s.renderError(r, w, http.StatusInternalServerError, "Requested resource does not exist.") - return - } - - if err != nil { - s.logger.Errorf("Failed to authenticate: %v", err) - s.renderError(r, w, http.StatusInternalServerError, fmt.Sprintf("Failed to authenticate: %v", err)) - return - } - - redirectURL, err := s.finalizeLogin(identity, authReq, conn.Connector) - if err != nil { - s.logger.Errorf("Failed to finalize login: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Login error.") - return - } - - http.Redirect(w, r, redirectURL, http.StatusSeeOther) -} - -// finalizeLogin associates the user's identity with the current AuthRequest, then returns -// the approval page's path. -func (s *Server) finalizeLogin(identity connector.Identity, authReq storage.AuthRequest, conn connector.Connector) (string, error) { - claims := storage.Claims{ - UserID: identity.UserID, - Username: identity.Username, - PreferredUsername: identity.PreferredUsername, - Email: identity.Email, - EmailVerified: identity.EmailVerified, - Groups: identity.Groups, - } - - updater := func(a storage.AuthRequest) (storage.AuthRequest, error) { - a.LoggedIn = true - a.Claims = claims - a.ConnectorData = identity.ConnectorData - return a, nil - } - if err := s.storage.UpdateAuthRequest(authReq.ID, updater); err != nil { - return "", fmt.Errorf("failed to update auth request: %v", err) - } - - email := claims.Email - if !claims.EmailVerified { - email = email + " (unverified)" - } - - s.logger.Infof("login successful: connector %q, username=%q, preferred_username=%q, email=%q, groups=%q", - authReq.ConnectorID, claims.Username, claims.PreferredUsername, email, claims.Groups) - - returnURL := path.Join(s.issuerURL.Path, "/approval") + "?req=" + authReq.ID - _, ok := conn.(connector.RefreshConnector) - if !ok { - return returnURL, nil - } - - // Try to retrieve an existing OfflineSession object for the corresponding user. - if session, err := s.storage.GetOfflineSessions(identity.UserID, authReq.ConnectorID); err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("failed to get offline session: %v", err) - return "", err - } - offlineSessions := storage.OfflineSessions{ - UserID: identity.UserID, - ConnID: authReq.ConnectorID, - Refresh: make(map[string]*storage.RefreshTokenRef), - ConnectorData: identity.ConnectorData, - } - - // Create a new OfflineSession object for the user and add a reference object for - // the newly received refreshtoken. - if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil { - s.logger.Errorf("failed to create offline session: %v", err) - return "", err - } - } else { - // Update existing OfflineSession obj with new RefreshTokenRef. 
- if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { - if len(identity.ConnectorData) > 0 { - old.ConnectorData = identity.ConnectorData - } - return old, nil - }); err != nil { - s.logger.Errorf("failed to update offline session: %v", err) - return "", err - } - } - - return returnURL, nil -} - -func (s *Server) handleApproval(w http.ResponseWriter, r *http.Request) { - authReq, err := s.storage.GetAuthRequest(r.FormValue("req")) - if err != nil { - s.logger.Errorf("Failed to get auth request: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Database error.") - return - } - if !authReq.LoggedIn { - s.logger.Errorf("Auth request does not have an identity for approval") - s.renderError(r, w, http.StatusInternalServerError, "Login process not yet finalized.") - return - } - - switch r.Method { - case http.MethodGet: - if s.skipApproval { - s.sendCodeResponse(w, r, authReq) - return - } - client, err := s.storage.GetClient(authReq.ClientID) - if err != nil { - s.logger.Errorf("Failed to get client %q: %v", authReq.ClientID, err) - s.renderError(r, w, http.StatusInternalServerError, "Failed to retrieve client.") - return - } - if err := s.templates.approval(r, w, authReq.ID, authReq.Claims.Username, client.Name, authReq.Scopes, r.URL.Path); err != nil { - s.logger.Errorf("Server template error: %v", err) - } - case http.MethodPost: - if r.FormValue("approval") != "approve" { - s.renderError(r, w, http.StatusInternalServerError, "Approval rejected.") - return - } - s.sendCodeResponse(w, r, authReq) - } -} - -func (s *Server) sendCodeResponse(w http.ResponseWriter, r *http.Request, authReq storage.AuthRequest) { - if s.now().After(authReq.Expiry) { - s.renderError(r, w, http.StatusBadRequest, "User session has expired.") - return - } - - if err := s.storage.DeleteAuthRequest(authReq.ID); err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("Failed to delete authorization request: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") - } else { - s.renderError(r, w, http.StatusBadRequest, "User session error.") - } - return - } - u, err := url.Parse(authReq.RedirectURI) - if err != nil { - s.renderError(r, w, http.StatusInternalServerError, "Invalid redirect URI.") - return - } - - var ( - // Was the initial request using the implicit or hybrid flow instead of - // the "normal" code flow? - implicitOrHybrid = false - - // Only present in hybrid or code flow. code.ID == "" if this is not set. - code storage.AuthCode - - // ID token returned immediately if the response_type includes "id_token". - // Only valid for implicit and hybrid flows. - idToken string - idTokenExpiry time.Time - - // Access token - accessToken string - ) - - for _, responseType := range authReq.ResponseTypes { - switch responseType { - case responseTypeCode: - code = storage.AuthCode{ - ID: storage.NewID(), - ClientID: authReq.ClientID, - ConnectorID: authReq.ConnectorID, - Nonce: authReq.Nonce, - Scopes: authReq.Scopes, - Claims: authReq.Claims, - Expiry: s.now().Add(time.Minute * 30), - RedirectURI: authReq.RedirectURI, - ConnectorData: authReq.ConnectorData, - } - if err := s.storage.CreateAuthCode(code); err != nil { - s.logger.Errorf("Failed to create auth code: %v", err) - s.renderError(r, w, http.StatusInternalServerError, "Internal server error.") - return - } - - // Implicit and hybrid flows that try to use the OOB redirect URI are - // rejected earlier. 
If we got here we're using the code flow. - if authReq.RedirectURI == redirectURIOOB { - if err := s.templates.oob(r, w, code.ID, r.URL.Path); err != nil { - s.logger.Errorf("Server template error: %v", err) - } - return - } - case responseTypeToken: - implicitOrHybrid = true - case responseTypeIDToken: - implicitOrHybrid = true - var err error - - accessToken, err = s.newAccessToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce, authReq.ConnectorID) - if err != nil { - s.logger.Errorf("failed to create new access token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - idToken, idTokenExpiry, err = s.newIDToken(authReq.ClientID, authReq.Claims, authReq.Scopes, authReq.Nonce, accessToken, authReq.ConnectorID) - if err != nil { - s.logger.Errorf("failed to create ID token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - } - } - - if implicitOrHybrid { - v := url.Values{} - v.Set("access_token", accessToken) - v.Set("token_type", "bearer") - v.Set("state", authReq.State) - if idToken != "" { - v.Set("id_token", idToken) - // The hybrid flow with only "code token" or "code id_token" doesn't return an - // "expires_in" value. If "code" wasn't provided, indicating the implicit flow, - // don't add it. - // - // https://openid.net/specs/openid-connect-core-1_0.html#HybridAuthResponse - if code.ID == "" { - v.Set("expires_in", strconv.Itoa(int(idTokenExpiry.Sub(s.now()).Seconds()))) - } - } - if code.ID != "" { - v.Set("code", code.ID) - } - - // Implicit and hybrid flows return their values as part of the fragment. - // - // HTTP/1.1 303 See Other - // Location: https://client.example.org/cb# - // access_token=SlAV32hkKG - // &token_type=bearer - // &id_token=eyJ0 ... NiJ9.eyJ1c ... I6IjIifX0.DeWt4Qu ... ZXso - // &expires_in=3600 - // &state=af0ifjsldkj - // - u.Fragment = v.Encode() - } else { - // The code flow add values to the URL query. - // - // HTTP/1.1 303 See Other - // Location: https://client.example.org/cb? 
- // code=SplxlOBeZQQYbYS6WxSbIA - // &state=af0ifjsldkj - // - q := u.Query() - q.Set("code", code.ID) - q.Set("state", authReq.State) - u.RawQuery = q.Encode() - } - - http.Redirect(w, r, u.String(), http.StatusSeeOther) -} - -func (s *Server) handleToken(w http.ResponseWriter, r *http.Request) { - clientID, clientSecret, ok := r.BasicAuth() - if ok { - var err error - if clientID, err = url.QueryUnescape(clientID); err != nil { - s.tokenErrHelper(w, errInvalidRequest, "client_id improperly encoded", http.StatusBadRequest) - return - } - if clientSecret, err = url.QueryUnescape(clientSecret); err != nil { - s.tokenErrHelper(w, errInvalidRequest, "client_secret improperly encoded", http.StatusBadRequest) - return - } - } else { - clientID = r.PostFormValue("client_id") - clientSecret = r.PostFormValue("client_secret") - } - - client, err := s.storage.GetClient(clientID) - if err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("failed to get client: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - } else { - s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized) - } - return - } - if client.Secret != clientSecret { - s.tokenErrHelper(w, errInvalidClient, "Invalid client credentials.", http.StatusUnauthorized) - return - } - - grantType := r.PostFormValue("grant_type") - switch grantType { - case grantTypeAuthorizationCode: - s.handleAuthCode(w, r, client) - case grantTypeRefreshToken: - s.handleRefreshToken(w, r, client) - case grantTypePassword: - s.handlePasswordGrant(w, r, client) - default: - s.tokenErrHelper(w, errInvalidGrant, "", http.StatusBadRequest) - } -} - -// handle an access token request https://tools.ietf.org/html/rfc6749#section-4.1.3 -func (s *Server) handleAuthCode(w http.ResponseWriter, r *http.Request, client storage.Client) { - code := r.PostFormValue("code") - redirectURI := r.PostFormValue("redirect_uri") - - authCode, err := s.storage.GetAuthCode(code) - if err != nil || s.now().After(authCode.Expiry) || authCode.ClientID != client.ID { - if err != storage.ErrNotFound { - s.logger.Errorf("failed to get auth code: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - } else { - s.tokenErrHelper(w, errInvalidRequest, "Invalid or expired code parameter.", http.StatusBadRequest) - } - return - } - - if authCode.RedirectURI != redirectURI { - s.tokenErrHelper(w, errInvalidRequest, "redirect_uri did not match URI from initial request.", http.StatusBadRequest) - return - } - - accessToken, err := s.newAccessToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, authCode.ConnectorID) - if err != nil { - s.logger.Errorf("failed to create new access token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - idToken, expiry, err := s.newIDToken(client.ID, authCode.Claims, authCode.Scopes, authCode.Nonce, accessToken, authCode.ConnectorID) - if err != nil { - s.logger.Errorf("failed to create ID token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - if err := s.storage.DeleteAuthCode(code); err != nil { - s.logger.Errorf("failed to delete auth code: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - reqRefresh := func() bool { - // Ensure the connector supports refresh tokens. - // - // Connectors like `saml` do not implement RefreshConnector. 
- conn, err := s.getConnector(authCode.ConnectorID) - if err != nil { - s.logger.Errorf("connector with ID %q not found: %v", authCode.ConnectorID, err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return false - } - - _, ok := conn.Connector.(connector.RefreshConnector) - if !ok { - return false - } - - for _, scope := range authCode.Scopes { - if scope == scopeOfflineAccess { - return true - } - } - return false - }() - var refreshToken string - if reqRefresh { - refresh := storage.RefreshToken{ - ID: storage.NewID(), - Token: storage.NewID(), - ClientID: authCode.ClientID, - ConnectorID: authCode.ConnectorID, - Scopes: authCode.Scopes, - Claims: authCode.Claims, - Nonce: authCode.Nonce, - ConnectorData: authCode.ConnectorData, - CreatedAt: s.now(), - LastUsed: s.now(), - } - token := &internal.RefreshToken{ - RefreshId: refresh.ID, - Token: refresh.Token, - } - if refreshToken, err = internal.Marshal(token); err != nil { - s.logger.Errorf("failed to marshal refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - if err := s.storage.CreateRefresh(refresh); err != nil { - s.logger.Errorf("failed to create refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - // deleteToken determines if we need to delete the newly created refresh token - // due to a failure in updating/creating the OfflineSession object for the - // corresponding user. - var deleteToken bool - defer func() { - if deleteToken { - // Delete newly created refresh token from storage. - if err := s.storage.DeleteRefresh(refresh.ID); err != nil { - s.logger.Errorf("failed to delete refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - } - }() - - tokenRef := storage.RefreshTokenRef{ - ID: refresh.ID, - ClientID: refresh.ClientID, - CreatedAt: refresh.CreatedAt, - LastUsed: refresh.LastUsed, - } - - // Try to retrieve an existing OfflineSession object for the corresponding user. - if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("failed to get offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - offlineSessions := storage.OfflineSessions{ - UserID: refresh.Claims.UserID, - ConnID: refresh.ConnectorID, - Refresh: make(map[string]*storage.RefreshTokenRef), - } - offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef - - // Create a new OfflineSession object for the user and add a reference object for - // the newly received refreshtoken. - if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil { - s.logger.Errorf("failed to create offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - } else { - if oldTokenRef, ok := session.Refresh[tokenRef.ClientID]; ok { - // Delete old refresh token from storage. - if err := s.storage.DeleteRefresh(oldTokenRef.ID); err != nil && err != storage.ErrNotFound { - s.logger.Errorf("failed to delete refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - } - - // Update existing OfflineSession obj with new RefreshTokenRef. 
- if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { - old.Refresh[tokenRef.ClientID] = &tokenRef - return old, nil - }); err != nil { - s.logger.Errorf("failed to update offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - } - } - s.writeAccessToken(w, idToken, accessToken, refreshToken, expiry) -} - -// handle a refresh token request https://tools.ietf.org/html/rfc6749#section-6 -func (s *Server) handleRefreshToken(w http.ResponseWriter, r *http.Request, client storage.Client) { - code := r.PostFormValue("refresh_token") - scope := r.PostFormValue("scope") - if code == "" { - s.tokenErrHelper(w, errInvalidRequest, "No refresh token in request.", http.StatusBadRequest) - return - } - - token := new(internal.RefreshToken) - if err := internal.Unmarshal(code, token); err != nil { - // For backward compatibility, assume the refresh_token is a raw refresh token ID - // if it fails to decode. - // - // Because refresh_token values that aren't unmarshable were generated by servers - // that don't have a Token value, we'll still reject any attempts to claim a - // refresh_token twice. - token = &internal.RefreshToken{RefreshId: code, Token: ""} - } - - refresh, err := s.storage.GetRefresh(token.RefreshId) - if err != nil { - s.logger.Errorf("failed to get refresh token: %v", err) - if err == storage.ErrNotFound { - s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest) - } else { - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - } - return - } - if refresh.ClientID != client.ID { - s.logger.Errorf("client %s trying to claim token for client %s", client.ID, refresh.ClientID) - s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest) - return - } - if refresh.Token != token.Token { - s.logger.Errorf("refresh token with id %s claimed twice", refresh.ID) - s.tokenErrHelper(w, errInvalidRequest, "Refresh token is invalid or has already been claimed by another client.", http.StatusBadRequest) - return - } - - // Per the OAuth2 spec, if the client has omitted the scopes, default to the original - // authorized scopes. 
- // - // https://tools.ietf.org/html/rfc6749#section-6 - scopes := refresh.Scopes - if scope != "" { - requestedScopes := strings.Fields(scope) - var unauthorizedScopes []string - - for _, s := range requestedScopes { - contains := func() bool { - for _, scope := range refresh.Scopes { - if s == scope { - return true - } - } - return false - }() - if !contains { - unauthorizedScopes = append(unauthorizedScopes, s) - } - } - - if len(unauthorizedScopes) > 0 { - msg := fmt.Sprintf("Requested scopes contain unauthorized scope(s): %q.", unauthorizedScopes) - s.tokenErrHelper(w, errInvalidRequest, msg, http.StatusBadRequest) - return - } - scopes = requestedScopes - } - - var connectorData []byte - if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("failed to get offline session: %v", err) - return - } - } else if len(refresh.ConnectorData) > 0 { - // Use the old connector data if it exists, should be deleted once used - connectorData = refresh.ConnectorData - } else { - connectorData = session.ConnectorData - } - - conn, err := s.getConnector(refresh.ConnectorID) - if err != nil { - s.logger.Errorf("connector with ID %q not found: %v", refresh.ConnectorID, err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - ident := connector.Identity{ - UserID: refresh.Claims.UserID, - Username: refresh.Claims.Username, - PreferredUsername: refresh.Claims.PreferredUsername, - Email: refresh.Claims.Email, - EmailVerified: refresh.Claims.EmailVerified, - Groups: refresh.Claims.Groups, - ConnectorData: connectorData, - } - - // Can the connector refresh the identity? If so, attempt to refresh the data - // in the connector. - // - // TODO(ericchiang): We may want a strict mode where connectors that don't implement - // this interface can't perform refreshing. 
- if refreshConn, ok := conn.Connector.(connector.RefreshConnector); ok { - newIdent, err := refreshConn.Refresh(r.Context(), parseScopes(scopes), ident) - if err != nil { - s.logger.Errorf("failed to refresh identity: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - ident = newIdent - } - - claims := storage.Claims{ - UserID: ident.UserID, - Username: ident.Username, - PreferredUsername: ident.PreferredUsername, - Email: ident.Email, - EmailVerified: ident.EmailVerified, - Groups: ident.Groups, - } - - accessToken, err := s.newAccessToken(client.ID, claims, scopes, refresh.Nonce, refresh.ConnectorID) - if err != nil { - s.logger.Errorf("failed to create new access token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - idToken, expiry, err := s.newIDToken(client.ID, claims, scopes, refresh.Nonce, accessToken, refresh.ConnectorID) - if err != nil { - s.logger.Errorf("failed to create ID token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - newToken := &internal.RefreshToken{ - RefreshId: refresh.ID, - Token: storage.NewID(), - } - rawNewToken, err := internal.Marshal(newToken) - if err != nil { - s.logger.Errorf("failed to marshal refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - lastUsed := s.now() - updater := func(old storage.RefreshToken) (storage.RefreshToken, error) { - if old.Token != refresh.Token { - return old, errors.New("refresh token claimed twice") - } - old.Token = newToken.Token - // Update the claims of the refresh token. - // - // UserID intentionally ignored for now. - old.Claims.Username = ident.Username - old.Claims.PreferredUsername = ident.PreferredUsername - old.Claims.Email = ident.Email - old.Claims.EmailVerified = ident.EmailVerified - old.Claims.Groups = ident.Groups - old.LastUsed = lastUsed - - // ConnectorData has been moved to OfflineSession - old.ConnectorData = []byte{} - return old, nil - } - - // Update LastUsed time stamp in refresh token reference object - // in offline session for the user. - if err := s.storage.UpdateOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { - if old.Refresh[refresh.ClientID].ID != refresh.ID { - return old, errors.New("refresh token invalid") - } - old.Refresh[refresh.ClientID].LastUsed = lastUsed - old.ConnectorData = ident.ConnectorData - return old, nil - }); err != nil { - s.logger.Errorf("failed to update offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - // Update refresh token in the storage. 
- if err := s.storage.UpdateRefreshToken(refresh.ID, updater); err != nil { - s.logger.Errorf("failed to update refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - s.writeAccessToken(w, idToken, accessToken, rawNewToken, expiry) -} - -func (s *Server) handleUserInfo(w http.ResponseWriter, r *http.Request) { - const prefix = "Bearer " - - auth := r.Header.Get("authorization") - if len(auth) < len(prefix) || !strings.EqualFold(prefix, auth[:len(prefix)]) { - w.Header().Set("WWW-Authenticate", "Bearer") - s.tokenErrHelper(w, errAccessDenied, "Invalid bearer token.", http.StatusUnauthorized) - return - } - rawIDToken := auth[len(prefix):] - - verifier := oidc.NewVerifier(s.issuerURL.String(), &storageKeySet{s.storage}, &oidc.Config{SkipClientIDCheck: true}) - idToken, err := verifier.Verify(r.Context(), rawIDToken) - if err != nil { - s.tokenErrHelper(w, errAccessDenied, err.Error(), http.StatusForbidden) - return - } - - var claims json.RawMessage - if err := idToken.Claims(&claims); err != nil { - s.tokenErrHelper(w, errServerError, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - w.Write(claims) -} - -func (s *Server) handlePasswordGrant(w http.ResponseWriter, r *http.Request, client storage.Client) { - // Parse the fields - if err := r.ParseForm(); err != nil { - s.tokenErrHelper(w, errInvalidRequest, "Couldn't parse data", http.StatusBadRequest) - return - } - q := r.Form - - nonce := q.Get("nonce") - // Some clients, like the old go-oidc, provide extra whitespace. Tolerate this. - scopes := strings.Fields(q.Get("scope")) - - // Parse the scopes if they are passed - var ( - unrecognized []string - invalidScopes []string - ) - hasOpenIDScope := false - for _, scope := range scopes { - switch scope { - case scopeOpenID: - hasOpenIDScope = true - case scopeOfflineAccess, scopeEmail, scopeProfile, scopeGroups, scopeFederatedID: - default: - peerID, ok := parseCrossClientScope(scope) - if !ok { - unrecognized = append(unrecognized, scope) - continue - } - - isTrusted, err := s.validateCrossClientTrust(client.ID, peerID) - if err != nil { - s.tokenErrHelper(w, errInvalidClient, fmt.Sprintf("Error validating cross client trust %v.", err), http.StatusBadRequest) - return - } - if !isTrusted { - invalidScopes = append(invalidScopes, scope) - } - } - } - if !hasOpenIDScope { - s.tokenErrHelper(w, errInvalidRequest, `Missing required scope(s) ["openid"].`, http.StatusBadRequest) - return - } - if len(unrecognized) > 0 { - s.tokenErrHelper(w, errInvalidRequest, fmt.Sprintf("Unrecognized scope(s) %q", unrecognized), http.StatusBadRequest) - return - } - if len(invalidScopes) > 0 { - s.tokenErrHelper(w, errInvalidRequest, fmt.Sprintf("Client can't request scope(s) %q", invalidScopes), http.StatusBadRequest) - return - } - - // Which connector - connID := s.passwordConnector - conn, err := s.getConnector(connID) - if err != nil { - s.tokenErrHelper(w, errInvalidRequest, "Requested connector does not exist.", http.StatusBadRequest) - return - } - - passwordConnector, ok := conn.Connector.(connector.PasswordConnector) - if !ok { - s.tokenErrHelper(w, errInvalidRequest, "Requested password connector does not correct type.", http.StatusBadRequest) - return - } - - // Login - username := q.Get("username") - password := q.Get("password") - identity, ok, err := passwordConnector.Login(r.Context(), parseScopes(scopes), username, password) - if err != nil { - s.tokenErrHelper(w, 
errInvalidRequest, "Could not login user", http.StatusBadRequest) - return - } - if !ok { - s.tokenErrHelper(w, errAccessDenied, "Invalid username or password", http.StatusUnauthorized) - return - } - - // Build the claims to send the id token - claims := storage.Claims{ - UserID: identity.UserID, - Username: identity.Username, - PreferredUsername: identity.PreferredUsername, - Email: identity.Email, - EmailVerified: identity.EmailVerified, - Groups: identity.Groups, - } - - accessToken := storage.NewID() - idToken, expiry, err := s.newIDToken(client.ID, claims, scopes, nonce, accessToken, connID) - if err != nil { - s.tokenErrHelper(w, errServerError, fmt.Sprintf("failed to create ID token: %v", err), http.StatusInternalServerError) - return - } - - reqRefresh := func() bool { - // Ensure the connector supports refresh tokens. - // - // Connectors like `saml` do not implement RefreshConnector. - _, ok := conn.Connector.(connector.RefreshConnector) - if !ok { - return false - } - - for _, scope := range scopes { - if scope == scopeOfflineAccess { - return true - } - } - return false - }() - var refreshToken string - if reqRefresh { - refresh := storage.RefreshToken{ - ID: storage.NewID(), - Token: storage.NewID(), - ClientID: client.ID, - ConnectorID: connID, - Scopes: scopes, - Claims: claims, - Nonce: nonce, - // ConnectorData: authCode.ConnectorData, - CreatedAt: s.now(), - LastUsed: s.now(), - } - token := &internal.RefreshToken{ - RefreshId: refresh.ID, - Token: refresh.Token, - } - if refreshToken, err = internal.Marshal(token); err != nil { - s.logger.Errorf("failed to marshal refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - if err := s.storage.CreateRefresh(refresh); err != nil { - s.logger.Errorf("failed to create refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - - // deleteToken determines if we need to delete the newly created refresh token - // due to a failure in updating/creating the OfflineSession object for the - // corresponding user. - var deleteToken bool - defer func() { - if deleteToken { - // Delete newly created refresh token from storage. - if err := s.storage.DeleteRefresh(refresh.ID); err != nil { - s.logger.Errorf("failed to delete refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - } - }() - - tokenRef := storage.RefreshTokenRef{ - ID: refresh.ID, - ClientID: refresh.ClientID, - CreatedAt: refresh.CreatedAt, - LastUsed: refresh.LastUsed, - } - - // Try to retrieve an existing OfflineSession object for the corresponding user. - if session, err := s.storage.GetOfflineSessions(refresh.Claims.UserID, refresh.ConnectorID); err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("failed to get offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - offlineSessions := storage.OfflineSessions{ - UserID: refresh.Claims.UserID, - ConnID: refresh.ConnectorID, - Refresh: make(map[string]*storage.RefreshTokenRef), - } - offlineSessions.Refresh[tokenRef.ClientID] = &tokenRef - - // Create a new OfflineSession object for the user and add a reference object for - // the newly received refreshtoken. 
- if err := s.storage.CreateOfflineSessions(offlineSessions); err != nil { - s.logger.Errorf("failed to create offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - } else { - if oldTokenRef, ok := session.Refresh[tokenRef.ClientID]; ok { - // Delete old refresh token from storage. - if err := s.storage.DeleteRefresh(oldTokenRef.ID); err != nil { - if err == storage.ErrNotFound { - s.logger.Warnf("database inconsistent, refresh token missing: %v", oldTokenRef.ID) - } else { - s.logger.Errorf("failed to delete refresh token: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - } - } - - // Update existing OfflineSession obj with new RefreshTokenRef. - if err := s.storage.UpdateOfflineSessions(session.UserID, session.ConnID, func(old storage.OfflineSessions) (storage.OfflineSessions, error) { - old.Refresh[tokenRef.ClientID] = &tokenRef - return old, nil - }); err != nil { - s.logger.Errorf("failed to update offline session: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - deleteToken = true - return - } - } - } - - s.writeAccessToken(w, idToken, accessToken, refreshToken, expiry) -} - -func (s *Server) writeAccessToken(w http.ResponseWriter, idToken, accessToken, refreshToken string, expiry time.Time) { - resp := struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in"` - RefreshToken string `json:"refresh_token,omitempty"` - IDToken string `json:"id_token"` - }{ - accessToken, - "bearer", - int(expiry.Sub(s.now()).Seconds()), - refreshToken, - idToken, - } - data, err := json.Marshal(resp) - if err != nil { - s.logger.Errorf("failed to marshal access token response: %v", err) - s.tokenErrHelper(w, errServerError, "", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(len(data))) - w.Write(data) -} - -func (s *Server) renderError(r *http.Request, w http.ResponseWriter, status int, description string) { - if err := s.templates.err(r, w, status, description); err != nil { - s.logger.Errorf("Server template error: %v", err) - } -} - -func (s *Server) tokenErrHelper(w http.ResponseWriter, typ string, description string, statusCode int) { - if err := tokenErr(w, typ, description, statusCode); err != nil { - s.logger.Errorf("token error response: %v", err) - } -} - -// Check for username prompt override from connector. Defaults to "Username". -func usernamePrompt(conn connector.PasswordConnector) string { - if attr := conn.Prompt(); attr != "" { - return attr - } - return "Username" -} diff --git a/vendor/github.com/dexidp/dex/server/internal/codec.go b/vendor/github.com/dexidp/dex/server/internal/codec.go deleted file mode 100644 index a92c26f..0000000 --- a/vendor/github.com/dexidp/dex/server/internal/codec.go +++ /dev/null @@ -1,25 +0,0 @@ -package internal - -import ( - "encoding/base64" - - "github.com/golang/protobuf/proto" -) - -// Marshal converts a protobuf message to a URL legal string. -func Marshal(message proto.Message) (string, error) { - data, err := proto.Marshal(message) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(data), nil -} - -// Unmarshal decodes a protobuf message. 
-func Unmarshal(s string, message proto.Message) error { - data, err := base64.RawURLEncoding.DecodeString(s) - if err != nil { - return err - } - return proto.Unmarshal(data, message) -} diff --git a/vendor/github.com/dexidp/dex/server/internal/types.pb.go b/vendor/github.com/dexidp/dex/server/internal/types.pb.go deleted file mode 100644 index 18d954d..0000000 --- a/vendor/github.com/dexidp/dex/server/internal/types.pb.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: server/internal/types.proto - -// Package internal holds protobuf types used by the server - -package internal - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// RefreshToken is a message that holds refresh token data used by dex. -type RefreshToken struct { - RefreshId string `protobuf:"bytes,1,opt,name=refresh_id,json=refreshId,proto3" json:"refresh_id,omitempty"` - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RefreshToken) Reset() { *m = RefreshToken{} } -func (m *RefreshToken) String() string { return proto.CompactTextString(m) } -func (*RefreshToken) ProtoMessage() {} -func (*RefreshToken) Descriptor() ([]byte, []int) { - return fileDescriptor_c3164bea0c2d595f, []int{0} -} - -func (m *RefreshToken) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RefreshToken.Unmarshal(m, b) -} -func (m *RefreshToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RefreshToken.Marshal(b, m, deterministic) -} -func (m *RefreshToken) XXX_Merge(src proto.Message) { - xxx_messageInfo_RefreshToken.Merge(m, src) -} -func (m *RefreshToken) XXX_Size() int { - return xxx_messageInfo_RefreshToken.Size(m) -} -func (m *RefreshToken) XXX_DiscardUnknown() { - xxx_messageInfo_RefreshToken.DiscardUnknown(m) -} - -var xxx_messageInfo_RefreshToken proto.InternalMessageInfo - -func (m *RefreshToken) GetRefreshId() string { - if m != nil { - return m.RefreshId - } - return "" -} - -func (m *RefreshToken) GetToken() string { - if m != nil { - return m.Token - } - return "" -} - -// IDTokenSubject represents both the userID and connID which is returned -// as the "sub" claim in the ID Token. 
-type IDTokenSubject struct { - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - ConnId string `protobuf:"bytes,2,opt,name=conn_id,json=connId,proto3" json:"conn_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IDTokenSubject) Reset() { *m = IDTokenSubject{} } -func (m *IDTokenSubject) String() string { return proto.CompactTextString(m) } -func (*IDTokenSubject) ProtoMessage() {} -func (*IDTokenSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_c3164bea0c2d595f, []int{1} -} - -func (m *IDTokenSubject) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IDTokenSubject.Unmarshal(m, b) -} -func (m *IDTokenSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IDTokenSubject.Marshal(b, m, deterministic) -} -func (m *IDTokenSubject) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDTokenSubject.Merge(m, src) -} -func (m *IDTokenSubject) XXX_Size() int { - return xxx_messageInfo_IDTokenSubject.Size(m) -} -func (m *IDTokenSubject) XXX_DiscardUnknown() { - xxx_messageInfo_IDTokenSubject.DiscardUnknown(m) -} - -var xxx_messageInfo_IDTokenSubject proto.InternalMessageInfo - -func (m *IDTokenSubject) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *IDTokenSubject) GetConnId() string { - if m != nil { - return m.ConnId - } - return "" -} - -func init() { - proto.RegisterType((*RefreshToken)(nil), "internal.RefreshToken") - proto.RegisterType((*IDTokenSubject)(nil), "internal.IDTokenSubject") -} - -func init() { proto.RegisterFile("server/internal/types.proto", fileDescriptor_c3164bea0c2d595f) } - -var fileDescriptor_c3164bea0c2d595f = []byte{ - // 157 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2e, 0x4e, 0x2d, 0x2a, - 0x4b, 0x2d, 0xd2, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, - 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0x89, 0x2a, 0x39, 0x73, 0xf1, 0x04, - 0xa5, 0xa6, 0x15, 0xa5, 0x16, 0x67, 0x84, 0xe4, 0x67, 0xa7, 0xe6, 0x09, 0xc9, 0x72, 0x71, 0x15, - 0x41, 0xf8, 0xf1, 0x99, 0x29, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x50, 0x11, 0xcf, - 0x14, 0x21, 0x11, 0x2e, 0xd6, 0x12, 0x90, 0x3a, 0x09, 0x26, 0xb0, 0x0c, 0x84, 0xa3, 0xe4, 0xc4, - 0xc5, 0xe7, 0xe9, 0x02, 0xd6, 0x1f, 0x5c, 0x9a, 0x94, 0x95, 0x9a, 0x5c, 0x22, 0x24, 0xce, 0xc5, - 0x5e, 0x5a, 0x9c, 0x5a, 0x84, 0x30, 0x83, 0x0d, 0xc4, 0xf5, 0x4c, 0x01, 0x49, 0x24, 0xe7, 0xe7, - 0xe5, 0x81, 0x24, 0x20, 0x46, 0xb0, 0x81, 0xb8, 0x9e, 0x29, 0x49, 0x6c, 0x60, 0x97, 0x19, 0x03, - 0x02, 0x00, 0x00, 0xff, 0xff, 0x13, 0xfe, 0x01, 0x37, 0xb8, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/dexidp/dex/server/internal/types.proto b/vendor/github.com/dexidp/dex/server/internal/types.proto deleted file mode 100644 index 87e18f4..0000000 --- a/vendor/github.com/dexidp/dex/server/internal/types.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -// Package internal holds protobuf types used by the server -package internal; - -// RefreshToken is a message that holds refresh token data used by dex. -message RefreshToken { - string refresh_id = 1; - string token = 2; -} - -// IDTokenSubject represents both the userID and connID which is returned -// as the "sub" claim in the ID Token. 
-message IDTokenSubject { - string user_id = 1; - string conn_id = 2; -} diff --git a/vendor/github.com/dexidp/dex/server/oauth2.go b/vendor/github.com/dexidp/dex/server/oauth2.go deleted file mode 100644 index ecc619d..0000000 --- a/vendor/github.com/dexidp/dex/server/oauth2.go +++ /dev/null @@ -1,633 +0,0 @@ -package server - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha256" - "crypto/sha512" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "hash" - "io" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - jose "gopkg.in/square/go-jose.v2" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/server/internal" - "github.com/dexidp/dex/storage" -) - -// TODO(ericchiang): clean this file up and figure out more idiomatic error handling. - -// authErr is an error response to an authorization request. -// See: https://tools.ietf.org/html/rfc6749#section-4.1.2.1 -type authErr struct { - State string - RedirectURI string - Type string - Description string -} - -func (err *authErr) Status() int { - if err.State == errServerError { - return http.StatusInternalServerError - } - return http.StatusBadRequest -} - -func (err *authErr) Error() string { - return err.Description -} - -func (err *authErr) Handle() (http.Handler, bool) { - // Didn't get a valid redirect URI. - if err.RedirectURI == "" { - return nil, false - } - - hf := func(w http.ResponseWriter, r *http.Request) { - v := url.Values{} - v.Add("state", err.State) - v.Add("error", err.Type) - if err.Description != "" { - v.Add("error_description", err.Description) - } - var redirectURI string - if strings.Contains(err.RedirectURI, "?") { - redirectURI = err.RedirectURI + "&" + v.Encode() - } else { - redirectURI = err.RedirectURI + "?" + v.Encode() - } - http.Redirect(w, r, redirectURI, http.StatusSeeOther) - } - return http.HandlerFunc(hf), true -} - -func tokenErr(w http.ResponseWriter, typ, description string, statusCode int) error { - data := struct { - Error string `json:"error"` - Description string `json:"error_description,omitempty"` - }{typ, description} - body, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("failed to marshal token error response: %v", err) - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(len(body))) - w.WriteHeader(statusCode) - w.Write(body) - return nil -} - -// nolint -const ( - errInvalidRequest = "invalid_request" - errUnauthorizedClient = "unauthorized_client" - errAccessDenied = "access_denied" - errUnsupportedResponseType = "unsupported_response_type" - errInvalidScope = "invalid_scope" - errServerError = "server_error" - errTemporarilyUnavailable = "temporarily_unavailable" - errUnsupportedGrantType = "unsupported_grant_type" - errInvalidGrant = "invalid_grant" - errInvalidClient = "invalid_client" - errInvalidConnectorID = "invalid_connector_id" -) - -const ( - scopeOfflineAccess = "offline_access" // Request a refresh token. - scopeOpenID = "openid" - scopeGroups = "groups" - scopeEmail = "email" - scopeProfile = "profile" - scopeFederatedID = "federated:id" - scopeCrossClientPrefix = "audience:server:client_id:" -) - -const ( - redirectURIOOB = "urn:ietf:wg:oauth:2.0:oob" -) - -const ( - grantTypeAuthorizationCode = "authorization_code" - grantTypeRefreshToken = "refresh_token" - grantTypePassword = "password" -) - -const ( - responseTypeCode = "code" // "Regular" flow - responseTypeToken = "token" // Implicit flow for frontend apps. 
- responseTypeIDToken = "id_token" // ID Token in url fragment -) - -func parseScopes(scopes []string) connector.Scopes { - var s connector.Scopes - for _, scope := range scopes { - switch scope { - case scopeOfflineAccess: - s.OfflineAccess = true - case scopeGroups: - s.Groups = true - } - } - return s -} - -// Determine the signature algorithm for a JWT. -func signatureAlgorithm(jwk *jose.JSONWebKey) (alg jose.SignatureAlgorithm, err error) { - if jwk.Key == nil { - return alg, errors.New("no signing key") - } - switch key := jwk.Key.(type) { - case *rsa.PrivateKey: - // Because OIDC mandates that we support RS256, we always return that - // value. In the future, we might want to make this configurable on a - // per client basis. For example allowing PS256 or ECDSA variants. - // - // See https://github.com/dexidp/dex/issues/692 - return jose.RS256, nil - case *ecdsa.PrivateKey: - // We don't actually support ECDSA keys yet, but they're tested for - // in case we want to in the future. - // - // These values are prescribed depending on the ECDSA key type. We - // can't return different values. - switch key.Params() { - case elliptic.P256().Params(): - return jose.ES256, nil - case elliptic.P384().Params(): - return jose.ES384, nil - case elliptic.P521().Params(): - return jose.ES512, nil - default: - return alg, errors.New("unsupported ecdsa curve") - } - default: - return alg, fmt.Errorf("unsupported signing key type %T", key) - } -} - -func signPayload(key *jose.JSONWebKey, alg jose.SignatureAlgorithm, payload []byte) (jws string, err error) { - signingKey := jose.SigningKey{Key: key, Algorithm: alg} - - signer, err := jose.NewSigner(signingKey, &jose.SignerOptions{}) - if err != nil { - return "", fmt.Errorf("new signier: %v", err) - } - signature, err := signer.Sign(payload) - if err != nil { - return "", fmt.Errorf("signing payload: %v", err) - } - return signature.CompactSerialize() -} - -// The hash algorithm for the at_hash is determined by the signing -// algorithm used for the id_token. From the spec: -// -// ...the hash algorithm used is the hash algorithm used in the alg Header -// Parameter of the ID Token's JOSE Header. 
For instance, if the alg is RS256, -// hash the access_token value with SHA-256 -// -// https://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken -var hashForSigAlg = map[jose.SignatureAlgorithm]func() hash.Hash{ - jose.RS256: sha256.New, - jose.RS384: sha512.New384, - jose.RS512: sha512.New, - jose.ES256: sha256.New, - jose.ES384: sha512.New384, - jose.ES512: sha512.New, -} - -// Compute an at_hash from a raw access token and a signature algorithm -// -// See: https://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken -func accessTokenHash(alg jose.SignatureAlgorithm, accessToken string) (string, error) { - newHash, ok := hashForSigAlg[alg] - if !ok { - return "", fmt.Errorf("unsupported signature algorithm: %s", alg) - } - - hash := newHash() - if _, err := io.WriteString(hash, accessToken); err != nil { - return "", fmt.Errorf("computing hash: %v", err) - } - sum := hash.Sum(nil) - return base64.RawURLEncoding.EncodeToString(sum[:len(sum)/2]), nil -} - -type audience []string - -func (a audience) contains(aud string) bool { - for _, e := range a { - if aud == e { - return true - } - } - return false -} - -func (a audience) MarshalJSON() ([]byte, error) { - if len(a) == 1 { - return json.Marshal(a[0]) - } - return json.Marshal([]string(a)) -} - -type idTokenClaims struct { - Issuer string `json:"iss"` - Subject string `json:"sub"` - Audience audience `json:"aud"` - Expiry int64 `json:"exp"` - IssuedAt int64 `json:"iat"` - AuthorizingParty string `json:"azp,omitempty"` - Nonce string `json:"nonce,omitempty"` - - AccessTokenHash string `json:"at_hash,omitempty"` - - Email string `json:"email,omitempty"` - EmailVerified *bool `json:"email_verified,omitempty"` - - Groups []string `json:"groups,omitempty"` - - Name string `json:"name,omitempty"` - PreferredUsername string `json:"preferred_username,omitempty"` - - FederatedIDClaims *federatedIDClaims `json:"federated_claims,omitempty"` -} - -type federatedIDClaims struct { - ConnectorID string `json:"connector_id,omitempty"` - UserID string `json:"user_id,omitempty"` -} - -func (s *Server) newAccessToken(clientID string, claims storage.Claims, scopes []string, nonce, connID string) (accessToken string, err error) { - idToken, _, err := s.newIDToken(clientID, claims, scopes, nonce, storage.NewID(), connID) - return idToken, err -} - -func (s *Server) newIDToken(clientID string, claims storage.Claims, scopes []string, nonce, accessToken, connID string) (idToken string, expiry time.Time, err error) { - keys, err := s.storage.GetKeys() - if err != nil { - s.logger.Errorf("Failed to get keys: %v", err) - return "", expiry, err - } - - signingKey := keys.SigningKey - if signingKey == nil { - return "", expiry, fmt.Errorf("no key to sign payload with") - } - signingAlg, err := signatureAlgorithm(signingKey) - if err != nil { - return "", expiry, err - } - - issuedAt := s.now() - expiry = issuedAt.Add(s.idTokensValidFor) - - sub := &internal.IDTokenSubject{ - UserId: claims.UserID, - ConnId: connID, - } - - subjectString, err := internal.Marshal(sub) - if err != nil { - s.logger.Errorf("failed to marshal offline session ID: %v", err) - return "", expiry, fmt.Errorf("failed to marshal offline session ID: %v", err) - } - - tok := idTokenClaims{ - Issuer: s.issuerURL.String(), - Subject: subjectString, - Nonce: nonce, - Expiry: expiry.Unix(), - IssuedAt: issuedAt.Unix(), - } - - if accessToken != "" { - atHash, err := accessTokenHash(signingAlg, accessToken) - if err != nil { - s.logger.Errorf("error computing at_hash: %v", err) - 
return "", expiry, fmt.Errorf("error computing at_hash: %v", err) - } - tok.AccessTokenHash = atHash - } - - for _, scope := range scopes { - switch { - case scope == scopeEmail: - tok.Email = claims.Email - tok.EmailVerified = &claims.EmailVerified - case scope == scopeGroups: - tok.Groups = claims.Groups - case scope == scopeProfile: - tok.Name = claims.Username - tok.PreferredUsername = claims.PreferredUsername - case scope == scopeFederatedID: - tok.FederatedIDClaims = &federatedIDClaims{ - ConnectorID: connID, - UserID: claims.UserID, - } - default: - peerID, ok := parseCrossClientScope(scope) - if !ok { - // Ignore unknown scopes. These are already validated during the - // initial auth request. - continue - } - isTrusted, err := s.validateCrossClientTrust(clientID, peerID) - if err != nil { - return "", expiry, err - } - if !isTrusted { - // TODO(ericchiang): propagate this error to the client. - return "", expiry, fmt.Errorf("peer (%s) does not trust client", peerID) - } - tok.Audience = append(tok.Audience, peerID) - } - } - - if len(tok.Audience) == 0 { - // Client didn't ask for cross client audience. Set the current - // client as the audience. - tok.Audience = audience{clientID} - } else { - // Client asked for cross client audience: - // if the current client was not requested explicitly - if !tok.Audience.contains(clientID) { - // by default it becomes one of entries in Audience - tok.Audience = append(tok.Audience, clientID) - } - // The current client becomes the authorizing party. - tok.AuthorizingParty = clientID - } - - payload, err := json.Marshal(tok) - if err != nil { - return "", expiry, fmt.Errorf("could not serialize claims: %v", err) - } - - if idToken, err = signPayload(signingKey, signingAlg, payload); err != nil { - return "", expiry, fmt.Errorf("failed to sign payload: %v", err) - } - return idToken, expiry, nil -} - -// parse the initial request from the OAuth2 client. -func (s *Server) parseAuthorizationRequest(r *http.Request) (*storage.AuthRequest, error) { - if err := r.ParseForm(); err != nil { - return nil, &authErr{"", "", errInvalidRequest, "Failed to parse request body."} - } - q := r.Form - redirectURI, err := url.QueryUnescape(q.Get("redirect_uri")) - if err != nil { - return nil, &authErr{"", "", errInvalidRequest, "No redirect_uri provided."} - } - - clientID := q.Get("client_id") - state := q.Get("state") - nonce := q.Get("nonce") - connectorID := q.Get("connector_id") - // Some clients, like the old go-oidc, provide extra whitespace. Tolerate this. 
- scopes := strings.Fields(q.Get("scope")) - responseTypes := strings.Fields(q.Get("response_type")) - - client, err := s.storage.GetClient(clientID) - if err != nil { - if err == storage.ErrNotFound { - description := fmt.Sprintf("Invalid client_id (%q).", clientID) - return nil, &authErr{"", "", errUnauthorizedClient, description} - } - s.logger.Errorf("Failed to get client: %v", err) - return nil, &authErr{"", "", errServerError, ""} - } - - if connectorID != "" { - connectors, err := s.storage.ListConnectors() - if err != nil { - return nil, &authErr{"", "", errServerError, "Unable to retrieve connectors"} - } - if !validateConnectorID(connectors, connectorID) { - return nil, &authErr{"", "", errInvalidRequest, "Invalid ConnectorID"} - } - } - - if !validateRedirectURI(client, redirectURI) { - description := fmt.Sprintf("Unregistered redirect_uri (%q).", redirectURI) - return nil, &authErr{"", "", errInvalidRequest, description} - } - - // From here on out, we want to redirect back to the client with an error. - newErr := func(typ, format string, a ...interface{}) *authErr { - return &authErr{state, redirectURI, typ, fmt.Sprintf(format, a...)} - } - - var ( - unrecognized []string - invalidScopes []string - ) - hasOpenIDScope := false - for _, scope := range scopes { - switch scope { - case scopeOpenID: - hasOpenIDScope = true - case scopeOfflineAccess, scopeEmail, scopeProfile, scopeGroups, scopeFederatedID: - default: - peerID, ok := parseCrossClientScope(scope) - if !ok { - unrecognized = append(unrecognized, scope) - continue - } - - isTrusted, err := s.validateCrossClientTrust(clientID, peerID) - if err != nil { - return nil, newErr(errServerError, "Internal server error.") - } - if !isTrusted { - invalidScopes = append(invalidScopes, scope) - } - } - } - if !hasOpenIDScope { - return nil, newErr("invalid_scope", `Missing required scope(s) ["openid"].`) - } - if len(unrecognized) > 0 { - return nil, newErr("invalid_scope", "Unrecognized scope(s) %q", unrecognized) - } - if len(invalidScopes) > 0 { - return nil, newErr("invalid_scope", "Client can't request scope(s) %q", invalidScopes) - } - - var rt struct { - code bool - idToken bool - token bool - } - - for _, responseType := range responseTypes { - switch responseType { - case responseTypeCode: - rt.code = true - case responseTypeIDToken: - rt.idToken = true - case responseTypeToken: - rt.token = true - default: - return nil, newErr("invalid_request", "Invalid response type %q", responseType) - } - - if !s.supportedResponseTypes[responseType] { - return nil, newErr(errUnsupportedResponseType, "Unsupported response type %q", responseType) - } - } - - if len(responseTypes) == 0 { - return nil, newErr("invalid_requests", "No response_type provided") - } - - if rt.token && !rt.code && !rt.idToken { - // "token" can't be provided by its own. - // - // https://openid.net/specs/openid-connect-core-1_0.html#Authentication - return nil, newErr("invalid_request", "Response type 'token' must be provided with type 'id_token' and/or 'code'") - } - if !rt.code { - // Either "id_token code" or "id_token" has been provided which implies the - // implicit flow. Implicit flow requires a nonce value. 
- // - // https://openid.net/specs/openid-connect-core-1_0.html#ImplicitAuthRequest - if nonce == "" { - return nil, newErr("invalid_request", "Response type 'token' requires a 'nonce' value.") - } - } - if rt.token { - if redirectURI == redirectURIOOB { - err := fmt.Sprintf("Cannot use response type 'token' with redirect_uri '%s'.", redirectURIOOB) - return nil, newErr("invalid_request", err) - } - } - - return &storage.AuthRequest{ - ID: storage.NewID(), - ClientID: client.ID, - State: state, - Nonce: nonce, - ForceApprovalPrompt: q.Get("approval_prompt") == "force", - Scopes: scopes, - RedirectURI: redirectURI, - ResponseTypes: responseTypes, - ConnectorID: connectorID, - }, nil -} - -func parseCrossClientScope(scope string) (peerID string, ok bool) { - if ok = strings.HasPrefix(scope, scopeCrossClientPrefix); ok { - peerID = scope[len(scopeCrossClientPrefix):] - } - return -} - -func (s *Server) validateCrossClientTrust(clientID, peerID string) (trusted bool, err error) { - if peerID == clientID { - return true, nil - } - peer, err := s.storage.GetClient(peerID) - if err != nil { - if err != storage.ErrNotFound { - s.logger.Errorf("Failed to get client: %v", err) - return false, err - } - return false, nil - } - for _, id := range peer.TrustedPeers { - if id == clientID { - return true, nil - } - } - return false, nil -} - -func validateRedirectURI(client storage.Client, redirectURI string) bool { - if !client.Public { - for _, uri := range client.RedirectURIs { - if redirectURI == uri { - return true - } - } - return false - } - - if redirectURI == redirectURIOOB { - return true - } - - // verify that the host is of form "http://localhost:(port)(path)" or "http://localhost(path)" - u, err := url.Parse(redirectURI) - if err != nil { - return false - } - if u.Scheme != "http" { - return false - } - if u.Host == "localhost" { - return true - } - host, _, err := net.SplitHostPort(u.Host) - return err == nil && host == "localhost" -} - -func validateConnectorID(connectors []storage.Connector, connectorID string) bool { - for _, c := range connectors { - if c.ID == connectorID { - return true - } - } - return false -} - -// storageKeySet implements the oidc.KeySet interface backed by Dex storage -type storageKeySet struct { - storage.Storage -} - -func (s *storageKeySet) VerifySignature(_ context.Context, jwt string) (payload []byte, err error) { - jws, err := jose.ParseSigned(jwt) - if err != nil { - return nil, err - } - - keyID := "" - for _, sig := range jws.Signatures { - keyID = sig.Header.KeyID - break - } - - skeys, err := s.Storage.GetKeys() - if err != nil { - return nil, err - } - - keys := []*jose.JSONWebKey{skeys.SigningKeyPub} - for _, vk := range skeys.VerificationKeys { - keys = append(keys, vk.PublicKey) - } - - for _, key := range keys { - if keyID == "" || key.KeyID == keyID { - if payload, err := jws.Verify(key); err == nil { - return payload, nil - } - } - } - - return nil, errors.New("failed to verify id token signature") -} diff --git a/vendor/github.com/dexidp/dex/server/rotation.go b/vendor/github.com/dexidp/dex/server/rotation.go deleted file mode 100644 index 464dccf..0000000 --- a/vendor/github.com/dexidp/dex/server/rotation.go +++ /dev/null @@ -1,179 +0,0 @@ -package server - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "encoding/hex" - "errors" - "fmt" - "io" - "time" - - "gopkg.in/square/go-jose.v2" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" -) - -var errAlreadyRotated = errors.New("keys already rotated by another server 
instance") - -// rotationStrategy describes a strategy for generating cryptographic keys, how -// often to rotate them, and how long they can validate signatures after rotation. -type rotationStrategy struct { - // Time between rotations. - rotationFrequency time.Duration - - // After being rotated how long should the key be kept around for validating - // signatues? - idTokenValidFor time.Duration - - // Keys are always RSA keys. Though cryptopasta recommends ECDSA keys, not every - // client may support these (e.g. github.com/coreos/go-oidc/oidc). - key func() (*rsa.PrivateKey, error) -} - -// staticRotationStrategy returns a strategy which never rotates keys. -func staticRotationStrategy(key *rsa.PrivateKey) rotationStrategy { - return rotationStrategy{ - // Setting these values to 100 years is easier than having a flag indicating no rotation. - rotationFrequency: time.Hour * 8760 * 100, - idTokenValidFor: time.Hour * 8760 * 100, - key: func() (*rsa.PrivateKey, error) { return key, nil }, - } -} - -// defaultRotationStrategy returns a strategy which rotates keys every provided period, -// holding onto the public parts for some specified amount of time. -func defaultRotationStrategy(rotationFrequency, idTokenValidFor time.Duration) rotationStrategy { - return rotationStrategy{ - rotationFrequency: rotationFrequency, - idTokenValidFor: idTokenValidFor, - key: func() (*rsa.PrivateKey, error) { - return rsa.GenerateKey(rand.Reader, 2048) - }, - } -} - -type keyRotater struct { - storage.Storage - - strategy rotationStrategy - now func() time.Time - - logger log.Logger -} - -// startKeyRotation begins key rotation in a new goroutine, closing once the context is canceled. -// -// The method blocks until after the first attempt to rotate keys has completed. That way -// healthy storages will return from this call with valid keys. -func (s *Server) startKeyRotation(ctx context.Context, strategy rotationStrategy, now func() time.Time) { - rotater := keyRotater{s.storage, strategy, now, s.logger} - - // Try to rotate immediately so properly configured storages will have keys. - if err := rotater.rotate(); err != nil { - if err == errAlreadyRotated { - s.logger.Infof("Key rotation not needed: %v", err) - } else { - s.logger.Errorf("failed to rotate keys: %v", err) - } - } - - go func() { - for { - select { - case <-ctx.Done(): - return - case <-time.After(time.Second * 30): - if err := rotater.rotate(); err != nil { - s.logger.Errorf("failed to rotate keys: %v", err) - } - } - } - }() -} - -func (k keyRotater) rotate() error { - keys, err := k.GetKeys() - if err != nil && err != storage.ErrNotFound { - return fmt.Errorf("get keys: %v", err) - } - if k.now().Before(keys.NextRotation) { - return nil - } - k.logger.Infof("keys expired, rotating") - - // Generate the key outside of a storage transaction. - key, err := k.strategy.key() - if err != nil { - return fmt.Errorf("generate key: %v", err) - } - b := make([]byte, 20) - if _, err := io.ReadFull(rand.Reader, b); err != nil { - panic(err) - } - keyID := hex.EncodeToString(b) - priv := &jose.JSONWebKey{ - Key: key, - KeyID: keyID, - Algorithm: "RS256", - Use: "sig", - } - pub := &jose.JSONWebKey{ - Key: key.Public(), - KeyID: keyID, - Algorithm: "RS256", - Use: "sig", - } - - var nextRotation time.Time - err = k.Storage.UpdateKeys(func(keys storage.Keys) (storage.Keys, error) { - tNow := k.now() - - // if you are running multiple instances of dex, another instance - // could have already rotated the keys. 
- if tNow.Before(keys.NextRotation) { - return storage.Keys{}, errAlreadyRotated - } - - expired := func(key storage.VerificationKey) bool { - return tNow.After(key.Expiry) - } - - // Remove any verification keys that have expired. - i := 0 - for _, key := range keys.VerificationKeys { - if !expired(key) { - keys.VerificationKeys[i] = key - i++ - } - } - keys.VerificationKeys = keys.VerificationKeys[:i] - - if keys.SigningKeyPub != nil { - // Move current signing key to a verification only key, throwing - // away the private part. - verificationKey := storage.VerificationKey{ - PublicKey: keys.SigningKeyPub, - // After demoting the signing key, keep the token around for at least - // the amount of time an ID Token is valid for. This ensures the - // verification key won't expire until all ID Tokens it's signed - // expired as well. - Expiry: tNow.Add(k.strategy.idTokenValidFor), - } - keys.VerificationKeys = append(keys.VerificationKeys, verificationKey) - } - - nextRotation = k.now().Add(k.strategy.rotationFrequency) - keys.SigningKey = priv - keys.SigningKeyPub = pub - keys.NextRotation = nextRotation - return keys, nil - }) - if err != nil { - return err - } - k.logger.Infof("keys rotated, next rotation: %s", nextRotation) - return nil -} diff --git a/vendor/github.com/dexidp/dex/server/server.go b/vendor/github.com/dexidp/dex/server/server.go deleted file mode 100644 index 09292b1..0000000 --- a/vendor/github.com/dexidp/dex/server/server.go +++ /dev/null @@ -1,561 +0,0 @@ -package server - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/felixge/httpsnoop" - "github.com/gorilla/handlers" - "github.com/gorilla/mux" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/bcrypt" - - "github.com/dexidp/dex/connector" - "github.com/dexidp/dex/connector/atlassiancrowd" - "github.com/dexidp/dex/connector/authproxy" - "github.com/dexidp/dex/connector/bitbucketcloud" - "github.com/dexidp/dex/connector/github" - "github.com/dexidp/dex/connector/gitlab" - "github.com/dexidp/dex/connector/google" - "github.com/dexidp/dex/connector/keystone" - "github.com/dexidp/dex/connector/ldap" - "github.com/dexidp/dex/connector/linkedin" - "github.com/dexidp/dex/connector/microsoft" - "github.com/dexidp/dex/connector/mock" - "github.com/dexidp/dex/connector/oidc" - "github.com/dexidp/dex/connector/openshift" - "github.com/dexidp/dex/connector/saml" - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" -) - -// LocalConnector is the local passwordDB connector which is an internal -// connector maintained by the server. -const LocalConnector = "local" - -// Connector is a connector with resource version metadata. -type Connector struct { - ResourceVersion string - Connector connector.Connector -} - -// Config holds the server's configuration options. -// -// Multiple servers using the same storage are expected to be configured identically. -type Config struct { - Issuer string - - // The backing persistence layer. - Storage storage.Storage - - // Valid values are "code" to enable the code flow and "token" to enable the implicit - // flow. If no response types are supplied this value defaults to "code". - SupportedResponseTypes []string - - // List of allowed origins for CORS requests on discovery, token and keys endpoint. - // If none are indicated, CORS requests are disabled. Passing in "*" will allow any - // domain. 
- AllowedOrigins []string - - // If enabled, the server won't prompt the user to approve authorization requests. - // Logging in implies approval. - SkipApprovalScreen bool - - // If enabled, the connectors selection page will always be shown even if there's only one - AlwaysShowLoginScreen bool - - RotateKeysAfter time.Duration // Defaults to 6 hours. - IDTokensValidFor time.Duration // Defaults to 24 hours - AuthRequestsValidFor time.Duration // Defaults to 24 hours - // If set, the server will use this connector to handle password grants - PasswordConnector string - - GCFrequency time.Duration // Defaults to 5 minutes - - // If specified, the server will use this function for determining time. - Now func() time.Time - - Web WebConfig - - Logger log.Logger - - PrometheusRegistry *prometheus.Registry -} - -// WebConfig holds the server's frontend templates and asset configuration. -// -// These are currently very custom to CoreOS and it's not recommended that -// outside users attempt to customize these. -type WebConfig struct { - // A filepath to web static. - // - // It is expected to contain the following directories: - // - // * static - Static static served at "( issuer URL )/static". - // * templates - HTML templates controlled by dex. - // * themes/(theme) - Static static served at "( issuer URL )/theme". - // - Dir string - - // Defaults to "( issuer URL )/theme/logo.png" - LogoURL string - - // Defaults to "dex" - Issuer string - - // Defaults to "coreos" - Theme string - - // Map of extra values passed into the templates - Extra map[string]string -} - -func value(val, defaultValue time.Duration) time.Duration { - if val == 0 { - return defaultValue - } - return val -} - -// Server is the top level object. -type Server struct { - issuerURL url.URL - - // mutex for the connectors map. - mu sync.Mutex - // Map of connector IDs to connectors. - connectors map[string]Connector - - storage storage.Storage - - mux http.Handler - - templates *templates - - // If enabled, don't prompt user for approval after logging in through connector. - skipApproval bool - - // If enabled, show the connector selection screen even if there's only one - alwaysShowLogin bool - - // Used for password grant - passwordConnector string - - supportedResponseTypes map[string]bool - - now func() time.Time - - idTokensValidFor time.Duration - authRequestsValidFor time.Duration - - logger log.Logger -} - -// NewServer constructs a server from the provided config. 
-func NewServer(ctx context.Context, c Config) (*Server, error) { - return newServer(ctx, c, defaultRotationStrategy( - value(c.RotateKeysAfter, 6*time.Hour), - value(c.IDTokensValidFor, 24*time.Hour), - )) -} - -func newServer(ctx context.Context, c Config, rotationStrategy rotationStrategy) (*Server, error) { - issuerURL, err := url.Parse(c.Issuer) - if err != nil { - return nil, fmt.Errorf("server: can't parse issuer URL") - } - - if c.Storage == nil { - return nil, errors.New("server: storage cannot be nil") - } - if len(c.SupportedResponseTypes) == 0 { - c.SupportedResponseTypes = []string{responseTypeCode} - } - - supported := make(map[string]bool) - for _, respType := range c.SupportedResponseTypes { - switch respType { - case responseTypeCode, responseTypeIDToken, responseTypeToken: - default: - return nil, fmt.Errorf("unsupported response_type %q", respType) - } - supported[respType] = true - } - - web := webConfig{ - dir: c.Web.Dir, - logoURL: c.Web.LogoURL, - issuerURL: c.Issuer, - issuer: c.Web.Issuer, - theme: c.Web.Theme, - extra: c.Web.Extra, - } - - static, theme, tmpls, err := loadWebConfig(web) - if err != nil { - return nil, fmt.Errorf("server: failed to load web static: %v", err) - } - - now := c.Now - if now == nil { - now = time.Now - } - - s := &Server{ - issuerURL: *issuerURL, - connectors: make(map[string]Connector), - storage: newKeyCacher(c.Storage, now), - supportedResponseTypes: supported, - idTokensValidFor: value(c.IDTokensValidFor, 24*time.Hour), - authRequestsValidFor: value(c.AuthRequestsValidFor, 24*time.Hour), - skipApproval: c.SkipApprovalScreen, - alwaysShowLogin: c.AlwaysShowLoginScreen, - now: now, - templates: tmpls, - passwordConnector: c.PasswordConnector, - logger: c.Logger, - } - - // Retrieves connector objects in backend storage. This list includes the static connectors - // defined in the ConfigMap and dynamic connectors retrieved from the storage. 
- storageConnectors, err := c.Storage.ListConnectors() - if err != nil { - return nil, fmt.Errorf("server: failed to list connector objects from storage: %v", err) - } - - if len(storageConnectors) == 0 && len(s.connectors) == 0 { - return nil, errors.New("server: no connectors specified") - } - - for _, conn := range storageConnectors { - if _, err := s.OpenConnector(conn); err != nil { - return nil, fmt.Errorf("server: Failed to open connector %s: %v", conn.ID, err) - } - } - - instrumentHandlerCounter := func(handlerName string, handler http.Handler) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - handler.ServeHTTP(w, r) - }) - } - - if c.PrometheusRegistry != nil { - requestCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "http_requests_total", - Help: "Count of all HTTP requests.", - }, []string{"handler", "code", "method"}) - - err = c.PrometheusRegistry.Register(requestCounter) - if err != nil { - return nil, fmt.Errorf("server: Failed to register Prometheus HTTP metrics: %v", err) - } - - instrumentHandlerCounter = func(handlerName string, handler http.Handler) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - m := httpsnoop.CaptureMetrics(handler, w, r) - requestCounter.With(prometheus.Labels{"handler": handlerName, "code": strconv.Itoa(m.Code), "method": r.Method}).Inc() - }) - } - } - - r := mux.NewRouter() - handle := func(p string, h http.Handler) { - r.Handle(path.Join(issuerURL.Path, p), instrumentHandlerCounter(p, h)) - } - handleFunc := func(p string, h http.HandlerFunc) { - handle(p, h) - } - handlePrefix := func(p string, h http.Handler) { - prefix := path.Join(issuerURL.Path, p) - r.PathPrefix(prefix).Handler(http.StripPrefix(prefix, h)) - } - handleWithCORS := func(p string, h http.HandlerFunc) { - var handler http.Handler = h - if len(c.AllowedOrigins) > 0 { - corsOption := handlers.AllowedOrigins(c.AllowedOrigins) - handler = handlers.CORS(corsOption)(handler) - } - r.Handle(path.Join(issuerURL.Path, p), instrumentHandlerCounter(p, handler)) - } - r.NotFoundHandler = http.HandlerFunc(http.NotFound) - - discoveryHandler, err := s.discoveryHandler() - if err != nil { - return nil, err - } - handleWithCORS("/.well-known/openid-configuration", discoveryHandler) - - // TODO(ericchiang): rate limit certain paths based on IP. - handleWithCORS("/token", s.handleToken) - handleWithCORS("/keys", s.handlePublicKeys) - handleWithCORS("/userinfo", s.handleUserInfo) - handleFunc("/auth", s.handleAuthorization) - handleFunc("/auth/{connector}", s.handleConnectorLogin) - r.HandleFunc(path.Join(issuerURL.Path, "/callback"), func(w http.ResponseWriter, r *http.Request) { - // Strip the X-Remote-* headers to prevent security issues on - // misconfigured authproxy connector setups. - for key := range r.Header { - if strings.HasPrefix(strings.ToLower(key), "x-remote-") { - r.Header.Del(key) - } - } - s.handleConnectorCallback(w, r) - }) - // For easier connector-specific web server configuration, e.g. for the - // "authproxy" connector. 
- handleFunc("/callback/{connector}", s.handleConnectorCallback) - handleFunc("/approval", s.handleApproval) - handle("/healthz", s.newHealthChecker(ctx)) - handlePrefix("/static", static) - handlePrefix("/theme", theme) - s.mux = r - - s.startKeyRotation(ctx, rotationStrategy, now) - s.startGarbageCollection(ctx, value(c.GCFrequency, 5*time.Minute), now) - - return s, nil -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.mux.ServeHTTP(w, r) -} - -func (s *Server) absPath(pathItems ...string) string { - paths := make([]string, len(pathItems)+1) - paths[0] = s.issuerURL.Path - copy(paths[1:], pathItems) - return path.Join(paths...) -} - -func (s *Server) absURL(pathItems ...string) string { - u := s.issuerURL - u.Path = s.absPath(pathItems...) - return u.String() -} - -func newPasswordDB(s storage.Storage) interface { - connector.Connector - connector.PasswordConnector -} { - return passwordDB{s} -} - -type passwordDB struct { - s storage.Storage -} - -func (db passwordDB) Login(ctx context.Context, s connector.Scopes, email, password string) (connector.Identity, bool, error) { - p, err := db.s.GetPassword(email) - if err != nil { - if err != storage.ErrNotFound { - return connector.Identity{}, false, fmt.Errorf("get password: %v", err) - } - return connector.Identity{}, false, nil - } - // This check prevents dex users from logging in using static passwords - // configured with hash costs that are too high or low. - if err := checkCost(p.Hash); err != nil { - return connector.Identity{}, false, err - } - if err := bcrypt.CompareHashAndPassword(p.Hash, []byte(password)); err != nil { - return connector.Identity{}, false, nil - } - return connector.Identity{ - UserID: p.UserID, - Username: p.Username, - Email: p.Email, - EmailVerified: true, - }, true, nil -} - -func (db passwordDB) Refresh(ctx context.Context, s connector.Scopes, identity connector.Identity) (connector.Identity, error) { - // If the user has been deleted, the refresh token will be rejected. - p, err := db.s.GetPassword(identity.Email) - if err != nil { - if err == storage.ErrNotFound { - return connector.Identity{}, errors.New("user not found") - } - return connector.Identity{}, fmt.Errorf("get password: %v", err) - } - - // User removed but a new user with the same email exists. - if p.UserID != identity.UserID { - return connector.Identity{}, errors.New("user not found") - } - - // If a user has updated their username, that will be reflected in the - // refreshed token. - // - // No other fields are expected to be refreshable as email is effectively used - // as an ID and this implementation doesn't deal with groups. - identity.Username = p.Username - - return identity, nil -} - -func (db passwordDB) Prompt() string { - return "Email Address" -} - -// newKeyCacher returns a storage which caches keys so long as the next -func newKeyCacher(s storage.Storage, now func() time.Time) storage.Storage { - if now == nil { - now = time.Now - } - return &keyCacher{Storage: s, now: now} -} - -type keyCacher struct { - storage.Storage - - now func() time.Time - keys atomic.Value // Always holds nil or type *storage.Keys. 
-} - -func (k *keyCacher) GetKeys() (storage.Keys, error) { - keys, ok := k.keys.Load().(*storage.Keys) - if ok && keys != nil && k.now().Before(keys.NextRotation) { - return *keys, nil - } - - storageKeys, err := k.Storage.GetKeys() - if err != nil { - return storageKeys, err - } - - if k.now().Before(storageKeys.NextRotation) { - k.keys.Store(&storageKeys) - } - return storageKeys, nil -} - -func (s *Server) startGarbageCollection(ctx context.Context, frequency time.Duration, now func() time.Time) { - go func() { - for { - select { - case <-ctx.Done(): - return - case <-time.After(frequency): - if r, err := s.storage.GarbageCollect(now()); err != nil { - s.logger.Errorf("garbage collection failed: %v", err) - } else if r.AuthRequests > 0 || r.AuthCodes > 0 { - s.logger.Infof("garbage collection run, delete auth requests=%d, auth codes=%d", r.AuthRequests, r.AuthCodes) - } - } - } - }() -} - -// ConnectorConfig is a configuration that can open a connector. -type ConnectorConfig interface { - Open(id string, logger log.Logger) (connector.Connector, error) -} - -// ConnectorsConfig variable provides an easy way to return a config struct -// depending on the connector type. -var ConnectorsConfig = map[string]func() ConnectorConfig{ - "keystone": func() ConnectorConfig { return new(keystone.Config) }, - "mockCallback": func() ConnectorConfig { return new(mock.CallbackConfig) }, - "mockPassword": func() ConnectorConfig { return new(mock.PasswordConfig) }, - "ldap": func() ConnectorConfig { return new(ldap.Config) }, - "github": func() ConnectorConfig { return new(github.Config) }, - "gitlab": func() ConnectorConfig { return new(gitlab.Config) }, - "google": func() ConnectorConfig { return new(google.Config) }, - "oidc": func() ConnectorConfig { return new(oidc.Config) }, - "saml": func() ConnectorConfig { return new(saml.Config) }, - "authproxy": func() ConnectorConfig { return new(authproxy.Config) }, - "linkedin": func() ConnectorConfig { return new(linkedin.Config) }, - "microsoft": func() ConnectorConfig { return new(microsoft.Config) }, - "bitbucket-cloud": func() ConnectorConfig { return new(bitbucketcloud.Config) }, - "openshift": func() ConnectorConfig { return new(openshift.Config) }, - "atlassian-crowd": func() ConnectorConfig { return new(atlassiancrowd.Config) }, - // Keep around for backwards compatibility. - "samlExperimental": func() ConnectorConfig { return new(saml.Config) }, -} - -// openConnector will parse the connector config and open the connector. -func openConnector(logger log.Logger, conn storage.Connector) (connector.Connector, error) { - var c connector.Connector - - f, ok := ConnectorsConfig[conn.Type] - if !ok { - return c, fmt.Errorf("unknown connector type %q", conn.Type) - } - - connConfig := f() - if len(conn.Config) != 0 { - data := []byte(string(conn.Config)) - if err := json.Unmarshal(data, connConfig); err != nil { - return c, fmt.Errorf("parse connector config: %v", err) - } - } - - c, err := connConfig.Open(conn.ID, logger) - if err != nil { - return c, fmt.Errorf("failed to create connector %s: %v", conn.ID, err) - } - - return c, nil -} - -// OpenConnector updates server connector map with specified connector object. 
-func (s *Server) OpenConnector(conn storage.Connector) (Connector, error) { - var c connector.Connector - - if conn.Type == LocalConnector { - c = newPasswordDB(s.storage) - } else { - var err error - c, err = openConnector(s.logger, conn) - if err != nil { - return Connector{}, fmt.Errorf("failed to open connector: %v", err) - } - } - - connector := Connector{ - ResourceVersion: conn.ResourceVersion, - Connector: c, - } - s.mu.Lock() - s.connectors[conn.ID] = connector - s.mu.Unlock() - - return connector, nil -} - -// getConnector retrieves the connector object with the given id from the storage -// and updates the connector list for server if necessary. -func (s *Server) getConnector(id string) (Connector, error) { - storageConnector, err := s.storage.GetConnector(id) - if err != nil { - return Connector{}, fmt.Errorf("failed to get connector object from storage: %v", err) - } - - var conn Connector - var ok bool - s.mu.Lock() - conn, ok = s.connectors[id] - s.mu.Unlock() - - if !ok || storageConnector.ResourceVersion != conn.ResourceVersion { - // Connector object does not exist in server connectors map or - // has been updated in the storage. Need to get latest. - conn, err := s.OpenConnector(storageConnector) - if err != nil { - return Connector{}, fmt.Errorf("failed to open connector: %v", err) - } - return conn, nil - } - - return conn, nil -} diff --git a/vendor/github.com/dexidp/dex/server/templates.go b/vendor/github.com/dexidp/dex/server/templates.go deleted file mode 100644 index 4947a10..0000000 --- a/vendor/github.com/dexidp/dex/server/templates.go +++ /dev/null @@ -1,327 +0,0 @@ -package server - -import ( - "fmt" - "html/template" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "sort" - "strings" -) - -const ( - tmplApproval = "approval.html" - tmplLogin = "login.html" - tmplPassword = "password.html" - tmplOOB = "oob.html" - tmplError = "error.html" -) - -var requiredTmpls = []string{ - tmplApproval, - tmplLogin, - tmplPassword, - tmplOOB, - tmplError, -} - -type templates struct { - loginTmpl *template.Template - approvalTmpl *template.Template - passwordTmpl *template.Template - oobTmpl *template.Template - errorTmpl *template.Template -} - -type webConfig struct { - dir string - logoURL string - issuer string - theme string - issuerURL string - extra map[string]string -} - -func dirExists(dir string) error { - stat, err := os.Stat(dir) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("directory %q does not exist", dir) - } - return fmt.Errorf("stat directory %q: %v", dir, err) - } - if !stat.IsDir() { - return fmt.Errorf("path %q is a file not a directory", dir) - } - return nil -} - -// loadWebConfig returns static assets, theme assets, and templates used by the frontend by -// reading the directory specified in the webConfig. 
-// -// The directory layout is expected to be: -// -// ( web directory ) -// |- static -// |- themes -// | |- (theme name) -// |- templates -// -func loadWebConfig(c webConfig) (static, theme http.Handler, templates *templates, err error) { - if c.theme == "" { - c.theme = "coreos" - } - if c.issuer == "" { - c.issuer = "dex" - } - if c.dir == "" { - c.dir = "./web" - } - if c.logoURL == "" { - c.logoURL = "theme/logo.png" - } - - if err := dirExists(c.dir); err != nil { - return nil, nil, nil, fmt.Errorf("load web dir: %v", err) - } - - staticDir := filepath.Join(c.dir, "static") - templatesDir := filepath.Join(c.dir, "templates") - themeDir := filepath.Join(c.dir, "themes", c.theme) - - for _, dir := range []string{staticDir, templatesDir, themeDir} { - if err := dirExists(dir); err != nil { - return nil, nil, nil, fmt.Errorf("load dir: %v", err) - } - } - - static = http.FileServer(http.Dir(staticDir)) - theme = http.FileServer(http.Dir(themeDir)) - - templates, err = loadTemplates(c, templatesDir) - return -} - -// loadTemplates parses the expected templates from the provided directory. -func loadTemplates(c webConfig, templatesDir string) (*templates, error) { - files, err := ioutil.ReadDir(templatesDir) - if err != nil { - return nil, fmt.Errorf("read dir: %v", err) - } - - filenames := []string{} - for _, file := range files { - if file.IsDir() { - continue - } - filenames = append(filenames, filepath.Join(templatesDir, file.Name())) - } - if len(filenames) == 0 { - return nil, fmt.Errorf("no files in template dir %q", templatesDir) - } - - issuerURL, err := url.Parse(c.issuerURL) - if err != nil { - return nil, fmt.Errorf("error parsing issuerURL: %v", err) - } - - funcs := map[string]interface{}{ - "issuer": func() string { return c.issuer }, - "logo": func() string { return c.logoURL }, - "url": func(reqPath, assetPath string) string { return relativeURL(issuerURL.Path, reqPath, assetPath) }, - "lower": strings.ToLower, - "extra": func(k string) string { return c.extra[k] }, - } - - tmpls, err := template.New("").Funcs(funcs).ParseFiles(filenames...) - if err != nil { - return nil, fmt.Errorf("parse files: %v", err) - } - missingTmpls := []string{} - for _, tmplName := range requiredTmpls { - if tmpls.Lookup(tmplName) == nil { - missingTmpls = append(missingTmpls, tmplName) - } - } - if len(missingTmpls) > 0 { - return nil, fmt.Errorf("missing template(s): %s", missingTmpls) - } - return &templates{ - loginTmpl: tmpls.Lookup(tmplLogin), - approvalTmpl: tmpls.Lookup(tmplApproval), - passwordTmpl: tmpls.Lookup(tmplPassword), - oobTmpl: tmpls.Lookup(tmplOOB), - errorTmpl: tmpls.Lookup(tmplError), - }, nil -} - -// relativeURL returns the URL of the asset relative to the URL of the request path. -// The serverPath is consulted to trim any prefix due in case it is not listening -// to the root path. -// -// Algorithm: -// 1. Remove common prefix of serverPath and reqPath -// 2. Remove common prefix of assetPath and reqPath -// 3. For each part of reqPath remaining(minus one), go up one level (..) -// 4. 
For each part of assetPath remaining, append it to result -// -//eg -//server listens at localhost/dex so serverPath is dex -//reqPath is /dex/auth -//assetPath is static/main.css -//relativeURL("/dex", "/dex/auth", "static/main.css") = "../static/main.css" -func relativeURL(serverPath, reqPath, assetPath string) string { - splitPath := func(p string) []string { - res := []string{} - parts := strings.Split(path.Clean(p), "/") - for _, part := range parts { - if part != "" { - res = append(res, part) - } - } - return res - } - - stripCommonParts := func(s1, s2 []string) ([]string, []string) { - min := len(s1) - if len(s2) < min { - min = len(s2) - } - - splitIndex := min - for i := 0; i < min; i++ { - if s1[i] != s2[i] { - splitIndex = i - break - } - } - return s1[splitIndex:], s2[splitIndex:] - } - - server, req, asset := splitPath(serverPath), splitPath(reqPath), splitPath(assetPath) - - // Remove common prefix of request path with server path - // nolint: ineffassign - server, req = stripCommonParts(server, req) - - // Remove common prefix of request path with asset path - asset, req = stripCommonParts(asset, req) - - // For each part of the request remaining (minus one) -> go up one level (..) - // For each part of the asset remaining -> append it - var relativeURL string - for i := 0; i < len(req)-1; i++ { - relativeURL = path.Join("..", relativeURL) - } - relativeURL = path.Join(relativeURL, path.Join(asset...)) - - return relativeURL -} - -var scopeDescriptions = map[string]string{ - "offline_access": "Have offline access", - "profile": "View basic profile information", - "email": "View your email address", -} - -type connectorInfo struct { - ID string - Name string - URL string - Type string -} - -type byName []connectorInfo - -func (n byName) Len() int { return len(n) } -func (n byName) Less(i, j int) bool { return n[i].Name < n[j].Name } -func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] } - -func (t *templates) login(r *http.Request, w http.ResponseWriter, connectors []connectorInfo, reqPath string) error { - sort.Sort(byName(connectors)) - data := struct { - Connectors []connectorInfo - ReqPath string - }{connectors, r.URL.Path} - return renderTemplate(w, t.loginTmpl, data) -} - -func (t *templates) password(r *http.Request, w http.ResponseWriter, postURL, lastUsername, usernamePrompt string, lastWasInvalid, showBacklink bool, reqPath string) error { - data := struct { - PostURL string - BackLink bool - Username string - UsernamePrompt string - Invalid bool - ReqPath string - }{postURL, showBacklink, lastUsername, usernamePrompt, lastWasInvalid, r.URL.Path} - return renderTemplate(w, t.passwordTmpl, data) -} - -func (t *templates) approval(r *http.Request, w http.ResponseWriter, authReqID, username, clientName string, scopes []string, reqPath string) error { - accesses := []string{} - for _, scope := range scopes { - access, ok := scopeDescriptions[scope] - if ok { - accesses = append(accesses, access) - } - } - sort.Strings(accesses) - data := struct { - User string - Client string - AuthReqID string - Scopes []string - ReqPath string - }{username, clientName, authReqID, accesses, r.URL.Path} - return renderTemplate(w, t.approvalTmpl, data) -} - -func (t *templates) oob(r *http.Request, w http.ResponseWriter, code string, reqPath string) error { - data := struct { - Code string - ReqPath string - }{code, r.URL.Path} - return renderTemplate(w, t.oobTmpl, data) -} - -func (t *templates) err(r *http.Request, w http.ResponseWriter, errCode int, errMsg string) error { - 
w.WriteHeader(errCode) - data := struct { - ErrType string - ErrMsg string - ReqPath string - }{http.StatusText(errCode), errMsg, r.URL.Path} - if err := t.errorTmpl.Execute(w, data); err != nil { - return fmt.Errorf("Error rendering template %s: %s", t.errorTmpl.Name(), err) - } - return nil -} - -// small io.Writer utility to determine if executing the template wrote to the underlying response writer. -type writeRecorder struct { - wrote bool - w io.Writer -} - -func (w *writeRecorder) Write(p []byte) (n int, err error) { - w.wrote = true - return w.w.Write(p) -} - -func renderTemplate(w http.ResponseWriter, tmpl *template.Template, data interface{}) error { - wr := &writeRecorder{w: w} - if err := tmpl.Execute(wr, data); err != nil { - if !wr.wrote { - // TODO(ericchiang): replace with better internal server error. - http.Error(w, "Internal server error", http.StatusInternalServerError) - } - return fmt.Errorf("Error rendering template %s: %s", tmpl.Name(), err) - } - return nil -} diff --git a/vendor/github.com/dexidp/dex/storage/doc.go b/vendor/github.com/dexidp/dex/storage/doc.go deleted file mode 100644 index 0a2d76b..0000000 --- a/vendor/github.com/dexidp/dex/storage/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package storage defines the storage interface and types used by the server. -package storage diff --git a/vendor/github.com/dexidp/dex/storage/etcd/config.go b/vendor/github.com/dexidp/dex/storage/etcd/config.go deleted file mode 100644 index e294a53..0000000 --- a/vendor/github.com/dexidp/dex/storage/etcd/config.go +++ /dev/null @@ -1,93 +0,0 @@ -package etcd - -import ( - "time" - - "go.etcd.io/etcd/clientv3" - "go.etcd.io/etcd/clientv3/namespace" - "go.etcd.io/etcd/pkg/transport" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" -) - -var ( - defaultDialTimeout = 2 * time.Second -) - -// SSL represents SSL options for etcd databases. -type SSL struct { - ServerName string `json:"serverName" yaml:"serverName"` - CAFile string `json:"caFile" yaml:"caFile"` - KeyFile string `json:"keyFile" yaml:"keyFile"` - CertFile string `json:"certFile" yaml:"certFile"` -} - -// Etcd options for connecting to etcd databases. 
-// If you are using a shared etcd cluster for storage, it might be useful to -// configure an etcd namespace either via Namespace field or using `etcd grpc-proxy -// --namespace=` -type Etcd struct { - Endpoints []string `json:"endpoints" yaml:"endpoints"` - Namespace string `json:"namespace" yaml:"namespace"` - Username string `json:"username" yaml:"username"` - Password string `json:"password" yaml:"password"` - SSL SSL `json:"ssl" yaml:"ssl"` -} - -// Open creates a new storage implementation backed by Etcd -func (p *Etcd) Open(logger log.Logger) (storage.Storage, error) { - return p.open(logger) -} - -func (p *Etcd) open(logger log.Logger) (*conn, error) { - cfg := clientv3.Config{ - Endpoints: p.Endpoints, - DialTimeout: defaultDialTimeout, - Username: p.Username, - Password: p.Password, - } - - var cfgtls *transport.TLSInfo - tlsinfo := transport.TLSInfo{} - if p.SSL.CertFile != "" { - tlsinfo.CertFile = p.SSL.CertFile - cfgtls = &tlsinfo - } - - if p.SSL.KeyFile != "" { - tlsinfo.KeyFile = p.SSL.KeyFile - cfgtls = &tlsinfo - } - - if p.SSL.CAFile != "" { - tlsinfo.TrustedCAFile = p.SSL.CAFile - cfgtls = &tlsinfo - } - - if p.SSL.ServerName != "" { - tlsinfo.ServerName = p.SSL.ServerName - cfgtls = &tlsinfo - } - - if cfgtls != nil { - clientTLS, err := cfgtls.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = clientTLS - } - - db, err := clientv3.New(cfg) - if err != nil { - return nil, err - } - if len(p.Namespace) > 0 { - db.KV = namespace.NewKV(db.KV, p.Namespace) - } - c := &conn{ - db: db, - logger: logger, - } - return c, nil -} diff --git a/vendor/github.com/dexidp/dex/storage/etcd/etcd.go b/vendor/github.com/dexidp/dex/storage/etcd/etcd.go deleted file mode 100644 index e26ce76..0000000 --- a/vendor/github.com/dexidp/dex/storage/etcd/etcd.go +++ /dev/null @@ -1,533 +0,0 @@ -package etcd - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "time" - - "go.etcd.io/etcd/clientv3" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" -) - -const ( - clientPrefix = "client/" - authCodePrefix = "auth_code/" - refreshTokenPrefix = "refresh_token/" - authRequestPrefix = "auth_req/" - passwordPrefix = "password/" - offlineSessionPrefix = "offline_session/" - connectorPrefix = "connector/" - keysName = "openid-connect-keys" - - // defaultStorageTimeout will be applied to all storage's operations. 
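Stripped of the dex-specific config struct, the open path above reduces to a couple of clientv3 calls: dial the cluster, then optionally scope every key under a namespace. A minimal, self-contained sketch of that pattern (package and function names here are illustrative, not part of dex):

package etcdexample // illustrative package name

import (
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/namespace"
)

// newNamespacedKV mirrors what Etcd.open does: connect, then wrap the KV so
// all keys written by the storage land under the given namespace prefix.
func newNamespacedKV(endpoints []string, ns string) (*clientv3.Client, error) {
	db, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		return nil, err
	}
	if ns != "" {
		db.KV = namespace.NewKV(db.KV, ns)
	}
	return db, nil
}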
- defaultStorageTimeout = 5 * time.Second -) - -type conn struct { - db *clientv3.Client - logger log.Logger -} - -func (c *conn) Close() error { - return c.db.Close() -} - -func (c *conn) GarbageCollect(now time.Time) (result storage.GCResult, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - authRequests, err := c.listAuthRequests(ctx) - if err != nil { - return result, err - } - - var delErr error - for _, authRequest := range authRequests { - if now.After(authRequest.Expiry) { - if err := c.deleteKey(ctx, keyID(authRequestPrefix, authRequest.ID)); err != nil { - c.logger.Errorf("failed to delete auth request: %v", err) - delErr = fmt.Errorf("failed to delete auth request: %v", err) - } - result.AuthRequests++ - } - } - if delErr != nil { - return result, delErr - } - - authCodes, err := c.listAuthCodes(ctx) - if err != nil { - return result, err - } - - for _, authCode := range authCodes { - if now.After(authCode.Expiry) { - if err := c.deleteKey(ctx, keyID(authCodePrefix, authCode.ID)); err != nil { - c.logger.Errorf("failed to delete auth code %v", err) - delErr = fmt.Errorf("failed to delete auth code: %v", err) - } - result.AuthCodes++ - } - } - return result, delErr -} - -func (c *conn) CreateAuthRequest(a storage.AuthRequest) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnCreate(ctx, keyID(authRequestPrefix, a.ID), fromStorageAuthRequest(a)) -} - -func (c *conn) GetAuthRequest(id string) (a storage.AuthRequest, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - var req AuthRequest - if err = c.getKey(ctx, keyID(authRequestPrefix, id), &req); err != nil { - return - } - return toStorageAuthRequest(req), nil -} - -func (c *conn) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keyID(authRequestPrefix, id), func(currentValue []byte) ([]byte, error) { - var current AuthRequest - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(toStorageAuthRequest(current)) - if err != nil { - return nil, err - } - return json.Marshal(fromStorageAuthRequest(updated)) - }) -} - -func (c *conn) DeleteAuthRequest(id string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keyID(authRequestPrefix, id)) -} - -func (c *conn) CreateAuthCode(a storage.AuthCode) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnCreate(ctx, keyID(authCodePrefix, a.ID), fromStorageAuthCode(a)) -} - -func (c *conn) GetAuthCode(id string) (a storage.AuthCode, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - err = c.getKey(ctx, keyID(authCodePrefix, id), &a) - return a, err -} - -func (c *conn) DeleteAuthCode(id string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keyID(authCodePrefix, id)) -} - -func (c *conn) CreateRefresh(r storage.RefreshToken) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return
c.txnCreate(ctx, keyID(refreshTokenPrefix, r.ID), fromStorageRefreshToken(r)) -} - -func (c *conn) GetRefresh(id string) (r storage.RefreshToken, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - var token RefreshToken - if err = c.getKey(ctx, keyID(refreshTokenPrefix, id), &token); err != nil { - return - } - return toStorageRefreshToken(token), nil -} - -func (c *conn) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keyID(refreshTokenPrefix, id), func(currentValue []byte) ([]byte, error) { - var current RefreshToken - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(toStorageRefreshToken(current)) - if err != nil { - return nil, err - } - return json.Marshal(fromStorageRefreshToken(updated)) - }) -} - -func (c *conn) DeleteRefresh(id string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keyID(refreshTokenPrefix, id)) -} - -func (c *conn) ListRefreshTokens() (tokens []storage.RefreshToken, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - res, err := c.db.Get(ctx, refreshTokenPrefix, clientv3.WithPrefix()) - if err != nil { - return tokens, err - } - for _, v := range res.Kvs { - var token RefreshToken - if err = json.Unmarshal(v.Value, &token); err != nil { - return tokens, err - } - tokens = append(tokens, toStorageRefreshToken(token)) - } - return tokens, nil -} - -func (c *conn) CreateClient(cli storage.Client) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnCreate(ctx, keyID(clientPrefix, cli.ID), cli) -} - -func (c *conn) GetClient(id string) (cli storage.Client, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - err = c.getKey(ctx, keyID(clientPrefix, id), &cli) - return cli, err -} - -func (c *conn) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keyID(clientPrefix, id), func(currentValue []byte) ([]byte, error) { - var current storage.Client - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(current) - if err != nil { - return nil, err - } - return json.Marshal(updated) - }) -} - -func (c *conn) DeleteClient(id string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keyID(clientPrefix, id)) -} - -func (c *conn) ListClients() (clients []storage.Client, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - res, err := c.db.Get(ctx, clientPrefix, clientv3.WithPrefix()) - if err != nil { - return clients, err - } - for _, v := range res.Kvs { - var cli storage.Client - if err = json.Unmarshal(v.Value, &cli); err != nil { - return clients, err - } - clients = append(clients, cli) - } - return clients, nil -} - -func (c *conn) CreatePassword(p storage.Password) error { - ctx,
cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnCreate(ctx, passwordPrefix+strings.ToLower(p.Email), p) -} - -func (c *conn) GetPassword(email string) (p storage.Password, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - err = c.getKey(ctx, keyEmail(passwordPrefix, email), &p) - return p, err -} - -func (c *conn) UpdatePassword(email string, updater func(p storage.Password) (storage.Password, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keyEmail(passwordPrefix, email), func(currentValue []byte) ([]byte, error) { - var current storage.Password - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(current) - if err != nil { - return nil, err - } - return json.Marshal(updated) - }) -} - -func (c *conn) DeletePassword(email string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keyEmail(passwordPrefix, email)) -} - -func (c *conn) ListPasswords() (passwords []storage.Password, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - res, err := c.db.Get(ctx, passwordPrefix, clientv3.WithPrefix()) - if err != nil { - return passwords, err - } - for _, v := range res.Kvs { - var p storage.Password - if err = json.Unmarshal(v.Value, &p); err != nil { - return passwords, err - } - passwords = append(passwords, p) - } - return passwords, nil -} - -func (c *conn) CreateOfflineSessions(s storage.OfflineSessions) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnCreate(ctx, keySession(offlineSessionPrefix, s.UserID, s.ConnID), fromStorageOfflineSessions(s)) -} - -func (c *conn) UpdateOfflineSessions(userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keySession(offlineSessionPrefix, userID, connID), func(currentValue []byte) ([]byte, error) { - var current OfflineSessions - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(toStorageOfflineSessions(current)) - if err != nil { - return nil, err - } - return json.Marshal(fromStorageOfflineSessions(updated)) - }) -} - -func (c *conn) GetOfflineSessions(userID string, connID string) (s storage.OfflineSessions, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - var os OfflineSessions - if err = c.getKey(ctx, keySession(offlineSessionPrefix, userID, connID), &os); err != nil { - return - } - return toStorageOfflineSessions(os), nil -} - -func (c *conn) DeleteOfflineSessions(userID string, connID string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keySession(offlineSessionPrefix, userID, connID)) -} - -func (c *conn) CreateConnector(connector storage.Connector) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnCreate(ctx, keyID(connectorPrefix, connector.ID),
connector) -} - -func (c *conn) GetConnector(id string) (conn storage.Connector, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - err = c.getKey(ctx, keyID(connectorPrefix, id), &conn) - return conn, err -} - -func (c *conn) UpdateConnector(id string, updater func(s storage.Connector) (storage.Connector, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keyID(connectorPrefix, id), func(currentValue []byte) ([]byte, error) { - var current storage.Connector - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(current) - if err != nil { - return nil, err - } - return json.Marshal(updated) - }) -} - -func (c *conn) DeleteConnector(id string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.deleteKey(ctx, keyID(connectorPrefix, id)) -} - -func (c *conn) ListConnectors() (connectors []storage.Connector, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - res, err := c.db.Get(ctx, connectorPrefix, clientv3.WithPrefix()) - if err != nil { - return nil, err - } - for _, v := range res.Kvs { - var c storage.Connector - if err = json.Unmarshal(v.Value, &c); err != nil { - return nil, err - } - connectors = append(connectors, c) - } - return connectors, nil -} - -func (c *conn) GetKeys() (keys storage.Keys, err error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - res, err := c.db.Get(ctx, keysName) - if err != nil { - return keys, err - } - if res.Count > 0 && len(res.Kvs) > 0 { - err = json.Unmarshal(res.Kvs[0].Value, &keys) - } - return keys, err -} - -func (c *conn) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultStorageTimeout) - defer cancel() - return c.txnUpdate(ctx, keysName, func(currentValue []byte) ([]byte, error) { - var current storage.Keys - if len(currentValue) > 0 { - if err := json.Unmarshal(currentValue, &current); err != nil { - return nil, err - } - } - updated, err := updater(current) - if err != nil { - return nil, err - } - return json.Marshal(updated) - }) -} - -func (c *conn) deleteKey(ctx context.Context, key string) error { - res, err := c.db.Delete(ctx, key) - if err != nil { - return err - } - if res.Deleted == 0 { - return storage.ErrNotFound - } - return nil -} - -func (c *conn) getKey(ctx context.Context, key string, value interface{}) error { - r, err := c.db.Get(ctx, key) - if err != nil { - return err - } - if r.Count == 0 { - return storage.ErrNotFound - } - return json.Unmarshal(r.Kvs[0].Value, value) -} - -func (c *conn) listAuthRequests(ctx context.Context) (reqs []AuthRequest, err error) { - res, err := c.db.Get(ctx, authRequestPrefix, clientv3.WithPrefix()) - if err != nil { - return reqs, err - } - for _, v := range res.Kvs { - var r AuthRequest - if err = json.Unmarshal(v.Value, &r); err != nil { - return reqs, err - } - reqs = append(reqs, r) - } - return reqs, nil -} - -func (c *conn) listAuthCodes(ctx context.Context) (codes []AuthCode, err error) { - res, err := c.db.Get(ctx, authCodePrefix, clientv3.WithPrefix()) - if err != nil { - return codes, err - } - for _, v := range res.Kvs { - var c AuthCode - if err = json.Unmarshal(v.Value, &c); err !=
nil { - return codes, err - } - codes = append(codes, c) - } - return codes, nil -} - -func (c *conn) txnCreate(ctx context.Context, key string, value interface{}) error { - b, err := json.Marshal(value) - if err != nil { - return err - } - txn := c.db.Txn(ctx) - res, err := txn. - If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)). - Then(clientv3.OpPut(key, string(b))). - Commit() - if err != nil { - return err - } - if !res.Succeeded { - return storage.ErrAlreadyExists - } - return nil -} - -func (c *conn) txnUpdate(ctx context.Context, key string, update func(current []byte) ([]byte, error)) error { - getResp, err := c.db.Get(ctx, key) - if err != nil { - return err - } - var currentValue []byte - var modRev int64 - if len(getResp.Kvs) > 0 { - currentValue = getResp.Kvs[0].Value - modRev = getResp.Kvs[0].ModRevision - } - - updatedValue, err := update(currentValue) - if err != nil { - return err - } - - txn := c.db.Txn(ctx) - updateResp, err := txn. - If(clientv3.Compare(clientv3.ModRevision(key), "=", modRev)). - Then(clientv3.OpPut(key, string(updatedValue))). - Commit() - if err != nil { - return err - } - if !updateResp.Succeeded { - return fmt.Errorf("failed to update key=%q: concurrent conflicting update happened", key) - } - return nil -} - -func keyID(prefix, id string) string { return prefix + id } -func keyEmail(prefix, email string) string { return prefix + strings.ToLower(email) } -func keySession(prefix, userID, connID string) string { - return prefix + strings.ToLower(userID+"|"+connID) -} diff --git a/vendor/github.com/dexidp/dex/storage/etcd/types.go b/vendor/github.com/dexidp/dex/storage/etcd/types.go deleted file mode 100644 index a16eae8..0000000 --- a/vendor/github.com/dexidp/dex/storage/etcd/types.go +++ /dev/null @@ -1,218 +0,0 @@ -package etcd - -import ( - "time" - - jose "gopkg.in/square/go-jose.v2" - - "github.com/dexidp/dex/storage" -) - -// AuthCode is a mirrored struct from storage with JSON struct tags -type AuthCode struct { - ID string `json:"ID"` - ClientID string `json:"clientID"` - RedirectURI string `json:"redirectURI"` - Nonce string `json:"nonce,omitempty"` - Scopes []string `json:"scopes,omitempty"` - - ConnectorID string `json:"connectorID,omitempty"` - ConnectorData []byte `json:"connectorData,omitempty"` - Claims Claims `json:"claims,omitempty"` - - Expiry time.Time `json:"expiry"` -} - -func fromStorageAuthCode(a storage.AuthCode) AuthCode { - return AuthCode{ - ID: a.ID, - ClientID: a.ClientID, - RedirectURI: a.RedirectURI, - ConnectorID: a.ConnectorID, - ConnectorData: a.ConnectorData, - Nonce: a.Nonce, - Scopes: a.Scopes, - Claims: fromStorageClaims(a.Claims), - Expiry: a.Expiry, - } -} - -// AuthRequest is a mirrored struct from storage with JSON struct tags -type AuthRequest struct { - ID string `json:"id"` - ClientID string `json:"client_id"` - - ResponseTypes []string `json:"response_types"` - Scopes []string `json:"scopes"` - RedirectURI string `json:"redirect_uri"` - Nonce string `json:"nonce"` - State string `json:"state"` - - ForceApprovalPrompt bool `json:"force_approval_prompt"` - - Expiry time.Time `json:"expiry"` - - LoggedIn bool `json:"logged_in"` - - Claims Claims `json:"claims"` - - ConnectorID string `json:"connector_id"` - ConnectorData []byte `json:"connector_data"` -} - -func fromStorageAuthRequest(a storage.AuthRequest) AuthRequest { - return AuthRequest{ - ID: a.ID, - ClientID: a.ClientID, - ResponseTypes: a.ResponseTypes, - Scopes: a.Scopes, - RedirectURI: a.RedirectURI, - Nonce: a.Nonce, - State: a.State, - 
ForceApprovalPrompt: a.ForceApprovalPrompt, - Expiry: a.Expiry, - LoggedIn: a.LoggedIn, - Claims: fromStorageClaims(a.Claims), - ConnectorID: a.ConnectorID, - ConnectorData: a.ConnectorData, - } -} - -func toStorageAuthRequest(a AuthRequest) storage.AuthRequest { - return storage.AuthRequest{ - ID: a.ID, - ClientID: a.ClientID, - ResponseTypes: a.ResponseTypes, - Scopes: a.Scopes, - RedirectURI: a.RedirectURI, - Nonce: a.Nonce, - State: a.State, - ForceApprovalPrompt: a.ForceApprovalPrompt, - LoggedIn: a.LoggedIn, - ConnectorID: a.ConnectorID, - ConnectorData: a.ConnectorData, - Expiry: a.Expiry, - Claims: toStorageClaims(a.Claims), - } -} - -// RefreshToken is a mirrored struct from storage with JSON struct tags -type RefreshToken struct { - ID string `json:"id"` - - Token string `json:"token"` - - CreatedAt time.Time `json:"created_at"` - LastUsed time.Time `json:"last_used"` - - ClientID string `json:"client_id"` - - ConnectorID string `json:"connector_id"` - ConnectorData []byte `json:"connector_data"` - Claims Claims `json:"claims"` - - Scopes []string `json:"scopes"` - - Nonce string `json:"nonce"` -} - -func toStorageRefreshToken(r RefreshToken) storage.RefreshToken { - return storage.RefreshToken{ - ID: r.ID, - Token: r.Token, - CreatedAt: r.CreatedAt, - LastUsed: r.LastUsed, - ClientID: r.ClientID, - ConnectorID: r.ConnectorID, - ConnectorData: r.ConnectorData, - Scopes: r.Scopes, - Nonce: r.Nonce, - Claims: toStorageClaims(r.Claims), - } -} - -func fromStorageRefreshToken(r storage.RefreshToken) RefreshToken { - return RefreshToken{ - ID: r.ID, - Token: r.Token, - CreatedAt: r.CreatedAt, - LastUsed: r.LastUsed, - ClientID: r.ClientID, - ConnectorID: r.ConnectorID, - ConnectorData: r.ConnectorData, - Scopes: r.Scopes, - Nonce: r.Nonce, - Claims: fromStorageClaims(r.Claims), - } -} - -// Claims is a mirrored struct from storage with JSON struct tags. 
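The txnCreate and txnUpdate helpers removed above implement single-shot optimistic concurrency on etcd: a create only commits if the key's CreateRevision is 0, and an update only commits if the key's ModRevision is unchanged since the read, otherwise the caller gets an error. A minimal sketch of the same read-modify-write pattern with a bounded retry loop on conflict, built on the same clientv3 primitives (package and function names here are illustrative, not part of dex):

package etcdexample // illustrative package name

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

// casUpdate reads key, applies update to the current value, and writes the
// result back only if the key's ModRevision has not changed in between; on a
// conflicting concurrent write it retries up to attempts times.
func casUpdate(ctx context.Context, db *clientv3.Client, key string, attempts int, update func(old []byte) ([]byte, error)) error {
	for i := 0; i < attempts; i++ {
		get, err := db.Get(ctx, key)
		if err != nil {
			return err
		}
		var cur []byte
		var rev int64
		if len(get.Kvs) > 0 {
			cur = get.Kvs[0].Value
			rev = get.Kvs[0].ModRevision
		}
		next, err := update(cur)
		if err != nil {
			return err
		}
		resp, err := db.Txn(ctx).
			If(clientv3.Compare(clientv3.ModRevision(key), "=", rev)).
			Then(clientv3.OpPut(key, string(next))).
			Commit()
		if err != nil {
			return err
		}
		if resp.Succeeded {
			return nil
		}
	}
	return fmt.Errorf("failed to update key=%q: too many conflicting updates", key)
}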
-type Claims struct { - UserID string `json:"userID"` - Username string `json:"username"` - PreferredUsername string `json:"preferredUsername"` - Email string `json:"email"` - EmailVerified bool `json:"emailVerified"` - Groups []string `json:"groups,omitempty"` -} - -func fromStorageClaims(i storage.Claims) Claims { - return Claims{ - UserID: i.UserID, - Username: i.Username, - PreferredUsername: i.PreferredUsername, - Email: i.Email, - EmailVerified: i.EmailVerified, - Groups: i.Groups, - } -} - -func toStorageClaims(i Claims) storage.Claims { - return storage.Claims{ - UserID: i.UserID, - Username: i.Username, - PreferredUsername: i.PreferredUsername, - Email: i.Email, - EmailVerified: i.EmailVerified, - Groups: i.Groups, - } -} - -// Keys is a mirrored struct from storage with JSON struct tags -type Keys struct { - SigningKey *jose.JSONWebKey `json:"signing_key,omitempty"` - SigningKeyPub *jose.JSONWebKey `json:"signing_key_pub,omitempty"` - VerificationKeys []storage.VerificationKey `json:"verification_keys"` - NextRotation time.Time `json:"next_rotation"` -} - -// OfflineSessions is a mirrored struct from storage with JSON struct tags -type OfflineSessions struct { - UserID string `json:"user_id,omitempty"` - ConnID string `json:"conn_id,omitempty"` - Refresh map[string]*storage.RefreshTokenRef `json:"refresh,omitempty"` - ConnectorData []byte `json:"connectorData,omitempty"` -} - -func fromStorageOfflineSessions(o storage.OfflineSessions) OfflineSessions { - return OfflineSessions{ - UserID: o.UserID, - ConnID: o.ConnID, - Refresh: o.Refresh, - ConnectorData: o.ConnectorData, - } -} - -func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions { - s := storage.OfflineSessions{ - UserID: o.UserID, - ConnID: o.ConnID, - Refresh: o.Refresh, - ConnectorData: o.ConnectorData, - } - if s.Refresh == nil { - // Server code assumes this will be non-nil. - s.Refresh = make(map[string]*storage.RefreshTokenRef) - } - return s -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/client.go b/vendor/github.com/dexidp/dex/storage/kubernetes/client.go deleted file mode 100644 index 025c4dd..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/client.go +++ /dev/null @@ -1,486 +0,0 @@ -package kubernetes - -import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "hash" - "hash/fnv" - "io" - "io/ioutil" - "net" - "net/http" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/ghodss/yaml" - "github.com/gtank/cryptopasta" - "golang.org/x/net/http2" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" - "github.com/dexidp/dex/storage/kubernetes/k8sapi" -) - -type client struct { - client *http.Client - baseURL string - namespace string - logger log.Logger - - // Hash function to map IDs (which could span a large range) to Kubernetes names. - // While this is not currently upgradable, it could be in the future. - // - // The default hash is a non-cryptographic hash, because cryptographic hashes - // always produce sums too long to fit into a Kubernetes name. Because of this, - // gets, updates, and deletes are _always_ checked for collisions. - hash func() hash.Hash - - // API version of the oidc resources. For example "oidc.coreos.com". This is - // currently not configurable, but could be in the future. 
- apiVersion string - - // This is called once the client's Close method is called to signal goroutines, - // such as the one creating third party resources, to stop. - cancel context.CancelFunc -} - -// idToName maps an arbitrary ID, such as an email or client ID to a Kubernetes object name. -func (cli *client) idToName(s string) string { - return idToName(s, cli.hash) -} - -// offlineTokenName maps two arbitrary IDs, to a single Kubernetes object name. -// This is used when more than one field is used to uniquely identify the object. -func (cli *client) offlineTokenName(userID string, connID string) string { - return offlineTokenName(userID, connID, cli.hash) -} - -// Kubernetes names must match the regexp '[a-z0-9]([-a-z0-9]*[a-z0-9])?'. -var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567") - -func idToName(s string, h func() hash.Hash) string { - return strings.TrimRight(encoding.EncodeToString(h().Sum([]byte(s))), "=") -} - -func offlineTokenName(userID string, connID string, h func() hash.Hash) string { - hash := h() - hash.Write([]byte(userID)) - hash.Write([]byte(connID)) - return strings.TrimRight(encoding.EncodeToString(hash.Sum(nil)), "=") -} - -func (cli *client) urlFor(apiVersion, namespace, resource, name string) string { - basePath := "apis/" - if apiVersion == "v1" { - basePath = "api/" - } - - var p string - if namespace != "" { - p = path.Join(basePath, apiVersion, "namespaces", namespace, resource, name) - } else { - p = path.Join(basePath, apiVersion, resource, name) - } - if strings.HasSuffix(cli.baseURL, "/") { - return cli.baseURL + p - } - return cli.baseURL + "/" + p -} - -// Define an error interface so we can get at the underlying status code if it's -// absolutely necessary. For instance when we need to see if an error indicates -// a resource already exists. -type httpError interface { - StatusCode() int -} - -var _ httpError = (*httpErr)(nil) - -type httpErr struct { - method string - url string - status int - body []byte -} - -func (e *httpErr) StatusCode() int { - return e.status -} - -func (e *httpErr) Error() string { - return fmt.Sprintf("%s %s %s: response from server \"%s\"", e.method, e.url, http.StatusText(e.status), bytes.TrimSpace(e.body)) -} - -func checkHTTPErr(r *http.Response, validStatusCodes ...int) error { - for _, status := range validStatusCodes { - if r.StatusCode == status { - return nil - } - } - - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 2<<15)) // 64 KiB - if err != nil { - return fmt.Errorf("read response body: %v", err) - } - - // Check this case after we read the body so the connection can be reused. - if r.StatusCode == http.StatusNotFound { - return storage.ErrNotFound - } - if r.Request.Method == http.MethodPost && r.StatusCode == http.StatusConflict { - return storage.ErrAlreadyExists - } - - var url, method string - if r.Request != nil { - method = r.Request.Method - url = r.Request.URL.String() - } - return &httpErr{method, url, r.StatusCode, body} -} - -// Close the response body. The initial request is drained so the connection can -// be reused. 
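The idToName and offlineTokenName helpers above show the recipe this client uses to turn arbitrary IDs (emails, client IDs, user/connector pairs) into valid Kubernetes object names: hash the inputs with FNV and encode the digest with a lowercase base32 alphabet so the result matches '[a-z0-9]([-a-z0-9]*[a-z0-9])?'. The same idea as a standalone sketch (package and helper names are illustrative):

package k8sname // illustrative package name

import (
	"encoding/base32"
	"hash/fnv"
	"strings"
)

// Only lowercase letters and digits are allowed in the encoded output, and the
// '=' padding is stripped so the name stays DNS-label safe.
var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567")

// nameFor hashes the given parts into a short Kubernetes-safe object name.
func nameFor(parts ...string) string {
	h := fnv.New64()
	for _, p := range parts {
		h.Write([]byte(p))
	}
	return strings.TrimRight(encoding.EncodeToString(h.Sum(nil)), "=")
}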
-func closeResp(r *http.Response) { - io.Copy(ioutil.Discard, r.Body) - r.Body.Close() -} - -func (cli *client) get(resource, name string, v interface{}) error { - return cli.getResource(cli.apiVersion, cli.namespace, resource, name, v) -} - -func (cli *client) getResource(apiVersion, namespace, resource, name string, v interface{}) error { - url := cli.urlFor(apiVersion, namespace, resource, name) - resp, err := cli.client.Get(url) - if err != nil { - return err - } - defer closeResp(resp) - if err := checkHTTPErr(resp, http.StatusOK); err != nil { - return err - } - return json.NewDecoder(resp.Body).Decode(v) -} - -func (cli *client) list(resource string, v interface{}) error { - return cli.get(resource, "", v) -} - -func (cli *client) post(resource string, v interface{}) error { - return cli.postResource(cli.apiVersion, cli.namespace, resource, v) -} - -func (cli *client) postResource(apiVersion, namespace, resource string, v interface{}) error { - body, err := json.Marshal(v) - if err != nil { - return fmt.Errorf("marshal object: %v", err) - } - - url := cli.urlFor(apiVersion, namespace, resource, "") - resp, err := cli.client.Post(url, "application/json", bytes.NewReader(body)) - if err != nil { - return err - } - defer closeResp(resp) - return checkHTTPErr(resp, http.StatusCreated) -} - -func (cli *client) delete(resource, name string) error { - url := cli.urlFor(cli.apiVersion, cli.namespace, resource, name) - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return fmt.Errorf("create delete request: %v", err) - } - resp, err := cli.client.Do(req) - if err != nil { - return fmt.Errorf("delete request: %v", err) - } - defer closeResp(resp) - return checkHTTPErr(resp, http.StatusOK) -} - -func (cli *client) deleteAll(resource string) error { - var list struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - Items []struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - } `json:"items"` - } - if err := cli.list(resource, &list); err != nil { - return err - } - for _, item := range list.Items { - if err := cli.delete(resource, item.Name); err != nil { - return err - } - } - return nil -} - -func (cli *client) put(resource, name string, v interface{}) error { - body, err := json.Marshal(v) - if err != nil { - return fmt.Errorf("marshal object: %v", err) - } - - url := cli.urlFor(cli.apiVersion, cli.namespace, resource, name) - req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) - if err != nil { - return fmt.Errorf("create patch request: %v", err) - } - - req.Header.Set("Content-Length", strconv.Itoa(len(body))) - - resp, err := cli.client.Do(req) - if err != nil { - return fmt.Errorf("patch request: %v", err) - } - defer closeResp(resp) - - return checkHTTPErr(resp, http.StatusOK) -} - -func newClient(cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, logger log.Logger) (*client, error) { - tlsConfig := cryptopasta.DefaultTLSConfig() - data := func(b string, file string) ([]byte, error) { - if b != "" { - return base64.StdEncoding.DecodeString(b) - } - if file == "" { - return nil, nil - } - return ioutil.ReadFile(file) - } - - if caData, err := data(cluster.CertificateAuthorityData, cluster.CertificateAuthority); err != nil { - return nil, err - } else if caData != nil { - tlsConfig.RootCAs = x509.NewCertPool() - if !tlsConfig.RootCAs.AppendCertsFromPEM(caData) { - return nil, fmt.Errorf("no certificate data found: %v", err) - } - } - - clientCert, err := 
data(user.ClientCertificateData, user.ClientCertificate) - if err != nil { - return nil, err - } - clientKey, err := data(user.ClientKeyData, user.ClientKey) - if err != nil { - return nil, err - } - if clientCert != nil && clientKey != nil { - cert, err := tls.X509KeyPair(clientCert, clientKey) - if err != nil { - return nil, fmt.Errorf("failed to load client cert: %v", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - var t http.RoundTripper - httpTransport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSClientConfig: tlsConfig, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - - // Since we set a custom TLS client config we have to explicitly - // enable HTTP/2. - // - // https://github.com/golang/go/blob/go1.7.4/src/net/http/transport.go#L200-L206 - if err := http2.ConfigureTransport(httpTransport); err != nil { - return nil, err - } - t = httpTransport - - if user.Token != "" { - t = transport{ - updateReq: func(r *http.Request) { - r.Header.Set("Authorization", "Bearer "+user.Token) - }, - base: t, - } - } - - if user.Username != "" && user.Password != "" { - t = transport{ - updateReq: func(r *http.Request) { - r.SetBasicAuth(user.Username, user.Password) - }, - base: t, - } - } - - apiVersion := "dex.coreos.com/v1" - - logger.Infof("kubernetes client apiVersion = %s", apiVersion) - return &client{ - client: &http.Client{ - Transport: t, - Timeout: 15 * time.Second, - }, - baseURL: cluster.Server, - hash: func() hash.Hash { return fnv.New64() }, - namespace: namespace, - apiVersion: apiVersion, - logger: logger, - }, nil -} - -type transport struct { - updateReq func(r *http.Request) - base http.RoundTripper -} - -func (t transport) RoundTrip(r *http.Request) (*http.Response, error) { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - t.updateReq(r2) - return t.base.RoundTrip(r2) -} - -func loadKubeConfig(kubeConfigPath string) (cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, err error) { - data, err := ioutil.ReadFile(kubeConfigPath) - if err != nil { - err = fmt.Errorf("read %s: %v", kubeConfigPath, err) - return - } - - var c k8sapi.Config - if err = yaml.Unmarshal(data, &c); err != nil { - err = fmt.Errorf("unmarshal %s: %v", kubeConfigPath, err) - return - } - - cluster, user, namespace, err = currentContext(&c) - if namespace == "" { - namespace = "default" - } - return -} - -func namespaceFromServiceAccountJWT(s string) (string, error) { - // The service account token is just a JWT. Parse it as such. - parts := strings.Split(s, ".") - if len(parts) < 2 { - // It's extremely important we don't log the actual service account token. - return "", fmt.Errorf("malformed service account token: expected 3 parts got %d", len(parts)) - } - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return "", fmt.Errorf("malformed service account token: %v", err) - } - var data struct { - // The claim Kubernetes uses to identify which namespace a service account belongs to. 
- // - // See: https://github.com/kubernetes/kubernetes/blob/v1.4.3/pkg/serviceaccount/jwt.go#L42 - Namespace string `json:"kubernetes.io/serviceaccount/namespace"` - } - if err := json.Unmarshal(payload, &data); err != nil { - return "", fmt.Errorf("malformed service account token: %v", err) - } - if data.Namespace == "" { - return "", errors.New(`jwt claim "kubernetes.io/serviceaccount/namespace" not found`) - } - return data.Namespace, nil -} - -func inClusterConfig() (cluster k8sapi.Cluster, user k8sapi.AuthInfo, namespace string, err error) { - host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") - if len(host) == 0 || len(port) == 0 { - err = fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") - return - } - // we need to wrap IPv6 addresses in square brackets - // IPv4 also works with square brackets - host = "[" + host + "]" - cluster = k8sapi.Cluster{ - Server: "https://" + host + ":" + port, - CertificateAuthority: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - } - token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") - if err != nil { - return - } - user = k8sapi.AuthInfo{Token: string(token)} - - if namespace = os.Getenv("KUBERNETES_POD_NAMESPACE"); namespace == "" { - namespace, err = namespaceFromServiceAccountJWT(user.Token) - if err != nil { - err = fmt.Errorf("failed to inspect service account token: %v", err) - return - } - } - - return -} - -func currentContext(config *k8sapi.Config) (cluster k8sapi.Cluster, user k8sapi.AuthInfo, ns string, err error) { - if config.CurrentContext == "" { - if len(config.Contexts) == 1 { - config.CurrentContext = config.Contexts[0].Name - } else { - return cluster, user, "", errors.New("kubeconfig has no current context") - } - } - context, ok := func() (k8sapi.Context, bool) { - for _, namedContext := range config.Contexts { - if namedContext.Name == config.CurrentContext { - return namedContext.Context, true - } - } - return k8sapi.Context{}, false - }() - if !ok { - return cluster, user, "", fmt.Errorf("no context named %q found", config.CurrentContext) - } - - cluster, ok = func() (k8sapi.Cluster, bool) { - for _, namedCluster := range config.Clusters { - if namedCluster.Name == context.Cluster { - return namedCluster.Cluster, true - } - } - return k8sapi.Cluster{}, false - }() - if !ok { - return cluster, user, "", fmt.Errorf("no cluster named %q found", context.Cluster) - } - - user, ok = func() (k8sapi.AuthInfo, bool) { - for _, namedAuthInfo := range config.AuthInfos { - if namedAuthInfo.Name == context.AuthInfo { - return namedAuthInfo.AuthInfo, true - } - } - return k8sapi.AuthInfo{}, false - }() - if !ok { - return cluster, user, "", fmt.Errorf("no user named %q found", context.AuthInfo) - } - return cluster, user, context.Namespace, nil -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/doc.go b/vendor/github.com/dexidp/dex/storage/kubernetes/doc.go deleted file mode 100644 index 1112e39..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package kubernetes provides a storage implementation using Kubernetes third party APIs. 
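The namespaceFromServiceAccountJWT helper removed above only ever looks at the middle (payload) segment of the mounted service-account token. A small test in the same package can demonstrate that by building a fake three-part token around a hand-rolled payload (this sketch assumes it sits next to the original client.go and imports encoding/base64, encoding/json, and testing):

func TestNamespaceClaim(t *testing.T) {
	// Build a fake header.payload.signature token whose payload carries the
	// namespace claim parsed by namespaceFromServiceAccountJWT.
	payload, _ := json.Marshal(map[string]string{
		"kubernetes.io/serviceaccount/namespace": "dex",
	})
	token := "header." + base64.RawURLEncoding.EncodeToString(payload) + ".signature"

	ns, err := namespaceFromServiceAccountJWT(token)
	if err != nil || ns != "dex" {
		t.Fatalf("got (%q, %v), want (%q, nil)", ns, err, "dex")
	}
}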
-package kubernetes diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go deleted file mode 100644 index 4e71dd0..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/client.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sapi - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - Kind string `json:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. - // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. - APIVersion string `json:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters []NamedCluster `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos []NamedAuthInfo `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts []NamedContext `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Preferences contains information about the users command line experience preferences. -type Preferences struct { - Colors bool `json:"colors,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - APIVersion string `json:"api-version,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. 
- InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - // - // NOTE(ericchiang): Our yaml parser doesn't assume []byte is a base64 encoded string. - CertificateAuthorityData string `json:"certificate-authority-data,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // ClientCertificate is the path to a client cert file for TLS. - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - // - // NOTE(ericchiang): Our yaml parser doesn't assume []byte is a base64 encoded string. - ClientCertificateData string `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - // - // NOTE(ericchiang): Our yaml parser doesn't assume []byte is a base64 encoded string. - ClientKeyData string `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - Token string `json:"token,omitempty"` - // Impersonate is the username to imperonate. The name matches the flag. - Impersonate string `json:"as,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - Password string `json:"password,omitempty"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - Namespace string `json:"namespace,omitempty"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// NamedCluster relates nicknames to cluster information -type NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name"` - // Cluster holds the cluster information - Cluster Cluster `json:"cluster"` -} - -// NamedContext relates nicknames to context information -type NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name"` - // Context holds the context information - Context Context `json:"context"` -} - -// NamedAuthInfo relates nicknames to auth information -type NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo AuthInfo `json:"user"` -} - -// NamedExtension relates nicknames to extension information -type NamedExtension struct { - // Name is the nickname for this Extension - Name string `json:"name"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. -type AuthProviderConfig struct { - Name string `json:"name"` - Config map[string]string `json:"config"` -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go deleted file mode 100644 index 7a65410..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/crd_extensions.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sapi - -// CustomResourceDefinitionSpec describes how a user wants their resource to appear -type CustomResourceDefinitionSpec struct { - // Group is the group this resource belongs in - Group string `json:"group" protobuf:"bytes,1,opt,name=group"` - // Version is the version this resource belongs in - Version string `json:"version" protobuf:"bytes,2,opt,name=version"` - // Names are the names used to describe this custom resource - Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` - - // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced - Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` -} - -// CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition -type CustomResourceDefinitionNames struct { - // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration - // too: plural.group and it must be all lowercase. - Plural string `json:"plural" protobuf:"bytes,1,opt,name=plural"` - // Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased - Singular string `json:"singular,omitempty" protobuf:"bytes,2,opt,name=singular"` - // ShortNames are short names for the resource. It must be all lowercase. 
- ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,3,opt,name=shortNames"` - // Kind is the serialized kind of the resource. It is normally CamelCase and singular. - Kind string `json:"kind" protobuf:"bytes,4,opt,name=kind"` - // ListKind is the serialized kind of the list for this resource. Defaults to List. - ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` -} - -// ResourceScope is an enum defining the different scopes available to a custom resource -type ResourceScope string - -const ( - // ClusterScoped is the `cluster` scope for a custom resource. - ClusterScoped ResourceScope = "Cluster" - // NamespaceScoped is the `namespaced` scope for a custom resource. - NamespaceScoped ResourceScope = "Namespaced" -) - -// ConditionStatus reflects if a resource -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// CustomResourceDefinitionConditionType is a valid value for CustomResourceDefinitionCondition.Type -type CustomResourceDefinitionConditionType string - -const ( - // Established means that the resource has become active. A resource is established when all names are - // accepted without a conflict for the first time. A resource stays established until deleted, even during - // a later NamesAccepted due to changed names. Note that not all names can be changed. - Established CustomResourceDefinitionConditionType = "Established" - // NamesAccepted means the names chosen for this CustomResourceDefinition do not conflict with others in - // the group and are therefore accepted. - NamesAccepted CustomResourceDefinitionConditionType = "NamesAccepted" - // Terminating means that the CustomResourceDefinition has been deleted and is cleaning up. - Terminating CustomResourceDefinitionConditionType = "Terminating" -) - -// CustomResourceDefinitionCondition contains details for the current condition of this pod. -type CustomResourceDefinitionCondition struct { - // Type is the type of the condition. - Type CustomResourceDefinitionConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=CustomResourceDefinitionConditionType"` - // Status is the status of the condition. - // Can be True, False, Unknown. - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Human-readable message indicating details about last transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition -type CustomResourceDefinitionStatus struct { - // Conditions indicate state for particular aspects of a CustomResourceDefinition - Conditions []CustomResourceDefinitionCondition `json:"conditions" protobuf:"bytes,1,opt,name=conditions"` - - // AcceptedNames are the names that are actually being used to serve discovery - // They may be different than the names in spec. - AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` -} - -// CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of -// a CustomResourceDefinition -const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.io" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format -// <.spec.name>.<.spec.group>. -type CustomResourceDefinition struct { - TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes how the user wants the resources to appear - Spec CustomResourceDefinitionSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status indicates the actual state of the CustomResourceDefinition - Status CustomResourceDefinitionStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CustomResourceDefinitionList is a list of CustomResourceDefinition objects. -type CustomResourceDefinitionList struct { - TypeMeta `json:",inline"` - ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items individual CustomResourceDefinitions - Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go deleted file mode 100644 index cdefdbd..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package k8sapi holds vendored Kubernetes types. -package k8sapi diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go deleted file mode 100644 index 8a7dbfd..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/extensions.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sapi - -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). 
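For orientation, the CRD types above are what the removed storage used to register dex's resources under the dex.coreos.com group. A sketch of how one such definition could be declared with these structs (the apiextensions version and the plural/singular/kind strings are illustrative, not copied from dex):

package storageref // illustrative package name

import "github.com/dexidp/dex/storage/kubernetes/k8sapi"

// Example only: a namespaced CRD for one of dex's OIDC resources.
var authCodeCRD = k8sapi.CustomResourceDefinition{
	TypeMeta: k8sapi.TypeMeta{
		Kind:       "CustomResourceDefinition",
		APIVersion: "apiextensions.k8s.io/v1beta1",
	},
	ObjectMeta: k8sapi.ObjectMeta{Name: "authcodes.dex.coreos.com"},
	Spec: k8sapi.CustomResourceDefinitionSpec{
		Group:   "dex.coreos.com",
		Version: "v1",
		Scope:   k8sapi.NamespaceScoped,
		Names: k8sapi.CustomResourceDefinitionNames{
			Plural:   "authcodes",
			Singular: "authcode",
			Kind:     "AuthCode",
		},
	},
}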
- Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go deleted file mode 100644 index 5b60bfd..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/time.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sapi - -import ( - "encoding/json" - "time" -) - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -type Time struct { - time.Time `protobuf:"-"` -} - -// NewTime returns a wrapped instance of the provided time -func NewTime(time time.Time) Time { - return Time{time} -} - -// Date returns the Time corresponding to the supplied parameters -// by wrapping time.Date. -func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { - return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} -} - -// Now returns the current local time. -func Now() Time { - return Time{time.Now()} -} - -// IsZero returns true if the value is nil or time is zero. -func (t *Time) IsZero() bool { - if t == nil { - return true - } - return t.Time.IsZero() -} - -// Before reports whether the time instant t is before u. -func (t Time) Before(u Time) bool { - return t.Time.Before(u.Time) -} - -// Equal reports whether the time instant t is equal to u. -func (t Time) Equal(u Time) bool { - return t.Time.Equal(u.Time) -} - -// Unix returns the local time corresponding to the given Unix time -// by wrapping time.Unix. -func Unix(sec int64, nsec int64) Time { - return Time{time.Unix(sec, nsec)} -} - -// Rfc3339Copy returns a copy of the Time at second-level precision. -func (t Time) Rfc3339Copy() Time { - copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) - return Time{copied} -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (t *Time) UnmarshalJSON(b []byte) error { - if len(b) == 4 && string(b) == "null" { - t.Time = time.Time{} - return nil - } - - var str string - json.Unmarshal(b, &str) - - pt, err := time.Parse(time.RFC3339, str) - if err != nil { - return err - } - - t.Time = pt.Local() - return nil -} - -// UnmarshalQueryParameter converts from a URL query parameter value to an object -func (t *Time) UnmarshalQueryParameter(str string) error { - if len(str) == 0 { - t.Time = time.Time{} - return nil - } - // Tolerate requests from older clients that used JSON serialization to build query params - if len(str) == 4 && str == "null" { - t.Time = time.Time{} - return nil - } - - pt, err := time.Parse(time.RFC3339, str) - if err != nil { - return err - } - - t.Time = pt.Local() - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (t Time) MarshalJSON() ([]byte, error) { - if t.IsZero() { - // Encode unset/nil objects as JSON's "null". - return []byte("null"), nil - } - - return json.Marshal(t.UTC().Format(time.RFC3339)) -} - -// MarshalQueryParameter converts to a URL query parameter value -func (t Time) MarshalQueryParameter() (string, error) { - if t.IsZero() { - // Encode unset/nil objects as an empty string - return "", nil - } - - return t.UTC().Format(time.RFC3339), nil -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go deleted file mode 100644 index f123f52..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/unversioned.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sapi - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -type TypeMeta struct { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#resources - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -type ListMeta struct { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go b/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go deleted file mode 100644 index 55825e6..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/k8sapi/v1.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8sapi - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -type ObjectMeta struct { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#idempotency - GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. 
- SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - UID string `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource will be deleted (no longer visible from - // resource lists, and not reachable by name) after the time in this field. Once set, this - // value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet - // will send a hard termination signal to the container. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. 
- DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md - // TODO: replace map[string]string with labels.LabelSet type - Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/annotations.md - Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -type OwnerReference struct { - // API version of the referent. - APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` - // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // UID of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids - UID string `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` - // If true, this reference points to the managing controller. 
- Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/storage.go b/vendor/github.com/dexidp/dex/storage/kubernetes/storage.go deleted file mode 100644 index 4bdf3dd..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/storage.go +++ /dev/null @@ -1,597 +0,0 @@ -package kubernetes - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" - "github.com/dexidp/dex/storage/kubernetes/k8sapi" -) - -const ( - kindAuthCode = "AuthCode" - kindAuthRequest = "AuthRequest" - kindClient = "OAuth2Client" - kindRefreshToken = "RefreshToken" - kindKeys = "SigningKey" - kindPassword = "Password" - kindOfflineSessions = "OfflineSessions" - kindConnector = "Connector" -) - -const ( - resourceAuthCode = "authcodes" - resourceAuthRequest = "authrequests" - resourceClient = "oauth2clients" - resourceRefreshToken = "refreshtokens" - resourceKeys = "signingkeies" // Kubernetes attempts to pluralize. - resourcePassword = "passwords" - resourceOfflineSessions = "offlinesessionses" // Again attempts to pluralize. - resourceConnector = "connectors" -) - -// Config values for the Kubernetes storage type. -type Config struct { - InCluster bool `json:"inCluster"` - KubeConfigFile string `json:"kubeConfigFile"` -} - -// Open returns a storage using Kubernetes third party resource. -func (c *Config) Open(logger log.Logger) (storage.Storage, error) { - cli, err := c.open(logger, false) - if err != nil { - return nil, err - } - return cli, nil -} - -// open returns a kubernetes client, initializing the third party resources used -// by dex. -// -// waitForResources controls if errors creating the resources cause this method to return -// immediately (used during testing), or if the client will asynchronously retry. -func (c *Config) open(logger log.Logger, waitForResources bool) (*client, error) { - if c.InCluster && (c.KubeConfigFile != "") { - return nil, errors.New("cannot specify both 'inCluster' and 'kubeConfigFile'") - } - if !c.InCluster && (c.KubeConfigFile == "") { - return nil, errors.New("must specify either 'inCluster' or 'kubeConfigFile'") - } - - var ( - cluster k8sapi.Cluster - user k8sapi.AuthInfo - namespace string - err error - ) - if c.InCluster { - cluster, user, namespace, err = inClusterConfig() - } else { - cluster, user, namespace, err = loadKubeConfig(c.KubeConfigFile) - } - if err != nil { - return nil, err - } - - cli, err := newClient(cluster, user, namespace, logger) - if err != nil { - return nil, fmt.Errorf("create client: %v", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - - logger.Info("creating custom Kubernetes resources") - if !cli.registerCustomResources() { - if waitForResources { - cancel() - return nil, fmt.Errorf("failed creating custom resources") - } - - // Try to synchronously create the custom resources once. This doesn't mean - // they'll immediately be available, but ensures that the client will actually try - // once. - logger.Errorf("failed creating custom resources: %v", err) - go func() { - for { - if cli.registerCustomResources() { - return - } - - select { - case <-ctx.Done(): - return - case <-time.After(30 * time.Second): - } - } - }() - } - - if waitForResources { - if err := cli.waitForCRDs(ctx); err != nil { - cancel() - return nil, err - } - } - - // If the client is closed, stop trying to create resources. 
- cli.cancel = cancel - return cli, nil -} - -// registerCustomResources attempts to create the custom resources dex -// requires or identifies that they're already enabled. This function creates -// custom resource definitions(CRDs) -// It logs all errors, returning true if the resources were created successfully. -// -// Creating a custom resource does not mean that they'll be immediately available. -func (cli *client) registerCustomResources() (ok bool) { - ok = true - length := len(customResourceDefinitions) - for i := 0; i < length; i++ { - var err error - var resourceName string - - r := customResourceDefinitions[i] - var i interface{} - cli.logger.Infof("checking if custom resource %s has been created already...", r.ObjectMeta.Name) - if err := cli.list(r.Spec.Names.Plural, &i); err == nil { - cli.logger.Infof("The custom resource %s already available, skipping create", r.ObjectMeta.Name) - continue - } else { - cli.logger.Infof("failed to list custom resource %s, attempting to create: %v", r.ObjectMeta.Name, err) - } - err = cli.postResource("apiextensions.k8s.io/v1beta1", "", "customresourcedefinitions", r) - resourceName = r.ObjectMeta.Name - - if err != nil { - switch err { - case storage.ErrAlreadyExists: - cli.logger.Infof("custom resource already created %s", resourceName) - case storage.ErrNotFound: - cli.logger.Errorf("custom resources not found, please enable the respective API group") - ok = false - default: - cli.logger.Errorf("creating custom resource %s: %v", resourceName, err) - ok = false - } - continue - } - cli.logger.Errorf("create custom resource %s", resourceName) - } - return ok -} - -// waitForCRDs waits for all CRDs to be in a ready state, and is used -// by the tests to synchronize before running conformance. -func (cli *client) waitForCRDs(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - - for _, crd := range customResourceDefinitions { - for { - err := cli.isCRDReady(crd.Name) - if err == nil { - break - } - - cli.logger.Errorf("checking CRD: %v", err) - - select { - case <-ctx.Done(): - return errors.New("timed out waiting for CRDs to be available") - case <-time.After(time.Millisecond * 100): - } - } - } - return nil -} - -// isCRDReady determines if a CRD is ready by inspecting its conditions. -func (cli *client) isCRDReady(name string) error { - var r k8sapi.CustomResourceDefinition - err := cli.getResource("apiextensions.k8s.io/v1beta1", "", "customresourcedefinitions", name, &r) - if err != nil { - return fmt.Errorf("get crd %s: %v", name, err) - } - - conds := make(map[string]string) // For debugging, keep the conditions around. 
- for _, c := range r.Status.Conditions { - if c.Type == k8sapi.Established && c.Status == k8sapi.ConditionTrue { - return nil - } - conds[string(c.Type)] = string(c.Status) - } - return fmt.Errorf("crd %s not ready %#v", name, conds) -} - -func (cli *client) Close() error { - if cli.cancel != nil { - cli.cancel() - } - return nil -} - -func (cli *client) CreateAuthRequest(a storage.AuthRequest) error { - return cli.post(resourceAuthRequest, cli.fromStorageAuthRequest(a)) -} - -func (cli *client) CreateClient(c storage.Client) error { - return cli.post(resourceClient, cli.fromStorageClient(c)) -} - -func (cli *client) CreateAuthCode(c storage.AuthCode) error { - return cli.post(resourceAuthCode, cli.fromStorageAuthCode(c)) -} - -func (cli *client) CreatePassword(p storage.Password) error { - return cli.post(resourcePassword, cli.fromStoragePassword(p)) -} - -func (cli *client) CreateRefresh(r storage.RefreshToken) error { - return cli.post(resourceRefreshToken, cli.fromStorageRefreshToken(r)) -} - -func (cli *client) CreateOfflineSessions(o storage.OfflineSessions) error { - return cli.post(resourceOfflineSessions, cli.fromStorageOfflineSessions(o)) -} - -func (cli *client) CreateConnector(c storage.Connector) error { - return cli.post(resourceConnector, cli.fromStorageConnector(c)) -} - -func (cli *client) GetAuthRequest(id string) (storage.AuthRequest, error) { - var req AuthRequest - if err := cli.get(resourceAuthRequest, id, &req); err != nil { - return storage.AuthRequest{}, err - } - return toStorageAuthRequest(req), nil -} - -func (cli *client) GetAuthCode(id string) (storage.AuthCode, error) { - var code AuthCode - if err := cli.get(resourceAuthCode, id, &code); err != nil { - return storage.AuthCode{}, err - } - return toStorageAuthCode(code), nil -} - -func (cli *client) GetClient(id string) (storage.Client, error) { - c, err := cli.getClient(id) - if err != nil { - return storage.Client{}, err - } - return toStorageClient(c), nil -} - -func (cli *client) getClient(id string) (Client, error) { - var c Client - name := cli.idToName(id) - if err := cli.get(resourceClient, name, &c); err != nil { - return Client{}, err - } - if c.ID != id { - return Client{}, fmt.Errorf("get client: ID %q mapped to client with ID %q", id, c.ID) - } - return c, nil -} - -func (cli *client) GetPassword(email string) (storage.Password, error) { - p, err := cli.getPassword(email) - if err != nil { - return storage.Password{}, err - } - return toStoragePassword(p), nil -} - -func (cli *client) getPassword(email string) (Password, error) { - // TODO(ericchiang): Figure out whose job it is to lowercase emails. 
- email = strings.ToLower(email) - var p Password - name := cli.idToName(email) - if err := cli.get(resourcePassword, name, &p); err != nil { - return Password{}, err - } - if email != p.Email { - return Password{}, fmt.Errorf("get email: email %q mapped to password with email %q", email, p.Email) - } - return p, nil -} - -func (cli *client) GetKeys() (storage.Keys, error) { - var keys Keys - if err := cli.get(resourceKeys, keysName, &keys); err != nil { - return storage.Keys{}, err - } - return toStorageKeys(keys), nil -} - -func (cli *client) GetRefresh(id string) (storage.RefreshToken, error) { - r, err := cli.getRefreshToken(id) - if err != nil { - return storage.RefreshToken{}, err - } - return toStorageRefreshToken(r), nil -} - -func (cli *client) getRefreshToken(id string) (r RefreshToken, err error) { - err = cli.get(resourceRefreshToken, id, &r) - return -} - -func (cli *client) GetOfflineSessions(userID string, connID string) (storage.OfflineSessions, error) { - o, err := cli.getOfflineSessions(userID, connID) - if err != nil { - return storage.OfflineSessions{}, err - } - return toStorageOfflineSessions(o), nil -} - -func (cli *client) getOfflineSessions(userID string, connID string) (o OfflineSessions, err error) { - name := cli.offlineTokenName(userID, connID) - if err = cli.get(resourceOfflineSessions, name, &o); err != nil { - return OfflineSessions{}, err - } - if userID != o.UserID || connID != o.ConnID { - return OfflineSessions{}, fmt.Errorf("get offline session: wrong session retrieved") - } - return o, nil -} - -func (cli *client) GetConnector(id string) (storage.Connector, error) { - var c Connector - if err := cli.get(resourceConnector, id, &c); err != nil { - return storage.Connector{}, err - } - return toStorageConnector(c), nil -} - -func (cli *client) ListClients() ([]storage.Client, error) { - return nil, errors.New("not implemented") -} - -func (cli *client) ListRefreshTokens() ([]storage.RefreshToken, error) { - return nil, errors.New("not implemented") -} - -func (cli *client) ListPasswords() (passwords []storage.Password, err error) { - var passwordList PasswordList - if err = cli.list(resourcePassword, &passwordList); err != nil { - return passwords, fmt.Errorf("failed to list passwords: %v", err) - } - - for _, password := range passwordList.Passwords { - p := storage.Password{ - Email: password.Email, - Hash: password.Hash, - Username: password.Username, - UserID: password.UserID, - } - passwords = append(passwords, p) - } - - return -} - -func (cli *client) ListConnectors() (connectors []storage.Connector, err error) { - var connectorList ConnectorList - if err = cli.list(resourceConnector, &connectorList); err != nil { - return connectors, fmt.Errorf("failed to list connectors: %v", err) - } - - connectors = make([]storage.Connector, len(connectorList.Connectors)) - for i, connector := range connectorList.Connectors { - connectors[i] = toStorageConnector(connector) - } - - return -} - -func (cli *client) DeleteAuthRequest(id string) error { - return cli.delete(resourceAuthRequest, id) -} - -func (cli *client) DeleteAuthCode(code string) error { - return cli.delete(resourceAuthCode, code) -} - -func (cli *client) DeleteClient(id string) error { - // Check for hash collition. 
- c, err := cli.getClient(id) - if err != nil { - return err - } - return cli.delete(resourceClient, c.ObjectMeta.Name) -} - -func (cli *client) DeleteRefresh(id string) error { - return cli.delete(resourceRefreshToken, id) -} - -func (cli *client) DeletePassword(email string) error { - // Check for hash collision. - p, err := cli.getPassword(email) - if err != nil { - return err - } - return cli.delete(resourcePassword, p.ObjectMeta.Name) -} - -func (cli *client) DeleteOfflineSessions(userID string, connID string) error { - // Check for hash collision. - o, err := cli.getOfflineSessions(userID, connID) - if err != nil { - return err - } - return cli.delete(resourceOfflineSessions, o.ObjectMeta.Name) -} - -func (cli *client) DeleteConnector(id string) error { - return cli.delete(resourceConnector, id) -} - -func (cli *client) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { - r, err := cli.getRefreshToken(id) - if err != nil { - return err - } - updated, err := updater(toStorageRefreshToken(r)) - if err != nil { - return err - } - updated.ID = id - - newToken := cli.fromStorageRefreshToken(updated) - newToken.ObjectMeta = r.ObjectMeta - return cli.put(resourceRefreshToken, r.ObjectMeta.Name, newToken) -} - -func (cli *client) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error { - c, err := cli.getClient(id) - if err != nil { - return err - } - - updated, err := updater(toStorageClient(c)) - if err != nil { - return err - } - updated.ID = c.ID - - newClient := cli.fromStorageClient(updated) - newClient.ObjectMeta = c.ObjectMeta - return cli.put(resourceClient, c.ObjectMeta.Name, newClient) -} - -func (cli *client) UpdatePassword(email string, updater func(old storage.Password) (storage.Password, error)) error { - p, err := cli.getPassword(email) - if err != nil { - return err - } - - updated, err := updater(toStoragePassword(p)) - if err != nil { - return err - } - updated.Email = p.Email - - newPassword := cli.fromStoragePassword(updated) - newPassword.ObjectMeta = p.ObjectMeta - return cli.put(resourcePassword, p.ObjectMeta.Name, newPassword) -} - -func (cli *client) UpdateOfflineSessions(userID string, connID string, updater func(old storage.OfflineSessions) (storage.OfflineSessions, error)) error { - o, err := cli.getOfflineSessions(userID, connID) - if err != nil { - return err - } - - updated, err := updater(toStorageOfflineSessions(o)) - if err != nil { - return err - } - - newOfflineSessions := cli.fromStorageOfflineSessions(updated) - newOfflineSessions.ObjectMeta = o.ObjectMeta - return cli.put(resourceOfflineSessions, o.ObjectMeta.Name, newOfflineSessions) -} - -func (cli *client) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { - firstUpdate := false - var keys Keys - if err := cli.get(resourceKeys, keysName, &keys); err != nil { - if err != storage.ErrNotFound { - return err - } - firstUpdate = true - } - var oldKeys storage.Keys - if !firstUpdate { - oldKeys = toStorageKeys(keys) - } - - updated, err := updater(oldKeys) - if err != nil { - return err - } - newKeys := cli.fromStorageKeys(updated) - if firstUpdate { - return cli.post(resourceKeys, newKeys) - } - newKeys.ObjectMeta = keys.ObjectMeta - return cli.put(resourceKeys, keysName, newKeys) -} - -func (cli *client) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error { - var req AuthRequest - err := cli.get(resourceAuthRequest, id, &req) - if err != nil { - 
return err - } - - updated, err := updater(toStorageAuthRequest(req)) - if err != nil { - return err - } - - newReq := cli.fromStorageAuthRequest(updated) - newReq.ObjectMeta = req.ObjectMeta - return cli.put(resourceAuthRequest, id, newReq) -} - -func (cli *client) UpdateConnector(id string, updater func(a storage.Connector) (storage.Connector, error)) error { - var c Connector - err := cli.get(resourceConnector, id, &c) - if err != nil { - return err - } - - updated, err := updater(toStorageConnector(c)) - if err != nil { - return err - } - - newConn := cli.fromStorageConnector(updated) - newConn.ObjectMeta = c.ObjectMeta - return cli.put(resourceConnector, id, newConn) -} - -func (cli *client) GarbageCollect(now time.Time) (result storage.GCResult, err error) { - var authRequests AuthRequestList - if err := cli.list(resourceAuthRequest, &authRequests); err != nil { - return result, fmt.Errorf("failed to list auth requests: %v", err) - } - - var delErr error - for _, authRequest := range authRequests.AuthRequests { - if now.After(authRequest.Expiry) { - if err := cli.delete(resourceAuthRequest, authRequest.ObjectMeta.Name); err != nil { - cli.logger.Errorf("failed to delete auth request: %v", err) - delErr = fmt.Errorf("failed to delete auth request: %v", err) - } - result.AuthRequests++ - } - } - if delErr != nil { - return result, delErr - } - - var authCodes AuthCodeList - if err := cli.list(resourceAuthCode, &authCodes); err != nil { - return result, fmt.Errorf("failed to list auth codes: %v", err) - } - - for _, authCode := range authCodes.AuthCodes { - if now.After(authCode.Expiry) { - if err := cli.delete(resourceAuthCode, authCode.ObjectMeta.Name); err != nil { - cli.logger.Errorf("failed to delete auth code %v", err) - delErr = fmt.Errorf("failed to delete auth code: %v", err) - } - result.AuthCodes++ - } - } - return result, delErr -} diff --git a/vendor/github.com/dexidp/dex/storage/kubernetes/types.go b/vendor/github.com/dexidp/dex/storage/kubernetes/types.go deleted file mode 100644 index 0fbb290..0000000 --- a/vendor/github.com/dexidp/dex/storage/kubernetes/types.go +++ /dev/null @@ -1,637 +0,0 @@ -package kubernetes - -import ( - "strings" - "time" - - jose "gopkg.in/square/go-jose.v2" - - "github.com/dexidp/dex/storage" - "github.com/dexidp/dex/storage/kubernetes/k8sapi" -) - -var crdMeta = k8sapi.TypeMeta{ - APIVersion: "apiextensions.k8s.io/v1beta1", - Kind: "CustomResourceDefinition", -} - -const apiGroup = "dex.coreos.com" - -// The set of custom resource definitions required by the storage. These are managed by -// the storage so it can migrate itself by creating new resources. 
-var customResourceDefinitions = []k8sapi.CustomResourceDefinition{ - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "authcodes.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "authcodes", - Singular: "authcode", - Kind: "AuthCode", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "authrequests.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "authrequests", - Singular: "authrequest", - Kind: "AuthRequest", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "oauth2clients.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "oauth2clients", - Singular: "oauth2client", - Kind: "OAuth2Client", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "signingkeies.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - // `signingkeies` is an artifact from the old TPR pluralization. - // Users don't directly interact with this value, hence leaving it - // as is. - Plural: "signingkeies", - Singular: "signingkey", - Kind: "SigningKey", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "refreshtokens.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "refreshtokens", - Singular: "refreshtoken", - Kind: "RefreshToken", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "passwords.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "passwords", - Singular: "password", - Kind: "Password", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "offlinesessionses.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "offlinesessionses", - Singular: "offlinesessions", - Kind: "OfflineSessions", - }, - }, - }, - { - ObjectMeta: k8sapi.ObjectMeta{ - Name: "connectors.dex.coreos.com", - }, - TypeMeta: crdMeta, - Spec: k8sapi.CustomResourceDefinitionSpec{ - Group: apiGroup, - Version: "v1", - Names: k8sapi.CustomResourceDefinitionNames{ - Plural: "connectors", - Singular: "connector", - Kind: "Connector", - }, - }, - }, -} - -// There will only ever be a single keys resource. Maintain this by setting a -// common name. -const keysName = "openid-connect-keys" - -// Client is a mirrored struct from storage with JSON struct tags and -// Kubernetes type metadata. -type Client struct { - // Name is a hash of the ID. - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - // ID is immutable, since it's a primary key and should not be changed. 
- ID string `json:"id,omitempty"` - - Secret string `json:"secret,omitempty"` - RedirectURIs []string `json:"redirectURIs,omitempty"` - TrustedPeers []string `json:"trustedPeers,omitempty"` - - Public bool `json:"public"` - - Name string `json:"name,omitempty"` - LogoURL string `json:"logoURL,omitempty"` -} - -// ClientList is a list of Clients. -type ClientList struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - Clients []Client `json:"items"` -} - -func (cli *client) fromStorageClient(c storage.Client) Client { - return Client{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindClient, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: cli.idToName(c.ID), - Namespace: cli.namespace, - }, - ID: c.ID, - Secret: c.Secret, - RedirectURIs: c.RedirectURIs, - TrustedPeers: c.TrustedPeers, - Public: c.Public, - Name: c.Name, - LogoURL: c.LogoURL, - } -} - -func toStorageClient(c Client) storage.Client { - return storage.Client{ - ID: c.ID, - Secret: c.Secret, - RedirectURIs: c.RedirectURIs, - TrustedPeers: c.TrustedPeers, - Public: c.Public, - Name: c.Name, - LogoURL: c.LogoURL, - } -} - -// Claims is a mirrored struct from storage with JSON struct tags. -type Claims struct { - UserID string `json:"userID"` - Username string `json:"username"` - PreferredUsername string `json:"preferredUsername"` - Email string `json:"email"` - EmailVerified bool `json:"emailVerified"` - Groups []string `json:"groups,omitempty"` -} - -func fromStorageClaims(i storage.Claims) Claims { - return Claims{ - UserID: i.UserID, - Username: i.Username, - PreferredUsername: i.PreferredUsername, - Email: i.Email, - EmailVerified: i.EmailVerified, - Groups: i.Groups, - } -} - -func toStorageClaims(i Claims) storage.Claims { - return storage.Claims{ - UserID: i.UserID, - Username: i.Username, - PreferredUsername: i.PreferredUsername, - Email: i.Email, - EmailVerified: i.EmailVerified, - Groups: i.Groups, - } -} - -// AuthRequest is a mirrored struct from storage with JSON struct tags and -// Kubernetes type metadata. -type AuthRequest struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - ClientID string `json:"clientID"` - ResponseTypes []string `json:"responseTypes,omitempty"` - Scopes []string `json:"scopes,omitempty"` - RedirectURI string `json:"redirectURI"` - - Nonce string `json:"nonce,omitempty"` - State string `json:"state,omitempty"` - - // The client has indicated that the end user must be shown an approval prompt - // on all requests. The server cannot cache their initial action for subsequent - // attempts. - ForceApprovalPrompt bool `json:"forceApprovalPrompt,omitempty"` - - LoggedIn bool `json:"loggedIn"` - - // The identity of the end user. Generally nil until the user authenticates - // with a backend. - Claims Claims `json:"claims,omitempty"` - // The connector used to login the user. Set when the user authenticates. - ConnectorID string `json:"connectorID,omitempty"` - ConnectorData []byte `json:"connectorData,omitempty"` - - Expiry time.Time `json:"expiry"` -} - -// AuthRequestList is a list of AuthRequests. 
-type AuthRequestList struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - AuthRequests []AuthRequest `json:"items"` -} - -func toStorageAuthRequest(req AuthRequest) storage.AuthRequest { - a := storage.AuthRequest{ - ID: req.ObjectMeta.Name, - ClientID: req.ClientID, - ResponseTypes: req.ResponseTypes, - Scopes: req.Scopes, - RedirectURI: req.RedirectURI, - Nonce: req.Nonce, - State: req.State, - ForceApprovalPrompt: req.ForceApprovalPrompt, - LoggedIn: req.LoggedIn, - ConnectorID: req.ConnectorID, - ConnectorData: req.ConnectorData, - Expiry: req.Expiry, - Claims: toStorageClaims(req.Claims), - } - return a -} - -func (cli *client) fromStorageAuthRequest(a storage.AuthRequest) AuthRequest { - req := AuthRequest{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindAuthRequest, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: a.ID, - Namespace: cli.namespace, - }, - ClientID: a.ClientID, - ResponseTypes: a.ResponseTypes, - Scopes: a.Scopes, - RedirectURI: a.RedirectURI, - Nonce: a.Nonce, - State: a.State, - LoggedIn: a.LoggedIn, - ForceApprovalPrompt: a.ForceApprovalPrompt, - ConnectorID: a.ConnectorID, - ConnectorData: a.ConnectorData, - Expiry: a.Expiry, - Claims: fromStorageClaims(a.Claims), - } - return req -} - -// Password is a mirrored struct from the stroage with JSON struct tags and -// Kubernetes type metadata. -type Password struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - // The Kubernetes name is actually an encoded version of this value. - // - // This field is IMMUTABLE. Do not change. - Email string `json:"email,omitempty"` - - Hash []byte `json:"hash,omitempty"` - Username string `json:"username,omitempty"` - UserID string `json:"userID,omitempty"` -} - -// PasswordList is a list of Passwords. -type PasswordList struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - Passwords []Password `json:"items"` -} - -func (cli *client) fromStoragePassword(p storage.Password) Password { - email := strings.ToLower(p.Email) - return Password{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindPassword, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: cli.idToName(email), - Namespace: cli.namespace, - }, - Email: email, - Hash: p.Hash, - Username: p.Username, - UserID: p.UserID, - } -} - -func toStoragePassword(p Password) storage.Password { - return storage.Password{ - Email: p.Email, - Hash: p.Hash, - Username: p.Username, - UserID: p.UserID, - } -} - -// AuthCode is a mirrored struct from storage with JSON struct tags and -// Kubernetes type metadata. -type AuthCode struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - ClientID string `json:"clientID"` - Scopes []string `json:"scopes,omitempty"` - RedirectURI string `json:"redirectURI"` - - Nonce string `json:"nonce,omitempty"` - State string `json:"state,omitempty"` - - Claims Claims `json:"claims,omitempty"` - - ConnectorID string `json:"connectorID,omitempty"` - ConnectorData []byte `json:"connectorData,omitempty"` - - Expiry time.Time `json:"expiry"` -} - -// AuthCodeList is a list of AuthCodes. 
-type AuthCodeList struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - AuthCodes []AuthCode `json:"items"` -} - -func (cli *client) fromStorageAuthCode(a storage.AuthCode) AuthCode { - return AuthCode{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindAuthCode, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: a.ID, - Namespace: cli.namespace, - }, - ClientID: a.ClientID, - RedirectURI: a.RedirectURI, - ConnectorID: a.ConnectorID, - ConnectorData: a.ConnectorData, - Nonce: a.Nonce, - Scopes: a.Scopes, - Claims: fromStorageClaims(a.Claims), - Expiry: a.Expiry, - } -} - -func toStorageAuthCode(a AuthCode) storage.AuthCode { - return storage.AuthCode{ - ID: a.ObjectMeta.Name, - ClientID: a.ClientID, - RedirectURI: a.RedirectURI, - ConnectorID: a.ConnectorID, - ConnectorData: a.ConnectorData, - Nonce: a.Nonce, - Scopes: a.Scopes, - Claims: toStorageClaims(a.Claims), - Expiry: a.Expiry, - } -} - -// RefreshToken is a mirrored struct from storage with JSON struct tags and -// Kubernetes type metadata. -type RefreshToken struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - CreatedAt time.Time - LastUsed time.Time - - ClientID string `json:"clientID"` - Scopes []string `json:"scopes,omitempty"` - - Token string `json:"token,omitempty"` - - Nonce string `json:"nonce,omitempty"` - - Claims Claims `json:"claims,omitempty"` - ConnectorID string `json:"connectorID,omitempty"` - ConnectorData []byte `json:"connectorData,omitempty"` -} - -// RefreshList is a list of refresh tokens. -type RefreshList struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - RefreshTokens []RefreshToken `json:"items"` -} - -func toStorageRefreshToken(r RefreshToken) storage.RefreshToken { - return storage.RefreshToken{ - ID: r.ObjectMeta.Name, - Token: r.Token, - CreatedAt: r.CreatedAt, - LastUsed: r.LastUsed, - ClientID: r.ClientID, - ConnectorID: r.ConnectorID, - ConnectorData: r.ConnectorData, - Scopes: r.Scopes, - Nonce: r.Nonce, - Claims: toStorageClaims(r.Claims), - } -} - -func (cli *client) fromStorageRefreshToken(r storage.RefreshToken) RefreshToken { - return RefreshToken{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindRefreshToken, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: r.ID, - Namespace: cli.namespace, - }, - Token: r.Token, - CreatedAt: r.CreatedAt, - LastUsed: r.LastUsed, - ClientID: r.ClientID, - ConnectorID: r.ConnectorID, - ConnectorData: r.ConnectorData, - Scopes: r.Scopes, - Nonce: r.Nonce, - Claims: fromStorageClaims(r.Claims), - } -} - -// Keys is a mirrored struct from storage with JSON struct tags and Kubernetes -// type metadata. -type Keys struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - // Key for creating and verifying signatures. These may be nil. - SigningKey *jose.JSONWebKey `json:"signingKey,omitempty"` - SigningKeyPub *jose.JSONWebKey `json:"signingKeyPub,omitempty"` - // Old signing keys which have been rotated but can still be used to validate - // existing signatures. - VerificationKeys []storage.VerificationKey `json:"verificationKeys,omitempty"` - - // The next time the signing key will rotate. - // - // For caching purposes, implementations MUST NOT update keys before this time. 
- NextRotation time.Time `json:"nextRotation"` -} - -func (cli *client) fromStorageKeys(keys storage.Keys) Keys { - return Keys{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindKeys, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: keysName, - Namespace: cli.namespace, - }, - SigningKey: keys.SigningKey, - SigningKeyPub: keys.SigningKeyPub, - VerificationKeys: keys.VerificationKeys, - NextRotation: keys.NextRotation, - } -} - -func toStorageKeys(keys Keys) storage.Keys { - return storage.Keys{ - SigningKey: keys.SigningKey, - SigningKeyPub: keys.SigningKeyPub, - VerificationKeys: keys.VerificationKeys, - NextRotation: keys.NextRotation, - } -} - -// OfflineSessions is a mirrored struct from storage with JSON struct tags and Kubernetes -// type metadata. -type OfflineSessions struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - UserID string `json:"userID,omitempty"` - ConnID string `json:"connID,omitempty"` - Refresh map[string]*storage.RefreshTokenRef `json:"refresh,omitempty"` - ConnectorData []byte `json:"connectorData,omitempty"` -} - -func (cli *client) fromStorageOfflineSessions(o storage.OfflineSessions) OfflineSessions { - return OfflineSessions{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindOfflineSessions, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: cli.offlineTokenName(o.UserID, o.ConnID), - Namespace: cli.namespace, - }, - UserID: o.UserID, - ConnID: o.ConnID, - Refresh: o.Refresh, - ConnectorData: o.ConnectorData, - } -} - -func toStorageOfflineSessions(o OfflineSessions) storage.OfflineSessions { - s := storage.OfflineSessions{ - UserID: o.UserID, - ConnID: o.ConnID, - Refresh: o.Refresh, - ConnectorData: o.ConnectorData, - } - if s.Refresh == nil { - // Server code assumes this will be non-nil. - s.Refresh = make(map[string]*storage.RefreshTokenRef) - } - return s -} - -// Connector is a mirrored struct from storage with JSON struct tags and Kubernetes -// type metadata. -type Connector struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ObjectMeta `json:"metadata,omitempty"` - - ID string `json:"id,omitempty"` - Type string `json:"type,omitempty"` - Name string `json:"name,omitempty"` - // Config holds connector specific configuration information - Config []byte `json:"config,omitempty"` -} - -func (cli *client) fromStorageConnector(c storage.Connector) Connector { - return Connector{ - TypeMeta: k8sapi.TypeMeta{ - Kind: kindConnector, - APIVersion: cli.apiVersion, - }, - ObjectMeta: k8sapi.ObjectMeta{ - Name: c.ID, - Namespace: cli.namespace, - }, - ID: c.ID, - Type: c.Type, - Name: c.Name, - Config: c.Config, - } -} - -func toStorageConnector(c Connector) storage.Connector { - return storage.Connector{ - ID: c.ID, - Type: c.Type, - Name: c.Name, - ResourceVersion: c.ObjectMeta.ResourceVersion, - Config: c.Config, - } -} - -// ConnectorList is a list of Connectors. -type ConnectorList struct { - k8sapi.TypeMeta `json:",inline"` - k8sapi.ListMeta `json:"metadata,omitempty"` - Connectors []Connector `json:"items"` -} diff --git a/vendor/github.com/dexidp/dex/storage/memory/memory.go b/vendor/github.com/dexidp/dex/storage/memory/memory.go deleted file mode 100644 index 681d204..0000000 --- a/vendor/github.com/dexidp/dex/storage/memory/memory.go +++ /dev/null @@ -1,467 +0,0 @@ -// Package memory provides an in memory implementation of the storage interface. 
-package memory - -import ( - "strings" - "sync" - "time" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" -) - -// New returns an in memory storage. -func New(logger log.Logger) storage.Storage { - return &memStorage{ - clients: make(map[string]storage.Client), - authCodes: make(map[string]storage.AuthCode), - refreshTokens: make(map[string]storage.RefreshToken), - authReqs: make(map[string]storage.AuthRequest), - passwords: make(map[string]storage.Password), - offlineSessions: make(map[offlineSessionID]storage.OfflineSessions), - connectors: make(map[string]storage.Connector), - logger: logger, - } -} - -// Config is an implementation of a storage configuration. -// -// TODO(ericchiang): Actually define a storage config interface and have registration. -type Config struct { - // The in memory implementation has no config. -} - -// Open always returns a new in memory storage. -func (c *Config) Open(logger log.Logger) (storage.Storage, error) { - return New(logger), nil -} - -type memStorage struct { - mu sync.Mutex - - clients map[string]storage.Client - authCodes map[string]storage.AuthCode - refreshTokens map[string]storage.RefreshToken - authReqs map[string]storage.AuthRequest - passwords map[string]storage.Password - offlineSessions map[offlineSessionID]storage.OfflineSessions - connectors map[string]storage.Connector - - keys storage.Keys - - logger log.Logger -} - -type offlineSessionID struct { - userID string - connID string -} - -func (s *memStorage) tx(f func()) { - s.mu.Lock() - defer s.mu.Unlock() - f() -} - -func (s *memStorage) Close() error { return nil } - -func (s *memStorage) GarbageCollect(now time.Time) (result storage.GCResult, err error) { - s.tx(func() { - for id, a := range s.authCodes { - if now.After(a.Expiry) { - delete(s.authCodes, id) - result.AuthCodes++ - } - } - for id, a := range s.authReqs { - if now.After(a.Expiry) { - delete(s.authReqs, id) - result.AuthRequests++ - } - } - }) - return result, nil -} - -func (s *memStorage) CreateClient(c storage.Client) (err error) { - s.tx(func() { - if _, ok := s.clients[c.ID]; ok { - err = storage.ErrAlreadyExists - } else { - s.clients[c.ID] = c - } - }) - return -} - -func (s *memStorage) CreateAuthCode(c storage.AuthCode) (err error) { - s.tx(func() { - if _, ok := s.authCodes[c.ID]; ok { - err = storage.ErrAlreadyExists - } else { - s.authCodes[c.ID] = c - } - }) - return -} - -func (s *memStorage) CreateRefresh(r storage.RefreshToken) (err error) { - s.tx(func() { - if _, ok := s.refreshTokens[r.ID]; ok { - err = storage.ErrAlreadyExists - } else { - s.refreshTokens[r.ID] = r - } - }) - return -} - -func (s *memStorage) CreateAuthRequest(a storage.AuthRequest) (err error) { - s.tx(func() { - if _, ok := s.authReqs[a.ID]; ok { - err = storage.ErrAlreadyExists - } else { - s.authReqs[a.ID] = a - } - }) - return -} - -func (s *memStorage) CreatePassword(p storage.Password) (err error) { - lowerEmail := strings.ToLower(p.Email) - s.tx(func() { - if _, ok := s.passwords[lowerEmail]; ok { - err = storage.ErrAlreadyExists - } else { - s.passwords[lowerEmail] = p - } - }) - return -} - -func (s *memStorage) CreateOfflineSessions(o storage.OfflineSessions) (err error) { - id := offlineSessionID{ - userID: o.UserID, - connID: o.ConnID, - } - s.tx(func() { - if _, ok := s.offlineSessions[id]; ok { - err = storage.ErrAlreadyExists - } else { - s.offlineSessions[id] = o - } - }) - return -} - -func (s *memStorage) CreateConnector(connector storage.Connector) (err error) { - s.tx(func() { - if _, ok := 
s.connectors[connector.ID]; ok { - err = storage.ErrAlreadyExists - } else { - s.connectors[connector.ID] = connector - } - }) - return -} - -func (s *memStorage) GetAuthCode(id string) (c storage.AuthCode, err error) { - s.tx(func() { - var ok bool - if c, ok = s.authCodes[id]; !ok { - err = storage.ErrNotFound - return - } - }) - return -} - -func (s *memStorage) GetPassword(email string) (p storage.Password, err error) { - email = strings.ToLower(email) - s.tx(func() { - var ok bool - if p, ok = s.passwords[email]; !ok { - err = storage.ErrNotFound - } - }) - return -} - -func (s *memStorage) GetClient(id string) (client storage.Client, err error) { - s.tx(func() { - var ok bool - if client, ok = s.clients[id]; !ok { - err = storage.ErrNotFound - } - }) - return -} - -func (s *memStorage) GetKeys() (keys storage.Keys, err error) { - s.tx(func() { keys = s.keys }) - return -} - -func (s *memStorage) GetRefresh(id string) (tok storage.RefreshToken, err error) { - s.tx(func() { - var ok bool - if tok, ok = s.refreshTokens[id]; !ok { - err = storage.ErrNotFound - return - } - }) - return -} - -func (s *memStorage) GetAuthRequest(id string) (req storage.AuthRequest, err error) { - s.tx(func() { - var ok bool - if req, ok = s.authReqs[id]; !ok { - err = storage.ErrNotFound - return - } - }) - return -} - -func (s *memStorage) GetOfflineSessions(userID string, connID string) (o storage.OfflineSessions, err error) { - id := offlineSessionID{ - userID: userID, - connID: connID, - } - s.tx(func() { - var ok bool - if o, ok = s.offlineSessions[id]; !ok { - err = storage.ErrNotFound - return - } - }) - return -} - -func (s *memStorage) GetConnector(id string) (connector storage.Connector, err error) { - s.tx(func() { - var ok bool - if connector, ok = s.connectors[id]; !ok { - err = storage.ErrNotFound - } - }) - return -} - -func (s *memStorage) ListClients() (clients []storage.Client, err error) { - s.tx(func() { - for _, client := range s.clients { - clients = append(clients, client) - } - }) - return -} - -func (s *memStorage) ListRefreshTokens() (tokens []storage.RefreshToken, err error) { - s.tx(func() { - for _, refresh := range s.refreshTokens { - tokens = append(tokens, refresh) - } - }) - return -} - -func (s *memStorage) ListPasswords() (passwords []storage.Password, err error) { - s.tx(func() { - for _, password := range s.passwords { - passwords = append(passwords, password) - } - }) - return -} - -func (s *memStorage) ListConnectors() (conns []storage.Connector, err error) { - s.tx(func() { - for _, c := range s.connectors { - conns = append(conns, c) - } - }) - return -} - -func (s *memStorage) DeletePassword(email string) (err error) { - email = strings.ToLower(email) - s.tx(func() { - if _, ok := s.passwords[email]; !ok { - err = storage.ErrNotFound - return - } - delete(s.passwords, email) - }) - return -} - -func (s *memStorage) DeleteClient(id string) (err error) { - s.tx(func() { - if _, ok := s.clients[id]; !ok { - err = storage.ErrNotFound - return - } - delete(s.clients, id) - }) - return -} - -func (s *memStorage) DeleteRefresh(id string) (err error) { - s.tx(func() { - if _, ok := s.refreshTokens[id]; !ok { - err = storage.ErrNotFound - return - } - delete(s.refreshTokens, id) - }) - return -} - -func (s *memStorage) DeleteAuthCode(id string) (err error) { - s.tx(func() { - if _, ok := s.authCodes[id]; !ok { - err = storage.ErrNotFound - return - } - delete(s.authCodes, id) - }) - return -} - -func (s *memStorage) DeleteAuthRequest(id string) (err error) { - s.tx(func() { 
- if _, ok := s.authReqs[id]; !ok { - err = storage.ErrNotFound - return - } - delete(s.authReqs, id) - }) - return -} - -func (s *memStorage) DeleteOfflineSessions(userID string, connID string) (err error) { - id := offlineSessionID{ - userID: userID, - connID: connID, - } - s.tx(func() { - if _, ok := s.offlineSessions[id]; !ok { - err = storage.ErrNotFound - return - } - delete(s.offlineSessions, id) - }) - return -} - -func (s *memStorage) DeleteConnector(id string) (err error) { - s.tx(func() { - if _, ok := s.connectors[id]; !ok { - err = storage.ErrNotFound - return - } - delete(s.connectors, id) - }) - return -} - -func (s *memStorage) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) (err error) { - s.tx(func() { - client, ok := s.clients[id] - if !ok { - err = storage.ErrNotFound - return - } - if client, err = updater(client); err == nil { - s.clients[id] = client - } - }) - return -} - -func (s *memStorage) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) (err error) { - s.tx(func() { - var keys storage.Keys - if keys, err = updater(s.keys); err == nil { - s.keys = keys - } - }) - return -} - -func (s *memStorage) UpdateAuthRequest(id string, updater func(old storage.AuthRequest) (storage.AuthRequest, error)) (err error) { - s.tx(func() { - req, ok := s.authReqs[id] - if !ok { - err = storage.ErrNotFound - return - } - if req, err = updater(req); err == nil { - s.authReqs[id] = req - } - }) - return -} - -func (s *memStorage) UpdatePassword(email string, updater func(p storage.Password) (storage.Password, error)) (err error) { - email = strings.ToLower(email) - s.tx(func() { - req, ok := s.passwords[email] - if !ok { - err = storage.ErrNotFound - return - } - if req, err = updater(req); err == nil { - s.passwords[email] = req - } - }) - return -} - -func (s *memStorage) UpdateRefreshToken(id string, updater func(p storage.RefreshToken) (storage.RefreshToken, error)) (err error) { - s.tx(func() { - r, ok := s.refreshTokens[id] - if !ok { - err = storage.ErrNotFound - return - } - if r, err = updater(r); err == nil { - s.refreshTokens[id] = r - } - }) - return -} - -func (s *memStorage) UpdateOfflineSessions(userID string, connID string, updater func(o storage.OfflineSessions) (storage.OfflineSessions, error)) (err error) { - id := offlineSessionID{ - userID: userID, - connID: connID, - } - s.tx(func() { - r, ok := s.offlineSessions[id] - if !ok { - err = storage.ErrNotFound - return - } - if r, err = updater(r); err == nil { - s.offlineSessions[id] = r - } - }) - return -} - -func (s *memStorage) UpdateConnector(id string, updater func(c storage.Connector) (storage.Connector, error)) (err error) { - s.tx(func() { - r, ok := s.connectors[id] - if !ok { - err = storage.ErrNotFound - return - } - if r, err = updater(r); err == nil { - s.connectors[id] = r - } - }) - return -} diff --git a/vendor/github.com/dexidp/dex/storage/sql/config.go b/vendor/github.com/dexidp/dex/storage/sql/config.go deleted file mode 100644 index 170dfd0..0000000 --- a/vendor/github.com/dexidp/dex/storage/sql/config.go +++ /dev/null @@ -1,379 +0,0 @@ -package sql - -import ( - "crypto/tls" - "crypto/x509" - "database/sql" - "fmt" - "io/ioutil" - "net" - "regexp" - "strconv" - "strings" - "time" - - "github.com/go-sql-driver/mysql" - "github.com/lib/pq" - sqlite3 "github.com/mattn/go-sqlite3" - - "github.com/dexidp/dex/pkg/log" - "github.com/dexidp/dex/storage" -) - -const ( - // postgres error codes - pgErrUniqueViolation = "23505" // unique_violation -) - 
-const ( - // MySQL error codes - mysqlErrDupEntry = 1062 - mysqlErrDupEntryWithKeyName = 1586 - mysqlErrUnknownSysVar = 1193 -) - -// SQLite3 options for creating an SQL db. -type SQLite3 struct { - // File to - File string `json:"file"` -} - -// Open creates a new storage implementation backed by SQLite3 -func (s *SQLite3) Open(logger log.Logger) (storage.Storage, error) { - conn, err := s.open(logger) - if err != nil { - return nil, err - } - return conn, nil -} - -func (s *SQLite3) open(logger log.Logger) (*conn, error) { - db, err := sql.Open("sqlite3", s.File) - if err != nil { - return nil, err - } - if s.File == ":memory:" { - // sqlite3 uses file locks to coordinate concurrent access. In memory - // doesn't support this, so limit the number of connections to 1. - db.SetMaxOpenConns(1) - } - - errCheck := func(err error) bool { - sqlErr, ok := err.(sqlite3.Error) - if !ok { - return false - } - return sqlErr.ExtendedCode == sqlite3.ErrConstraintPrimaryKey - } - - c := &conn{db, &flavorSQLite3, logger, errCheck} - if _, err := c.migrate(); err != nil { - return nil, fmt.Errorf("failed to perform migrations: %v", err) - } - return c, nil -} - -// nolint -const ( - // postgres SSL modes - pgSSLDisable = "disable" - pgSSLRequire = "require" - pgSSLVerifyCA = "verify-ca" - pgSSLVerifyFull = "verify-full" -) - -// nolint -const ( - // MySQL SSL modes - mysqlSSLTrue = "true" - mysqlSSLFalse = "false" - mysqlSSLSkipVerify = "skip-verify" - mysqlSSLCustom = "custom" -) - -// NetworkDB contains options common to SQL databases accessed over network. -type NetworkDB struct { - Database string - User string - Password string - Host string - Port uint16 - - ConnectionTimeout int // Seconds - - // database/sql tunables, see - // https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime and below - // Note: defaults will be set if these are 0 - MaxOpenConns int // default: 5 - MaxIdleConns int // default: 5 - ConnMaxLifetime int // Seconds, default: not set -} - -// SSL represents SSL options for network databases. -type SSL struct { - Mode string - CAFile string - // Files for client auth. - KeyFile string - CertFile string -} - -// Postgres options for creating an SQL db. -type Postgres struct { - NetworkDB - - SSL SSL `json:"ssl" yaml:"ssl"` -} - -// Open creates a new storage implementation backed by Postgres. -func (p *Postgres) Open(logger log.Logger) (storage.Storage, error) { - conn, err := p.open(logger) - if err != nil { - return nil, err - } - return conn, nil -} - -var strEsc = regexp.MustCompile(`([\\'])`) - -func dataSourceStr(str string) string { - return "'" + strEsc.ReplaceAllString(str, `\$1`) + "'" -} - -// createDataSourceName takes the configuration provided via the Postgres -// struct to create a data-source name that Go's database/sql package can -// make use of. 
-func (p *Postgres) createDataSourceName() string { - parameters := []string{} - - addParam := func(key, val string) { - parameters = append(parameters, fmt.Sprintf("%s=%s", key, val)) - } - - addParam("connect_timeout", strconv.Itoa(p.ConnectionTimeout)) - - // detect host:port for backwards-compatibility - host, port, err := net.SplitHostPort(p.Host) - if err != nil { - // not host:port, probably unix socket or bare address - - host = p.Host - - if p.Port != 0 { - port = strconv.Itoa(int(p.Port)) - } - } - - if host != "" { - addParam("host", dataSourceStr(host)) - } - - if port != "" { - addParam("port", port) - } - - if p.User != "" { - addParam("user", dataSourceStr(p.User)) - } - - if p.Password != "" { - addParam("password", dataSourceStr(p.Password)) - } - - if p.Database != "" { - addParam("dbname", dataSourceStr(p.Database)) - } - - if p.SSL.Mode == "" { - // Assume the strictest mode if unspecified. - addParam("sslmode", dataSourceStr(pgSSLVerifyFull)) - } else { - addParam("sslmode", dataSourceStr(p.SSL.Mode)) - } - - if p.SSL.CAFile != "" { - addParam("sslrootcert", dataSourceStr(p.SSL.CAFile)) - } - - if p.SSL.CertFile != "" { - addParam("sslcert", dataSourceStr(p.SSL.CertFile)) - } - - if p.SSL.KeyFile != "" { - addParam("sslkey", dataSourceStr(p.SSL.KeyFile)) - } - - return strings.Join(parameters, " ") -} - -func (p *Postgres) open(logger log.Logger) (*conn, error) { - dataSourceName := p.createDataSourceName() - - db, err := sql.Open("postgres", dataSourceName) - if err != nil { - return nil, err - } - - // set database/sql tunables if configured - if p.ConnMaxLifetime != 0 { - db.SetConnMaxLifetime(time.Duration(p.ConnMaxLifetime) * time.Second) - } - - if p.MaxIdleConns == 0 { - db.SetMaxIdleConns(5) - } else { - db.SetMaxIdleConns(p.MaxIdleConns) - } - - if p.MaxOpenConns == 0 { - db.SetMaxOpenConns(5) - } else { - db.SetMaxOpenConns(p.MaxOpenConns) - } - - errCheck := func(err error) bool { - sqlErr, ok := err.(*pq.Error) - if !ok { - return false - } - return sqlErr.Code == pgErrUniqueViolation - } - - c := &conn{db, &flavorPostgres, logger, errCheck} - if _, err := c.migrate(); err != nil { - return nil, fmt.Errorf("failed to perform migrations: %v", err) - } - return c, nil -} - -// MySQL options for creating a MySQL db. -type MySQL struct { - NetworkDB - - SSL SSL `json:"ssl" yaml:"ssl"` - - // TODO(pborzenkov): used by tests to reduce lock wait timeout. Should - // we make it exported and allow users to provide arbitrary params? - params map[string]string -} - -// Open creates a new storage implementation backed by MySQL. 
-func (s *MySQL) Open(logger log.Logger) (storage.Storage, error) { - conn, err := s.open(logger) - if err != nil { - return nil, err - } - return conn, nil -} - -func (s *MySQL) open(logger log.Logger) (*conn, error) { - cfg := mysql.Config{ - User: s.User, - Passwd: s.Password, - DBName: s.Database, - AllowNativePasswords: true, - - Timeout: time.Second * time.Duration(s.ConnectionTimeout), - - ParseTime: true, - Params: map[string]string{ - "transaction_isolation": "'SERIALIZABLE'", - }, - } - if s.Host != "" { - if s.Host[0] != '/' { - cfg.Net = "tcp" - cfg.Addr = s.Host - } else { - cfg.Net = "unix" - cfg.Addr = s.Host - } - } - if s.SSL.CAFile != "" || s.SSL.CertFile != "" || s.SSL.KeyFile != "" { - if err := s.makeTLSConfig(); err != nil { - return nil, fmt.Errorf("failed to make TLS config: %v", err) - } - cfg.TLSConfig = mysqlSSLCustom - } else if s.SSL.Mode == "" { - cfg.TLSConfig = mysqlSSLTrue - } else { - cfg.TLSConfig = s.SSL.Mode - } - for k, v := range s.params { - cfg.Params[k] = v - } - - db, err := sql.Open("mysql", cfg.FormatDSN()) - if err != nil { - return nil, err - } - - if s.MaxIdleConns == 0 { - /*Override default behaviour to fix https://github.com/dexidp/dex/issues/1608*/ - db.SetMaxIdleConns(0) - } else { - db.SetMaxIdleConns(s.MaxIdleConns) - } - - err = db.Ping() - if err != nil { - if mysqlErr, ok := err.(*mysql.MySQLError); ok && mysqlErr.Number == mysqlErrUnknownSysVar { - logger.Info("reconnecting with MySQL pre-5.7.20 compatibility mode") - - // MySQL 5.7.20 introduced transaction_isolation and deprecated tx_isolation. - // MySQL 8.0 doesn't have tx_isolation at all. - // https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation - delete(cfg.Params, "transaction_isolation") - cfg.Params["tx_isolation"] = "'SERIALIZABLE'" - - db, err = sql.Open("mysql", cfg.FormatDSN()) - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - - errCheck := func(err error) bool { - sqlErr, ok := err.(*mysql.MySQLError) - if !ok { - return false - } - return sqlErr.Number == mysqlErrDupEntry || - sqlErr.Number == mysqlErrDupEntryWithKeyName - } - - c := &conn{db, &flavorMySQL, logger, errCheck} - if _, err := c.migrate(); err != nil { - return nil, fmt.Errorf("failed to perform migrations: %v", err) - } - return c, nil -} - -func (s *MySQL) makeTLSConfig() error { - cfg := &tls.Config{} - if s.SSL.CAFile != "" { - rootCertPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(s.SSL.CAFile) - if err != nil { - return err - } - if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { - return fmt.Errorf("failed to append PEM") - } - cfg.RootCAs = rootCertPool - } - if s.SSL.CertFile != "" && s.SSL.KeyFile != "" { - clientCert := make([]tls.Certificate, 0, 1) - certs, err := tls.LoadX509KeyPair(s.SSL.CertFile, s.SSL.KeyFile) - if err != nil { - return err - } - clientCert = append(clientCert, certs) - cfg.Certificates = clientCert - } - - mysql.RegisterTLSConfig(mysqlSSLCustom, cfg) - return nil -} diff --git a/vendor/github.com/dexidp/dex/storage/sql/crud.go b/vendor/github.com/dexidp/dex/storage/sql/crud.go deleted file mode 100644 index e87dc56..0000000 --- a/vendor/github.com/dexidp/dex/storage/sql/crud.go +++ /dev/null @@ -1,869 +0,0 @@ -package sql - -import ( - "database/sql" - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "github.com/dexidp/dex/storage" -) - -// TODO(ericchiang): The update, insert, and select methods queries are all -// very repetitive. 
Consider creating them programmatically. - -// keysRowID is the ID of the only row we expect to populate the "keys" table. -const keysRowID = "keys" - -// encoder wraps the underlying value in a JSON marshaler which is automatically -// called by the database/sql package. -// -// s := []string{"planes", "bears"} -// err := db.Exec(`insert into t1 (id, things) values (1, $1)`, encoder(s)) -// if err != nil { -// // handle error -// } -// -// var r []byte -// err = db.QueryRow(`select things from t1 where id = 1;`).Scan(&r) -// if err != nil { -// // handle error -// } -// fmt.Printf("%s\n", r) // ["planes","bears"] -// -func encoder(i interface{}) driver.Valuer { - return jsonEncoder{i} -} - -// decoder wraps the underlying value in a JSON unmarshaler which can then be passed -// to a database Scan() method. -func decoder(i interface{}) sql.Scanner { - return jsonDecoder{i} -} - -type jsonEncoder struct { - i interface{} -} - -func (j jsonEncoder) Value() (driver.Value, error) { - b, err := json.Marshal(j.i) - if err != nil { - return nil, fmt.Errorf("marshal: %v", err) - } - return b, nil -} - -type jsonDecoder struct { - i interface{} -} - -func (j jsonDecoder) Scan(dest interface{}) error { - if dest == nil { - return errors.New("nil value") - } - b, ok := dest.([]byte) - if !ok { - return fmt.Errorf("expected []byte got %T", dest) - } - if err := json.Unmarshal(b, &j.i); err != nil { - return fmt.Errorf("unmarshal: %v", err) - } - return nil -} - -// Abstract conn vs trans. -type querier interface { - QueryRow(query string, args ...interface{}) *sql.Row -} - -// Abstract row vs rows. -type scanner interface { - Scan(dest ...interface{}) error -} - -func (c *conn) GarbageCollect(now time.Time) (result storage.GCResult, err error) { - r, err := c.Exec(`delete from auth_request where expiry < $1`, now) - if err != nil { - return result, fmt.Errorf("gc auth_request: %v", err) - } - if n, err := r.RowsAffected(); err == nil { - result.AuthRequests = n - } - - r, err = c.Exec(`delete from auth_code where expiry < $1`, now) - if err != nil { - return result, fmt.Errorf("gc auth_code: %v", err) - } - if n, err := r.RowsAffected(); err == nil { - result.AuthCodes = n - } - return -} - -func (c *conn) CreateAuthRequest(a storage.AuthRequest) error { - _, err := c.Exec(` - insert into auth_request ( - id, client_id, response_types, scopes, redirect_uri, nonce, state, - force_approval_prompt, logged_in, - claims_user_id, claims_username, claims_preferred_username, - claims_email, claims_email_verified, claims_groups, - connector_id, connector_data, - expiry - ) - values ( - $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18 - ); - `, - a.ID, a.ClientID, encoder(a.ResponseTypes), encoder(a.Scopes), a.RedirectURI, a.Nonce, a.State, - a.ForceApprovalPrompt, a.LoggedIn, - a.Claims.UserID, a.Claims.Username, a.Claims.PreferredUsername, - a.Claims.Email, a.Claims.EmailVerified, encoder(a.Claims.Groups), - a.ConnectorID, a.ConnectorData, - a.Expiry, - ) - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert auth request: %v", err) - } - return nil -} - -func (c *conn) UpdateAuthRequest(id string, updater func(a storage.AuthRequest) (storage.AuthRequest, error)) error { - return c.ExecTx(func(tx *trans) error { - r, err := getAuthRequest(tx, id) - if err != nil { - return err - } - - a, err := updater(r) - if err != nil { - return err - } - _, err = tx.Exec(` - update auth_request - set - client_id = $1, 
response_types = $2, scopes = $3, redirect_uri = $4, - nonce = $5, state = $6, force_approval_prompt = $7, logged_in = $8, - claims_user_id = $9, claims_username = $10, claims_preferred_username = $11, - claims_email = $12, claims_email_verified = $13, - claims_groups = $14, - connector_id = $15, connector_data = $16, - expiry = $17 - where id = $18; - `, - a.ClientID, encoder(a.ResponseTypes), encoder(a.Scopes), a.RedirectURI, a.Nonce, a.State, - a.ForceApprovalPrompt, a.LoggedIn, - a.Claims.UserID, a.Claims.Username, a.Claims.PreferredUsername, - a.Claims.Email, a.Claims.EmailVerified, - encoder(a.Claims.Groups), - a.ConnectorID, a.ConnectorData, - a.Expiry, r.ID, - ) - if err != nil { - return fmt.Errorf("update auth request: %v", err) - } - return nil - }) -} - -func (c *conn) GetAuthRequest(id string) (storage.AuthRequest, error) { - return getAuthRequest(c, id) -} - -func getAuthRequest(q querier, id string) (a storage.AuthRequest, err error) { - err = q.QueryRow(` - select - id, client_id, response_types, scopes, redirect_uri, nonce, state, - force_approval_prompt, logged_in, - claims_user_id, claims_username, claims_preferred_username, - claims_email, claims_email_verified, claims_groups, - connector_id, connector_data, expiry - from auth_request where id = $1; - `, id).Scan( - &a.ID, &a.ClientID, decoder(&a.ResponseTypes), decoder(&a.Scopes), &a.RedirectURI, &a.Nonce, &a.State, - &a.ForceApprovalPrompt, &a.LoggedIn, - &a.Claims.UserID, &a.Claims.Username, &a.Claims.PreferredUsername, - &a.Claims.Email, &a.Claims.EmailVerified, - decoder(&a.Claims.Groups), - &a.ConnectorID, &a.ConnectorData, &a.Expiry, - ) - if err != nil { - if err == sql.ErrNoRows { - return a, storage.ErrNotFound - } - return a, fmt.Errorf("select auth request: %v", err) - } - return a, nil -} - -func (c *conn) CreateAuthCode(a storage.AuthCode) error { - _, err := c.Exec(` - insert into auth_code ( - id, client_id, scopes, nonce, redirect_uri, - claims_user_id, claims_username, claims_preferred_username, - claims_email, claims_email_verified, claims_groups, - connector_id, connector_data, - expiry - ) - values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14); - `, - a.ID, a.ClientID, encoder(a.Scopes), a.Nonce, a.RedirectURI, a.Claims.UserID, - a.Claims.Username, a.Claims.PreferredUsername, a.Claims.Email, a.Claims.EmailVerified, - encoder(a.Claims.Groups), a.ConnectorID, a.ConnectorData, a.Expiry, - ) - - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert auth code: %v", err) - } - return nil -} - -func (c *conn) GetAuthCode(id string) (a storage.AuthCode, err error) { - err = c.QueryRow(` - select - id, client_id, scopes, nonce, redirect_uri, - claims_user_id, claims_username, claims_preferred_username, - claims_email, claims_email_verified, claims_groups, - connector_id, connector_data, - expiry - from auth_code where id = $1; - `, id).Scan( - &a.ID, &a.ClientID, decoder(&a.Scopes), &a.Nonce, &a.RedirectURI, &a.Claims.UserID, - &a.Claims.Username, &a.Claims.PreferredUsername, &a.Claims.Email, &a.Claims.EmailVerified, - decoder(&a.Claims.Groups), &a.ConnectorID, &a.ConnectorData, &a.Expiry, - ) - if err != nil { - if err == sql.ErrNoRows { - return a, storage.ErrNotFound - } - return a, fmt.Errorf("select auth code: %v", err) - } - return a, nil -} - -func (c *conn) CreateRefresh(r storage.RefreshToken) error { - _, err := c.Exec(` - insert into refresh_token ( - id, client_id, scopes, nonce, - claims_user_id, claims_username, 
claims_preferred_username, - claims_email, claims_email_verified, claims_groups, - connector_id, connector_data, - token, created_at, last_used - ) - values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15); - `, - r.ID, r.ClientID, encoder(r.Scopes), r.Nonce, - r.Claims.UserID, r.Claims.Username, r.Claims.PreferredUsername, - r.Claims.Email, r.Claims.EmailVerified, - encoder(r.Claims.Groups), - r.ConnectorID, r.ConnectorData, - r.Token, r.CreatedAt, r.LastUsed, - ) - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert refresh_token: %v", err) - } - return nil -} - -func (c *conn) UpdateRefreshToken(id string, updater func(old storage.RefreshToken) (storage.RefreshToken, error)) error { - return c.ExecTx(func(tx *trans) error { - r, err := getRefresh(tx, id) - if err != nil { - return err - } - if r, err = updater(r); err != nil { - return err - } - _, err = tx.Exec(` - update refresh_token - set - client_id = $1, - scopes = $2, - nonce = $3, - claims_user_id = $4, - claims_username = $5, - claims_preferred_username = $6, - claims_email = $7, - claims_email_verified = $8, - claims_groups = $9, - connector_id = $10, - connector_data = $11, - token = $12, - created_at = $13, - last_used = $14 - where - id = $15 - `, - r.ClientID, encoder(r.Scopes), r.Nonce, - r.Claims.UserID, r.Claims.Username, r.Claims.PreferredUsername, - r.Claims.Email, r.Claims.EmailVerified, - encoder(r.Claims.Groups), - r.ConnectorID, r.ConnectorData, - r.Token, r.CreatedAt, r.LastUsed, id, - ) - if err != nil { - return fmt.Errorf("update refresh token: %v", err) - } - return nil - }) -} - -func (c *conn) GetRefresh(id string) (storage.RefreshToken, error) { - return getRefresh(c, id) -} - -func getRefresh(q querier, id string) (storage.RefreshToken, error) { - return scanRefresh(q.QueryRow(` - select - id, client_id, scopes, nonce, - claims_user_id, claims_username, claims_preferred_username, - claims_email, claims_email_verified, - claims_groups, - connector_id, connector_data, - token, created_at, last_used - from refresh_token where id = $1; - `, id)) -} - -func (c *conn) ListRefreshTokens() ([]storage.RefreshToken, error) { - rows, err := c.Query(` - select - id, client_id, scopes, nonce, - claims_user_id, claims_username, claims_preferred_username, - claims_email, claims_email_verified, claims_groups, - connector_id, connector_data, - token, created_at, last_used - from refresh_token; - `) - if err != nil { - return nil, fmt.Errorf("query: %v", err) - } - var tokens []storage.RefreshToken - for rows.Next() { - r, err := scanRefresh(rows) - if err != nil { - return nil, err - } - tokens = append(tokens, r) - } - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("scan: %v", err) - } - return tokens, nil -} - -func scanRefresh(s scanner) (r storage.RefreshToken, err error) { - err = s.Scan( - &r.ID, &r.ClientID, decoder(&r.Scopes), &r.Nonce, - &r.Claims.UserID, &r.Claims.Username, &r.Claims.PreferredUsername, - &r.Claims.Email, &r.Claims.EmailVerified, - decoder(&r.Claims.Groups), - &r.ConnectorID, &r.ConnectorData, - &r.Token, &r.CreatedAt, &r.LastUsed, - ) - if err != nil { - if err == sql.ErrNoRows { - return r, storage.ErrNotFound - } - return r, fmt.Errorf("scan refresh_token: %v", err) - } - return r, nil -} - -func (c *conn) UpdateKeys(updater func(old storage.Keys) (storage.Keys, error)) error { - return c.ExecTx(func(tx *trans) error { - firstUpdate := false - // TODO(ericchiang): errors may cause a transaction be 
rolled back by the SQL - // server. Test this, and consider adding a COUNT() command beforehand. - old, err := getKeys(tx) - if err != nil { - if err != storage.ErrNotFound { - return fmt.Errorf("get keys: %v", err) - } - firstUpdate = true - old = storage.Keys{} - } - - nk, err := updater(old) - if err != nil { - return err - } - - if firstUpdate { - _, err = tx.Exec(` - insert into keys ( - id, verification_keys, signing_key, signing_key_pub, next_rotation - ) - values ($1, $2, $3, $4, $5); - `, - keysRowID, encoder(nk.VerificationKeys), encoder(nk.SigningKey), - encoder(nk.SigningKeyPub), nk.NextRotation, - ) - if err != nil { - return fmt.Errorf("insert: %v", err) - } - } else { - _, err = tx.Exec(` - update keys - set - verification_keys = $1, - signing_key = $2, - signing_key_pub = $3, - next_rotation = $4 - where id = $5; - `, - encoder(nk.VerificationKeys), encoder(nk.SigningKey), - encoder(nk.SigningKeyPub), nk.NextRotation, keysRowID, - ) - if err != nil { - return fmt.Errorf("update: %v", err) - } - } - return nil - }) -} - -func (c *conn) GetKeys() (keys storage.Keys, err error) { - return getKeys(c) -} - -func getKeys(q querier) (keys storage.Keys, err error) { - err = q.QueryRow(` - select - verification_keys, signing_key, signing_key_pub, next_rotation - from keys - where id=$1 - `, keysRowID).Scan( - decoder(&keys.VerificationKeys), decoder(&keys.SigningKey), - decoder(&keys.SigningKeyPub), &keys.NextRotation, - ) - if err != nil { - if err == sql.ErrNoRows { - return keys, storage.ErrNotFound - } - return keys, fmt.Errorf("query keys: %v", err) - } - return keys, nil -} - -func (c *conn) UpdateClient(id string, updater func(old storage.Client) (storage.Client, error)) error { - return c.ExecTx(func(tx *trans) error { - cli, err := getClient(tx, id) - if err != nil { - return err - } - nc, err := updater(cli) - if err != nil { - return err - } - - _, err = tx.Exec(` - update client - set - secret = $1, - redirect_uris = $2, - trusted_peers = $3, - public = $4, - name = $5, - logo_url = $6 - where id = $7; - `, nc.Secret, encoder(nc.RedirectURIs), encoder(nc.TrustedPeers), nc.Public, nc.Name, nc.LogoURL, id, - ) - if err != nil { - return fmt.Errorf("update client: %v", err) - } - return nil - }) -} - -func (c *conn) CreateClient(cli storage.Client) error { - _, err := c.Exec(` - insert into client ( - id, secret, redirect_uris, trusted_peers, public, name, logo_url - ) - values ($1, $2, $3, $4, $5, $6, $7); - `, - cli.ID, cli.Secret, encoder(cli.RedirectURIs), encoder(cli.TrustedPeers), - cli.Public, cli.Name, cli.LogoURL, - ) - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert client: %v", err) - } - return nil -} - -func getClient(q querier, id string) (storage.Client, error) { - return scanClient(q.QueryRow(` - select - id, secret, redirect_uris, trusted_peers, public, name, logo_url - from client where id = $1; - `, id)) -} - -func (c *conn) GetClient(id string) (storage.Client, error) { - return getClient(c, id) -} - -func (c *conn) ListClients() ([]storage.Client, error) { - rows, err := c.Query(` - select - id, secret, redirect_uris, trusted_peers, public, name, logo_url - from client; - `) - if err != nil { - return nil, err - } - var clients []storage.Client - for rows.Next() { - cli, err := scanClient(rows) - if err != nil { - return nil, err - } - clients = append(clients, cli) - } - if err := rows.Err(); err != nil { - return nil, err - } - return clients, nil -} - -func scanClient(s scanner) (cli 
storage.Client, err error) { - err = s.Scan( - &cli.ID, &cli.Secret, decoder(&cli.RedirectURIs), decoder(&cli.TrustedPeers), - &cli.Public, &cli.Name, &cli.LogoURL, - ) - if err != nil { - if err == sql.ErrNoRows { - return cli, storage.ErrNotFound - } - return cli, fmt.Errorf("get client: %v", err) - } - return cli, nil -} - -func (c *conn) CreatePassword(p storage.Password) error { - p.Email = strings.ToLower(p.Email) - _, err := c.Exec(` - insert into password ( - email, hash, username, user_id - ) - values ( - $1, $2, $3, $4 - ); - `, - p.Email, p.Hash, p.Username, p.UserID, - ) - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert password: %v", err) - } - return nil -} - -func (c *conn) UpdatePassword(email string, updater func(p storage.Password) (storage.Password, error)) error { - return c.ExecTx(func(tx *trans) error { - p, err := getPassword(tx, email) - if err != nil { - return err - } - - np, err := updater(p) - if err != nil { - return err - } - _, err = tx.Exec(` - update password - set - hash = $1, username = $2, user_id = $3 - where email = $4; - `, - np.Hash, np.Username, np.UserID, p.Email, - ) - if err != nil { - return fmt.Errorf("update password: %v", err) - } - return nil - }) -} - -func (c *conn) GetPassword(email string) (storage.Password, error) { - return getPassword(c, email) -} - -func getPassword(q querier, email string) (p storage.Password, err error) { - return scanPassword(q.QueryRow(` - select - email, hash, username, user_id - from password where email = $1; - `, strings.ToLower(email))) -} - -func (c *conn) ListPasswords() ([]storage.Password, error) { - rows, err := c.Query(` - select - email, hash, username, user_id - from password; - `) - if err != nil { - return nil, err - } - - var passwords []storage.Password - for rows.Next() { - p, err := scanPassword(rows) - if err != nil { - return nil, err - } - passwords = append(passwords, p) - } - if err := rows.Err(); err != nil { - return nil, err - } - return passwords, nil -} - -func scanPassword(s scanner) (p storage.Password, err error) { - err = s.Scan( - &p.Email, &p.Hash, &p.Username, &p.UserID, - ) - if err != nil { - if err == sql.ErrNoRows { - return p, storage.ErrNotFound - } - return p, fmt.Errorf("select password: %v", err) - } - return p, nil -} - -func (c *conn) CreateOfflineSessions(s storage.OfflineSessions) error { - _, err := c.Exec(` - insert into offline_session ( - user_id, conn_id, refresh, connector_data - ) - values ( - $1, $2, $3, $4 - ); - `, - s.UserID, s.ConnID, encoder(s.Refresh), s.ConnectorData, - ) - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert offline session: %v", err) - } - return nil -} - -func (c *conn) UpdateOfflineSessions(userID string, connID string, updater func(s storage.OfflineSessions) (storage.OfflineSessions, error)) error { - return c.ExecTx(func(tx *trans) error { - s, err := getOfflineSessions(tx, userID, connID) - if err != nil { - return err - } - - newSession, err := updater(s) - if err != nil { - return err - } - _, err = tx.Exec(` - update offline_session - set - refresh = $1, - connector_data = $2 - where user_id = $3 AND conn_id = $4; - `, - encoder(newSession.Refresh), newSession.ConnectorData, s.UserID, s.ConnID, - ) - if err != nil { - return fmt.Errorf("update offline session: %v", err) - } - return nil - }) -} - -func (c *conn) GetOfflineSessions(userID string, connID string) (storage.OfflineSessions, error) { - 
return getOfflineSessions(c, userID, connID) -} - -func getOfflineSessions(q querier, userID string, connID string) (storage.OfflineSessions, error) { - return scanOfflineSessions(q.QueryRow(` - select - user_id, conn_id, refresh, connector_data - from offline_session - where user_id = $1 AND conn_id = $2; - `, userID, connID)) -} - -func scanOfflineSessions(s scanner) (o storage.OfflineSessions, err error) { - err = s.Scan( - &o.UserID, &o.ConnID, decoder(&o.Refresh), &o.ConnectorData, - ) - if err != nil { - if err == sql.ErrNoRows { - return o, storage.ErrNotFound - } - return o, fmt.Errorf("select offline session: %v", err) - } - return o, nil -} - -func (c *conn) CreateConnector(connector storage.Connector) error { - _, err := c.Exec(` - insert into connector ( - id, type, name, resource_version, config - ) - values ( - $1, $2, $3, $4, $5 - ); - `, - connector.ID, connector.Type, connector.Name, connector.ResourceVersion, connector.Config, - ) - if err != nil { - if c.alreadyExistsCheck(err) { - return storage.ErrAlreadyExists - } - return fmt.Errorf("insert connector: %v", err) - } - return nil -} - -func (c *conn) UpdateConnector(id string, updater func(s storage.Connector) (storage.Connector, error)) error { - return c.ExecTx(func(tx *trans) error { - connector, err := getConnector(tx, id) - if err != nil { - return err - } - - newConn, err := updater(connector) - if err != nil { - return err - } - _, err = tx.Exec(` - update connector - set - type = $1, - name = $2, - resource_version = $3, - config = $4 - where id = $5; - `, - newConn.Type, newConn.Name, newConn.ResourceVersion, newConn.Config, connector.ID, - ) - if err != nil { - return fmt.Errorf("update connector: %v", err) - } - return nil - }) -} - -func (c *conn) GetConnector(id string) (storage.Connector, error) { - return getConnector(c, id) -} - -func getConnector(q querier, id string) (storage.Connector, error) { - return scanConnector(q.QueryRow(` - select - id, type, name, resource_version, config - from connector - where id = $1; - `, id)) -} - -func scanConnector(s scanner) (c storage.Connector, err error) { - err = s.Scan( - &c.ID, &c.Type, &c.Name, &c.ResourceVersion, &c.Config, - ) - if err != nil { - if err == sql.ErrNoRows { - return c, storage.ErrNotFound - } - return c, fmt.Errorf("select connector: %v", err) - } - return c, nil -} - -func (c *conn) ListConnectors() ([]storage.Connector, error) { - rows, err := c.Query(` - select - id, type, name, resource_version, config - from connector; - `) - if err != nil { - return nil, err - } - var connectors []storage.Connector - for rows.Next() { - conn, err := scanConnector(rows) - if err != nil { - return nil, err - } - connectors = append(connectors, conn) - } - if err := rows.Err(); err != nil { - return nil, err - } - return connectors, nil -} - -func (c *conn) DeleteAuthRequest(id string) error { return c.delete("auth_request", "id", id) } -func (c *conn) DeleteAuthCode(id string) error { return c.delete("auth_code", "id", id) } -func (c *conn) DeleteClient(id string) error { return c.delete("client", "id", id) } -func (c *conn) DeleteRefresh(id string) error { return c.delete("refresh_token", "id", id) } -func (c *conn) DeletePassword(email string) error { - return c.delete("password", "email", strings.ToLower(email)) -} -func (c *conn) DeleteConnector(id string) error { return c.delete("connector", "id", id) } - -func (c *conn) DeleteOfflineSessions(userID string, connID string) error { - result, err := c.Exec(`delete from offline_session where user_id = $1 
AND conn_id = $2`, userID, connID) - if err != nil { - return fmt.Errorf("delete offline_session: user_id = %s, conn_id = %s", userID, connID) - } - - // For now mandate that the driver implements RowsAffected. If we ever need to support - // a driver that doesn't implement this, we can run this in a transaction with a get beforehand. - n, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("rows affected: %v", err) - } - if n < 1 { - return storage.ErrNotFound - } - return nil -} - -// Do NOT call directly. Does not escape table. -func (c *conn) delete(table, field, id string) error { - result, err := c.Exec(`delete from `+table+` where `+field+` = $1`, id) - if err != nil { - return fmt.Errorf("delete %s: %v", table, id) - } - - // For now mandate that the driver implements RowsAffected. If we ever need to support - // a driver that doesn't implement this, we can run this in a transaction with a get beforehand. - n, err := result.RowsAffected() - if err != nil { - return fmt.Errorf("rows affected: %v", err) - } - if n < 1 { - return storage.ErrNotFound - } - return nil -} diff --git a/vendor/github.com/dexidp/dex/storage/sql/migrate.go b/vendor/github.com/dexidp/dex/storage/sql/migrate.go deleted file mode 100644 index dc72753..0000000 --- a/vendor/github.com/dexidp/dex/storage/sql/migrate.go +++ /dev/null @@ -1,232 +0,0 @@ -package sql - -import ( - "database/sql" - "fmt" -) - -func (c *conn) migrate() (int, error) { - _, err := c.Exec(` - create table if not exists migrations ( - num integer not null, - at timestamptz not null - ); - `) - if err != nil { - return 0, fmt.Errorf("creating migration table: %v", err) - } - - i := 0 - done := false - - var flavorMigrations []migration - for _, m := range migrations { - if m.flavor == nil || m.flavor == c.flavor { - flavorMigrations = append(flavorMigrations, m) - } - } - - for { - err := c.ExecTx(func(tx *trans) error { - // Within a transaction, perform a single migration. - var ( - num sql.NullInt64 - n int - ) - if err := tx.QueryRow(`select max(num) from migrations;`).Scan(&num); err != nil { - return fmt.Errorf("select max migration: %v", err) - } - if num.Valid { - n = int(num.Int64) - } - if n >= len(flavorMigrations) { - done = true - return nil - } - - migrationNum := n + 1 - m := flavorMigrations[n] - for i := range m.stmts { - if _, err := tx.Exec(m.stmts[i]); err != nil { - return fmt.Errorf("migration %d statement %d failed: %v", migrationNum, i+1, err) - } - } - - q := `insert into migrations (num, at) values ($1, now());` - if _, err := tx.Exec(q, migrationNum); err != nil { - return fmt.Errorf("update migration table: %v", err) - } - return nil - }) - if err != nil { - return i, err - } - if done { - break - } - i++ - } - - return i, nil -} - -type migration struct { - stmts []string - - // If flavor is nil the migration will take place for all database backend flavors. - // If specified, only for that corresponding flavor, in that case stmts can be written - // in the specific SQL dialect. - flavor *flavor -} - -// All SQL flavors share migration strategies. 
-var migrations = []migration{ - { - stmts: []string{` - create table client ( - id text not null primary key, - secret text not null, - redirect_uris bytea not null, -- JSON array of strings - trusted_peers bytea not null, -- JSON array of strings - public boolean not null, - name text not null, - logo_url text not null - );`, - ` - create table auth_request ( - id text not null primary key, - client_id text not null, - response_types bytea not null, -- JSON array of strings - scopes bytea not null, -- JSON array of strings - redirect_uri text not null, - nonce text not null, - state text not null, - force_approval_prompt boolean not null, - - logged_in boolean not null, - - claims_user_id text not null, - claims_username text not null, - claims_email text not null, - claims_email_verified boolean not null, - claims_groups bytea not null, -- JSON array of strings - - connector_id text not null, - connector_data bytea, - - expiry timestamptz not null - );`, - ` - create table auth_code ( - id text not null primary key, - client_id text not null, - scopes bytea not null, -- JSON array of strings - nonce text not null, - redirect_uri text not null, - - claims_user_id text not null, - claims_username text not null, - claims_email text not null, - claims_email_verified boolean not null, - claims_groups bytea not null, -- JSON array of strings - - connector_id text not null, - connector_data bytea, - - expiry timestamptz not null - );`, - ` - create table refresh_token ( - id text not null primary key, - client_id text not null, - scopes bytea not null, -- JSON array of strings - nonce text not null, - - claims_user_id text not null, - claims_username text not null, - claims_email text not null, - claims_email_verified boolean not null, - claims_groups bytea not null, -- JSON array of strings - - connector_id text not null, - connector_data bytea - );`, - ` - create table password ( - email text not null primary key, - hash bytea not null, - username text not null, - user_id text not null - );`, - ` - -- keys is a weird table because we only ever expect there to be a single row - create table keys ( - id text not null primary key, - verification_keys bytea not null, -- JSON array - signing_key bytea not null, -- JSON object - signing_key_pub bytea not null, -- JSON object - next_rotation timestamptz not null - );`, - }, - }, - { - stmts: []string{` - alter table refresh_token - add column token text not null default '';`, - ` - alter table refresh_token - add column created_at timestamptz not null default '0001-01-01 00:00:00 UTC';`, - ` - alter table refresh_token - add column last_used timestamptz not null default '0001-01-01 00:00:00 UTC';`, - }, - }, - { - stmts: []string{` - create table offline_session ( - user_id text not null, - conn_id text not null, - refresh bytea not null, - PRIMARY KEY (user_id, conn_id) - );`, - }, - }, - { - stmts: []string{` - create table connector ( - id text not null primary key, - type text not null, - name text not null, - resource_version text not null, - config bytea - );`, - }, - }, - { - stmts: []string{` - alter table auth_code - add column claims_preferred_username text not null default '';`, - ` - alter table auth_request - add column claims_preferred_username text not null default '';`, - ` - alter table refresh_token - add column claims_preferred_username text not null default '';`, - }, - }, - { - stmts: []string{` - alter table offline_session - add column connector_data bytea; - `, - }, - }, - { - stmts: []string{` - alter table auth_request - 
modify column state varchar(4096); - `, - }, - flavor: &flavorMySQL, - }, -} diff --git a/vendor/github.com/dexidp/dex/storage/sql/sql.go b/vendor/github.com/dexidp/dex/storage/sql/sql.go deleted file mode 100644 index 4f1ed6c..0000000 --- a/vendor/github.com/dexidp/dex/storage/sql/sql.go +++ /dev/null @@ -1,198 +0,0 @@ -// Package sql provides SQL implementations of the storage interface. -package sql - -import ( - "database/sql" - "regexp" - "time" - - // import third party drivers - _ "github.com/lib/pq" - _ "github.com/mattn/go-sqlite3" - - "github.com/dexidp/dex/pkg/log" -) - -// flavor represents a specific SQL implementation, and is used to translate query strings -// between different drivers. Flavors shouldn't aim to translate all possible SQL statements, -// only the specific queries used by the SQL storages. -type flavor struct { - queryReplacers []replacer - - // Optional function to create and finish a transaction. - executeTx func(db *sql.DB, fn func(*sql.Tx) error) error - - // Does the flavor support timezones? - supportsTimezones bool -} - -// A regexp with a replacement string. -type replacer struct { - re *regexp.Regexp - with string -} - -// Match a postgres query binds. E.g. "$1", "$12", etc. -var bindRegexp = regexp.MustCompile(`\$\d+`) - -func matchLiteral(s string) *regexp.Regexp { - return regexp.MustCompile(`\b` + regexp.QuoteMeta(s) + `\b`) -} - -var ( - // The "github.com/lib/pq" driver is the default flavor. All others are - // translations of this. - flavorPostgres = flavor{ - // The default behavior for Postgres transactions is consistent reads, not consistent writes. - // For each transaction opened, ensure it has the correct isolation level. - // - // See: https://www.postgresql.org/docs/9.3/static/sql-set-transaction.html - // - // NOTE(ericchiang): For some reason using `SET SESSION CHARACTERISTICS AS TRANSACTION` at a - // session level didn't work for some edge cases. Might be something worth exploring. - executeTx: func(db *sql.DB, fn func(sqlTx *sql.Tx) error) error { - tx, err := db.Begin() - if err != nil { - return err - } - defer tx.Rollback() - - if _, err := tx.Exec(`SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;`); err != nil { - return err - } - if err := fn(tx); err != nil { - return err - } - return tx.Commit() - }, - - supportsTimezones: true, - } - - flavorSQLite3 = flavor{ - queryReplacers: []replacer{ - {bindRegexp, "?"}, - // Translate for booleans to integers. - {matchLiteral("true"), "1"}, - {matchLiteral("false"), "0"}, - {matchLiteral("boolean"), "integer"}, - // Translate other types. - {matchLiteral("bytea"), "blob"}, - {matchLiteral("timestamptz"), "timestamp"}, - // SQLite doesn't have a "now()" method, replace with "date('now')" - {regexp.MustCompile(`\bnow\(\)`), "date('now')"}, - }, - } - - flavorMySQL = flavor{ - queryReplacers: []replacer{ - {bindRegexp, "?"}, - // Translate types. - {matchLiteral("bytea"), "blob"}, - {matchLiteral("timestamptz"), "datetime(3)"}, - // MySQL doesn't support indicies on text fields w/o - // specifying key length. Use varchar instead (767 byte - // is the max key length for InnoDB with 4k pages). - // For compound indexes (with two keys) even less. - {matchLiteral("text"), "varchar(384)"}, - // Quote keywords and reserved words used as identifiers. - {regexp.MustCompile(`\b(keys)\b`), "`$1`"}, - // Change default timestamp to fit datetime. 
- {regexp.MustCompile(`0001-01-01 00:00:00 UTC`), "1000-01-01 00:00:00"}, - }, - } -) - -func (f flavor) translate(query string) string { - // TODO(ericchiang): Heavy cashing. - for _, r := range f.queryReplacers { - query = r.re.ReplaceAllString(query, r.with) - } - return query -} - -// translateArgs translates query parameters that may be unique to -// a specific SQL flavor. For example, standardizing "time.Time" -// types to UTC for clients that don't provide timezone support. -func (c *conn) translateArgs(args []interface{}) []interface{} { - if c.flavor.supportsTimezones { - return args - } - - for i, arg := range args { - if t, ok := arg.(time.Time); ok { - args[i] = t.UTC() - } - } - return args -} - -// conn is the main database connection. -type conn struct { - db *sql.DB - flavor *flavor - logger log.Logger - alreadyExistsCheck func(err error) bool -} - -func (c *conn) Close() error { - return c.db.Close() -} - -// conn implements the same method signatures as encoding/sql.DB. - -func (c *conn) Exec(query string, args ...interface{}) (sql.Result, error) { - query = c.flavor.translate(query) - return c.db.Exec(query, c.translateArgs(args)...) -} - -func (c *conn) Query(query string, args ...interface{}) (*sql.Rows, error) { - query = c.flavor.translate(query) - return c.db.Query(query, c.translateArgs(args)...) -} - -func (c *conn) QueryRow(query string, args ...interface{}) *sql.Row { - query = c.flavor.translate(query) - return c.db.QueryRow(query, c.translateArgs(args)...) -} - -// ExecTx runs a method which operates on a transaction. -func (c *conn) ExecTx(fn func(tx *trans) error) error { - if c.flavor.executeTx != nil { - return c.flavor.executeTx(c.db, func(sqlTx *sql.Tx) error { - return fn(&trans{sqlTx, c}) - }) - } - - sqlTx, err := c.db.Begin() - if err != nil { - return err - } - if err := fn(&trans{sqlTx, c}); err != nil { - sqlTx.Rollback() - return err - } - return sqlTx.Commit() -} - -type trans struct { - tx *sql.Tx - c *conn -} - -// trans implements the same method signatures as encoding/sql.Tx. - -func (t *trans) Exec(query string, args ...interface{}) (sql.Result, error) { - query = t.c.flavor.translate(query) - return t.tx.Exec(query, t.c.translateArgs(args)...) -} - -func (t *trans) Query(query string, args ...interface{}) (*sql.Rows, error) { - query = t.c.flavor.translate(query) - return t.tx.Query(query, t.c.translateArgs(args)...) -} - -func (t *trans) QueryRow(query string, args ...interface{}) *sql.Row { - query = t.c.flavor.translate(query) - return t.tx.QueryRow(query, t.c.translateArgs(args)...) -} diff --git a/vendor/github.com/dexidp/dex/storage/static.go b/vendor/github.com/dexidp/dex/storage/static.go deleted file mode 100644 index 8a38385..0000000 --- a/vendor/github.com/dexidp/dex/storage/static.go +++ /dev/null @@ -1,232 +0,0 @@ -package storage - -import ( - "errors" - "strings" - - "github.com/dexidp/dex/pkg/log" -) - -// Tests for this code are in the "memory" package, since this package doesn't -// define a concrete storage implementation. - -// staticClientsStorage is a storage that only allow read-only actions on clients. -// All read actions return from the list of clients stored in memory, not the -// underlying -type staticClientsStorage struct { - Storage - - // A read-only set of clients. - clients []Client - clientsByID map[string]Client -} - -// WithStaticClients adds a read-only set of clients to the underlying storages. 
-func WithStaticClients(s Storage, staticClients []Client) Storage { - clientsByID := make(map[string]Client, len(staticClients)) - for _, client := range staticClients { - clientsByID[client.ID] = client - } - - return staticClientsStorage{s, staticClients, clientsByID} -} - -func (s staticClientsStorage) GetClient(id string) (Client, error) { - if client, ok := s.clientsByID[id]; ok { - return client, nil - } - return s.Storage.GetClient(id) -} - -func (s staticClientsStorage) isStatic(id string) bool { - _, ok := s.clientsByID[id] - return ok -} - -func (s staticClientsStorage) ListClients() ([]Client, error) { - clients, err := s.Storage.ListClients() - if err != nil { - return nil, err - } - n := 0 - for _, client := range clients { - // If a client in the backing storage has the same ID as a static client - // prefer the static client. - if !s.isStatic(client.ID) { - clients[n] = client - n++ - } - } - return append(clients[:n], s.clients...), nil -} - -func (s staticClientsStorage) CreateClient(c Client) error { - if s.isStatic(c.ID) { - return errors.New("static clients: read-only cannot create client") - } - return s.Storage.CreateClient(c) -} - -func (s staticClientsStorage) DeleteClient(id string) error { - if s.isStatic(id) { - return errors.New("static clients: read-only cannot delete client") - } - return s.Storage.DeleteClient(id) -} - -func (s staticClientsStorage) UpdateClient(id string, updater func(old Client) (Client, error)) error { - if s.isStatic(id) { - return errors.New("static clients: read-only cannot update client") - } - return s.Storage.UpdateClient(id, updater) -} - -type staticPasswordsStorage struct { - Storage - - // A read-only set of passwords. - passwords []Password - // A map of passwords that is indexed by lower-case email ids - passwordsByEmail map[string]Password - - logger log.Logger -} - -// WithStaticPasswords returns a storage with a read-only set of passwords. -func WithStaticPasswords(s Storage, staticPasswords []Password, logger log.Logger) Storage { - passwordsByEmail := make(map[string]Password, len(staticPasswords)) - for _, p := range staticPasswords { - //Enable case insensitive email comparison. - lowerEmail := strings.ToLower(p.Email) - if _, ok := passwordsByEmail[lowerEmail]; ok { - logger.Errorf("Attempting to create StaticPasswords with the same email id: %s", p.Email) - } - passwordsByEmail[lowerEmail] = p - } - - return staticPasswordsStorage{s, staticPasswords, passwordsByEmail, logger} -} - -func (s staticPasswordsStorage) isStatic(email string) bool { - _, ok := s.passwordsByEmail[strings.ToLower(email)] - return ok -} - -func (s staticPasswordsStorage) GetPassword(email string) (Password, error) { - // TODO(ericchiang): BLAH. We really need to figure out how to handle - // lower cased emails better. - email = strings.ToLower(email) - if password, ok := s.passwordsByEmail[email]; ok { - return password, nil - } - return s.Storage.GetPassword(email) -} - -func (s staticPasswordsStorage) ListPasswords() ([]Password, error) { - passwords, err := s.Storage.ListPasswords() - if err != nil { - return nil, err - } - - n := 0 - for _, password := range passwords { - // If an entry has the same email as those provided in the static - // values, prefer the static value. 
- if !s.isStatic(password.Email) { - passwords[n] = password - n++ - } - } - return append(passwords[:n], s.passwords...), nil -} - -func (s staticPasswordsStorage) CreatePassword(p Password) error { - if s.isStatic(p.Email) { - return errors.New("static passwords: read-only cannot create password") - } - return s.Storage.CreatePassword(p) -} - -func (s staticPasswordsStorage) DeletePassword(email string) error { - if s.isStatic(email) { - return errors.New("static passwords: read-only cannot delete password") - } - return s.Storage.DeletePassword(email) -} - -func (s staticPasswordsStorage) UpdatePassword(email string, updater func(old Password) (Password, error)) error { - if s.isStatic(email) { - return errors.New("static passwords: read-only cannot update password") - } - return s.Storage.UpdatePassword(email, updater) -} - -// staticConnectorsStorage represents a storage with read-only set of connectors. -type staticConnectorsStorage struct { - Storage - - // A read-only set of connectors. - connectors []Connector - connectorsByID map[string]Connector -} - -// WithStaticConnectors returns a storage with a read-only set of Connectors. Write actions, -// such as updating existing Connectors, will fail. -func WithStaticConnectors(s Storage, staticConnectors []Connector) Storage { - connectorsByID := make(map[string]Connector, len(staticConnectors)) - for _, c := range staticConnectors { - connectorsByID[c.ID] = c - } - return staticConnectorsStorage{s, staticConnectors, connectorsByID} -} - -func (s staticConnectorsStorage) isStatic(id string) bool { - _, ok := s.connectorsByID[id] - return ok -} - -func (s staticConnectorsStorage) GetConnector(id string) (Connector, error) { - if connector, ok := s.connectorsByID[id]; ok { - return connector, nil - } - return s.Storage.GetConnector(id) -} - -func (s staticConnectorsStorage) ListConnectors() ([]Connector, error) { - connectors, err := s.Storage.ListConnectors() - if err != nil { - return nil, err - } - - n := 0 - for _, connector := range connectors { - // If an entry has the same id as those provided in the static - // values, prefer the static value. - if !s.isStatic(connector.ID) { - connectors[n] = connector - n++ - } - } - return append(connectors[:n], s.connectors...), nil -} - -func (s staticConnectorsStorage) CreateConnector(c Connector) error { - if s.isStatic(c.ID) { - return errors.New("static connectors: read-only cannot create connector") - } - return s.Storage.CreateConnector(c) -} - -func (s staticConnectorsStorage) DeleteConnector(id string) error { - if s.isStatic(id) { - return errors.New("static connectors: read-only cannot delete connector") - } - return s.Storage.DeleteConnector(id) -} - -func (s staticConnectorsStorage) UpdateConnector(id string, updater func(old Connector) (Connector, error)) error { - if s.isStatic(id) { - return errors.New("static connectors: read-only cannot update connector") - } - return s.Storage.UpdateConnector(id, updater) -} diff --git a/vendor/github.com/dexidp/dex/storage/storage.go b/vendor/github.com/dexidp/dex/storage/storage.go deleted file mode 100644 index 5bbb2b3..0000000 --- a/vendor/github.com/dexidp/dex/storage/storage.go +++ /dev/null @@ -1,344 +0,0 @@ -package storage - -import ( - "crypto/rand" - "encoding/base32" - "errors" - "io" - "strings" - "time" - - jose "gopkg.in/square/go-jose.v2" -) - -var ( - // ErrNotFound is the error returned by storages if a resource cannot be found. 
- ErrNotFound = errors.New("not found") - - // ErrAlreadyExists is the error returned by storages if a resource ID is taken during a create. - ErrAlreadyExists = errors.New("ID already exists") -) - -// Kubernetes only allows lower case letters for names. -// -// TODO(ericchiang): refactor ID creation onto the storage. -var encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567") - -// NewID returns a random string which can be used as an ID for objects. -func NewID() string { - buff := make([]byte, 16) // 128 bit random ID. - if _, err := io.ReadFull(rand.Reader, buff); err != nil { - panic(err) - } - // Avoid the identifier to begin with number and trim padding - return string(buff[0]%26+'a') + strings.TrimRight(encoding.EncodeToString(buff[1:]), "=") -} - -// GCResult returns the number of objects deleted by garbage collection. -type GCResult struct { - AuthRequests int64 - AuthCodes int64 -} - -// Storage is the storage interface used by the server. Implementations are -// required to be able to perform atomic compare-and-swap updates and either -// support timezones or standardize on UTC. -type Storage interface { - Close() error - - // TODO(ericchiang): Let the storages set the IDs of these objects. - CreateAuthRequest(a AuthRequest) error - CreateClient(c Client) error - CreateAuthCode(c AuthCode) error - CreateRefresh(r RefreshToken) error - CreatePassword(p Password) error - CreateOfflineSessions(s OfflineSessions) error - CreateConnector(c Connector) error - - // TODO(ericchiang): return (T, bool, error) so we can indicate not found - // requests that way instead of using ErrNotFound. - GetAuthRequest(id string) (AuthRequest, error) - GetAuthCode(id string) (AuthCode, error) - GetClient(id string) (Client, error) - GetKeys() (Keys, error) - GetRefresh(id string) (RefreshToken, error) - GetPassword(email string) (Password, error) - GetOfflineSessions(userID string, connID string) (OfflineSessions, error) - GetConnector(id string) (Connector, error) - - ListClients() ([]Client, error) - ListRefreshTokens() ([]RefreshToken, error) - ListPasswords() ([]Password, error) - ListConnectors() ([]Connector, error) - - // Delete methods MUST be atomic. - DeleteAuthRequest(id string) error - DeleteAuthCode(code string) error - DeleteClient(id string) error - DeleteRefresh(id string) error - DeletePassword(email string) error - DeleteOfflineSessions(userID string, connID string) error - DeleteConnector(id string) error - - // Update methods take a function for updating an object then performs that update within - // a transaction. "updater" functions may be called multiple times by a single update call. - // - // Because new fields may be added to resources, updaters should only modify existing - // fields on the old object rather then creating new structs. 
For example: - // - // updater := func(old storage.Client) (storage.Client, error) { - // old.Secret = newSecret - // return old, nil - // } - // if err := s.UpdateClient(clientID, updater); err != nil { - // // update failed, handle error - // } - // - UpdateClient(id string, updater func(old Client) (Client, error)) error - UpdateKeys(updater func(old Keys) (Keys, error)) error - UpdateAuthRequest(id string, updater func(a AuthRequest) (AuthRequest, error)) error - UpdateRefreshToken(id string, updater func(r RefreshToken) (RefreshToken, error)) error - UpdatePassword(email string, updater func(p Password) (Password, error)) error - UpdateOfflineSessions(userID string, connID string, updater func(s OfflineSessions) (OfflineSessions, error)) error - UpdateConnector(id string, updater func(c Connector) (Connector, error)) error - - // GarbageCollect deletes all expired AuthCodes and AuthRequests. - GarbageCollect(now time.Time) (GCResult, error) -} - -// Client represents an OAuth2 client. -// -// For further reading see: -// * Trusted peers: https://developers.google.com/identity/protocols/CrossClientAuth -// * Public clients: https://developers.google.com/api-client-library/python/auth/installed-app -type Client struct { - // Client ID and secret used to identify the client. - ID string `json:"id" yaml:"id"` - IDEnv string `json:"idEnv" yaml:"idEnv"` - Secret string `json:"secret" yaml:"secret"` - SecretEnv string `json:"secretEnv" yaml:"secretEnv"` - - // A registered set of redirect URIs. When redirecting from dex to the client, the URI - // requested to redirect to MUST match one of these values, unless the client is "public". - RedirectURIs []string `json:"redirectURIs" yaml:"redirectURIs"` - - // TrustedPeers are a list of peers which can issue tokens on this client's behalf using - // the dynamic "oauth2:server:client_id:(client_id)" scope. If a peer makes such a request, - // this client's ID will appear as the ID Token's audience. - // - // Clients inherently trust themselves. - TrustedPeers []string `json:"trustedPeers" yaml:"trustedPeers"` - - // Public clients must use either use a redirectURL 127.0.0.1:X or "urn:ietf:wg:oauth:2.0:oob" - Public bool `json:"public" yaml:"public"` - - // Name and LogoURL used when displaying this client to the end user. - Name string `json:"name" yaml:"name"` - LogoURL string `json:"logoURL" yaml:"logoURL"` -} - -// Claims represents the ID Token claims supported by the server. -type Claims struct { - UserID string - Username string - PreferredUsername string - Email string - EmailVerified bool - - Groups []string -} - -// AuthRequest represents a OAuth2 client authorization request. It holds the state -// of a single auth flow up to the point that the user authorizes the client. -type AuthRequest struct { - // ID used to identify the authorization request. - ID string - - // ID of the client requesting authorization from a user. - ClientID string - - // Values parsed from the initial request. These describe the resources the client is - // requesting as well as values describing the form of the response. - ResponseTypes []string - Scopes []string - RedirectURI string - Nonce string - State string - - // The client has indicated that the end user must be shown an approval prompt - // on all requests. The server cannot cache their initial action for subsequent - // attempts. - ForceApprovalPrompt bool - - Expiry time.Time - - // Has the user proved their identity through a backing identity provider? 
- // - // If false, the following fields are invalid. - LoggedIn bool - - // The identity of the end user. Generally nil until the user authenticates - // with a backend. - Claims Claims - - // The connector used to login the user and any data the connector wishes to persists. - // Set when the user authenticates. - ConnectorID string - ConnectorData []byte -} - -// AuthCode represents a code which can be exchanged for an OAuth2 token response. -// -// This value is created once an end user has authorized a client, the server has -// redirect the end user back to the client, but the client hasn't exchanged the -// code for an access_token and id_token. -type AuthCode struct { - // Actual string returned as the "code" value. - ID string - - // The client this code value is valid for. When exchanging the code for a - // token response, the client must use its client_secret to authenticate. - ClientID string - - // As part of the OAuth2 spec when a client makes a token request it MUST - // present the same redirect_uri as the initial redirect. This values is saved - // to make this check. - // - // https://tools.ietf.org/html/rfc6749#section-4.1.3 - RedirectURI string - - // If provided by the client in the initial request, the provider MUST create - // a ID Token with this nonce in the JWT payload. - Nonce string - - // Scopes authorized by the end user for the client. - Scopes []string - - // Authentication data provided by an upstream source. - ConnectorID string - ConnectorData []byte - Claims Claims - - Expiry time.Time -} - -// RefreshToken is an OAuth2 refresh token which allows a client to request new -// tokens on the end user's behalf. -type RefreshToken struct { - ID string - - // A single token that's rotated every time the refresh token is refreshed. - // - // May be empty. - Token string - - CreatedAt time.Time - LastUsed time.Time - - // Client this refresh token is valid for. - ClientID string - - // Authentication data provided by an upstream source. - ConnectorID string - ConnectorData []byte - Claims Claims - - // Scopes present in the initial request. Refresh requests may specify a set - // of scopes different from the initial request when refreshing a token, - // however those scopes must be encompassed by this set. - Scopes []string - - // Nonce value supplied during the initial redirect. This is required to be part - // of the claims of any future id_token generated by the client. - Nonce string -} - -// RefreshTokenRef is a reference object that contains metadata about refresh tokens. -type RefreshTokenRef struct { - ID string - - // Client the refresh token is valid for. - ClientID string - - CreatedAt time.Time - LastUsed time.Time -} - -// OfflineSessions objects are sessions pertaining to users with refresh tokens. -type OfflineSessions struct { - // UserID of an end user who has logged in to the server. - UserID string - - // The ID of the connector used to login the user. - ConnID string - - // Refresh is a hash table of refresh token reference objects - // indexed by the ClientID of the refresh token. - Refresh map[string]*RefreshTokenRef - - // Authentication data provided by an upstream source. - ConnectorData []byte -} - -// Password is an email to password mapping managed by the storage. -type Password struct { - // Email and identifying name of the password. Emails are assumed to be valid and - // determining that an end-user controls the address is left to an outside application. 
- // - // Emails are case insensitive and should be standardized by the storage. - // - // Storages that don't support an extended character set for IDs, such as '.' and '@' - // (cough cough, kubernetes), must map this value appropriately. - Email string `json:"email"` - - // Bcrypt encoded hash of the password. This package enforces a min cost value of 10 - Hash []byte `json:"hash"` - - // Bcrypt encoded hash of the password set in environment variable of this name. - HashFromEnv string `json:"hashFromEnv"` - - // Optional username to display. NOT used during login. - Username string `json:"username"` - - // Randomly generated user ID. This is NOT the primary ID of the Password object. - UserID string `json:"userID"` -} - -// Connector is an object that contains the metadata about connectors used to login to Dex. -type Connector struct { - // ID that will uniquely identify the connector object. - ID string `json:"id"` - // The Type of the connector. E.g. 'oidc' or 'ldap' - Type string `json:"type"` - // The Name of the connector that is used when displaying it to the end user. - Name string `json:"name"` - // ResourceVersion is the static versioning used to keep track of dynamic configuration - // changes to the connector object made by the API calls. - ResourceVersion string `json:"resourceVersion"` - // Config holds all the configuration information specific to the connector type. Since there - // no generic struct we can use for this purpose, it is stored as a byte stream. - Config []byte `json:"email"` -} - -// VerificationKey is a rotated signing key which can still be used to verify -// signatures. -type VerificationKey struct { - PublicKey *jose.JSONWebKey `json:"publicKey"` - Expiry time.Time `json:"expiry"` -} - -// Keys hold encryption and signing keys. -type Keys struct { - // Key for creating and verifying signatures. These may be nil. - SigningKey *jose.JSONWebKey - SigningKeyPub *jose.JSONWebKey - - // Old signing keys which have been rotated but can still be used to validate - // existing signatures. - VerificationKeys []VerificationKey - - // The next time the signing key will rotate. - // - // For caching purposes, implementations MUST NOT update keys before this time. - NextRotation time.Time -} diff --git a/vendor/github.com/dexidp/dex/version/version.go b/vendor/github.com/dexidp/dex/version/version.go deleted file mode 100644 index 04f684a..0000000 --- a/vendor/github.com/dexidp/dex/version/version.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package version contains version information for this app. -package version - -// Version is set by the build scripts. -var Version = "was not built properly" diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md deleted file mode 100644 index 25fdaf6..0000000 --- a/vendor/github.com/fatih/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md deleted file mode 100644 index 42d9abc..0000000 --- a/vendor/github.com/fatih/color/README.md +++ /dev/null @@ -1,182 +0,0 @@ -# Archived project. No maintenance. - -This project is not maintained anymore and is archived. Feel free to fork and -make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/) - -Thanks to everyone for their valuable feedback and contributions. - - -# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! The API can be used in several ways, pick one that -suits you. - - -![Color](https://i.imgur.com/c1JI0lA.png) - - -## Install - -```bash -go get github.com/fatih/color -``` - -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! 
-red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Use your own output (io.Writer) - -```go -// Use your own io.Writer output -color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - -blue := color.New(color.FgBlue) -blue.Fprint(writer, "This will print text in blue.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Custom fprint functions (FprintFunc) - -```go -blue := color.New(FgBlue).FprintfFunc() -blue(myWriter, "important notice: %s", stars) - -// Mix up with multiple attributes -success := color.New(color.Bold, color.FgGreen).FprintlnFunc() -success(myWriter, "Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go -// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`) - -`Color` has support to disable/enable colors both globally and for single color -definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You -can easily disable the color output with: - -```go - -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details - diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go deleted file mode 100644 index 91c8e9f..0000000 --- a/vendor/github.com/fatih/color/color.go +++ /dev/null @@ -1,603 +0,0 @@ -package color - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // NoColor defines if the output is colorized or not. It's dynamically set to - // false or true based on the stdout's file descriptor referring to a terminal - // or not. This is a global option and affects all colors. For more control - // over each color block use the methods DisableColor() individually. - NoColor = os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - - // Output defines the standard output of the print functions. By default - // os.Stdout is used. - Output = colorable.NewColorableStdout() - - // Error defines a color supporting writer for os.Stderr. - Error = colorable.NewColorableStderr() - - // colorsCache is used to reduce the count of created Color objects and - // allows to reuse already created objects with required Attribute. - colorsCache = make(map[Attribute]*Color) - colorsCacheMu sync.Mutex // protects colorsCache -) - -// Color defines a custom color object which is defined by SGR parameters. -type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. -func New(value ...Attribute) *Color { - c := &Color{params: make([]Attribute, 0)} - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. It will change the color of -// output with the given SGR parameters until color.Unset() is called. -func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. 
-func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -func (c *Color) setWriter(w io.Writer) *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(w, c.format()) - return c -} - -func (c *Color) unsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } - - if NoColor { - return - } - - fmt.Fprintf(w, "%s[%dm", escape, Reset) -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - -// Fprint formats using the default formats for its operands and writes to w. -// Spaces are added between operands when neither is a string. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprint(w, a...) -} - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Fprintf formats according to a format specifier and writes to w. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintf(w, format, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Fprintln formats using the default formats for its operands and writes to w. -// Spaces are always added between operands and a newline is appended. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. Spaces are always added between operands and a newline is -// appended. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) 
-} - -// Sprint is just like Print, but returns a string instead of printing it. -func (c *Color) Sprint(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) -} - -// Sprintln is just like Println, but returns a string instead of printing it. -func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) -} - -// Sprintf is just like Printf, but returns a string instead of printing it. -func (c *Color) Sprintf(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) -} - -// FprintFunc returns a new function that prints the passed arguments as -// colorized with color.Fprint(). -func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprint(w, a...) - } -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Print(a...) - } -} - -// FprintfFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintf(). -func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { - return func(w io.Writer, format string, a ...interface{}) { - c.Fprintf(w, format, a...) - } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { - c.Printf(format, a...) - } -} - -// FprintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintln(). -func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprintln(w, a...) - } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). -func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Println(a...) - } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. 
-func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. -func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user setted action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. -func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) - } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is a convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is a convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is a convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is a convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) 
} - -// Blue is a convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is a convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is a convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is a convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is a convenient helper function to return a string with black -// foreground. -func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is a convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is a convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is a convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } - -// BlueString is a convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is a convenient helper function to return a string with magenta -// foreground. -func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is a convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is a convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } - -// HiBlack is a convenient helper function to print with hi-intensity black foreground. A -// newline is appended to format by default. -func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } - -// HiRed is a convenient helper function to print with hi-intensity red foreground. A -// newline is appended to format by default. -func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } - -// HiGreen is a convenient helper function to print with hi-intensity green foreground. A -// newline is appended to format by default. -func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } - -// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. -// A newline is appended to format by default. -func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } - -// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A -// newline is appended to format by default. -func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) 
} - -// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. -// A newline is appended to format by default. -func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } - -// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A -// newline is appended to format by default. -func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } - -// HiWhite is a convenient helper function to print with hi-intensity white foreground. A -// newline is appended to format by default. -func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } - -// HiBlackString is a convenient helper function to return a string with hi-intensity black -// foreground. -func HiBlackString(format string, a ...interface{}) string { - return colorString(format, FgHiBlack, a...) -} - -// HiRedString is a convenient helper function to return a string with hi-intensity red -// foreground. -func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } - -// HiGreenString is a convenient helper function to return a string with hi-intensity green -// foreground. -func HiGreenString(format string, a ...interface{}) string { - return colorString(format, FgHiGreen, a...) -} - -// HiYellowString is a convenient helper function to return a string with hi-intensity yellow -// foreground. -func HiYellowString(format string, a ...interface{}) string { - return colorString(format, FgHiYellow, a...) -} - -// HiBlueString is a convenient helper function to return a string with hi-intensity blue -// foreground. -func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } - -// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta -// foreground. -func HiMagentaString(format string, a ...interface{}) string { - return colorString(format, FgHiMagenta, a...) -} - -// HiCyanString is a convenient helper function to return a string with hi-intensity cyan -// foreground. -func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } - -// HiWhiteString is a convenient helper function to return a string with hi-intensity white -// foreground. -func HiWhiteString(format string, a ...interface{}) string { - return colorString(format, FgHiWhite, a...) -} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go deleted file mode 100644 index cf1e965..0000000 --- a/vendor/github.com/fatih/color/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") - -However there are times where custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. 
- - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) - - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") - - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") - - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") - -You can create PrintXxx functions to simplify even more: - - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) - - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") - -You can also FprintXxx functions to pass your own io.Writer: - - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") - - -Or create SprintXxx functions to mix strings with other non-colorized strings: - - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() - - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) - -Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and -set the output to color.Output: - - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) - -Using with existing code is possible. Just use the Set() method to set the -standard output to the given parameters. That way a rewrite of an existing -code is not required. - - // Use handy standard colors. - color.Set(color.FgYellow) - - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") - - color.Unset() // don't forget to unset - - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function - - fmt.Println("All text will be now bold magenta.") - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - - var flagNoColor = flag.Bool("no-color", false, "Disable color output") - - if *flagNoColor { - color.NoColor = true // disables colorized output - } - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - - c := color.New(color.FgCyan) - c.Println("Prints cyan text") - - c.DisableColor() - c.Println("This is printed without any color") - - c.EnableColor() - c.Println("This prints again cyan...") -*/ -package color diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml deleted file mode 100644 index bfc4212..0000000 --- a/vendor/github.com/felixge/httpsnoop/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt deleted file mode 100644 index e028b46..0000000 --- a/vendor/github.com/felixge/httpsnoop/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile deleted file mode 100644 index 2d84889..0000000 --- a/vendor/github.com/felixge/httpsnoop/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: ci generate clean - -ci: clean generate - go test -v ./... - -generate: - go generate . - -clean: - rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md deleted file mode 100644 index ae44137..0000000 --- a/vendor/github.com/felixge/httpsnoop/README.md +++ /dev/null @@ -1,94 +0,0 @@ -# httpsnoop - -Package httpsnoop provides an easy way to capture http related metrics (i.e. -response time, bytes written, and http status code) from your application's -http.Handlers. - -Doing this requires non-trivial wrapping of the http.ResponseWriter interface, -which is also exposed for users interested in a more low-level API. - -[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) -[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) - -## Usage Example - -```go -// myH is your app's http handler, perhaps a http.ServeMux or similar. -var myH http.Handler -// wrappedH wraps myH in order to log every request. 
-wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - m := httpsnoop.CaptureMetrics(myH, w, r) - log.Printf( - "%s %s (code=%d dt=%s written=%d)", - r.Method, - r.URL, - m.Code, - m.Duration, - m.Written, - ) -}) -http.ListenAndServe(":8080", wrappedH) -``` - -## Why this package exists - -Instrumenting an application's http.Handler is surprisingly difficult. - -However if you google for e.g. "capture ResponseWriter status code" you'll find -lots of advise and code examples that suggest it to be a fairly trivial -undertaking. Unfortunately everything I've seen so far has a high chance of -breaking your application. - -The main problem is that a `http.ResponseWriter` often implements additional -interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and -`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` -in your own struct that also implements the `http.ResponseWriter` interface -will hide the additional interfaces mentioned above. This has a high change of -introducing subtle bugs into any non-trivial application. - -Another approach I've seen people take is to return a struct that implements -all of the interfaces above. However, that's also problematic, because it's -difficult to fake some of these interfaces behaviors when the underlying -`http.ResponseWriter` doesn't have an implementation. It's also dangerous, -because an application may choose to operate differently, merely because it -detects the presence of these additional interfaces. - -This package solves this problem by checking which additional interfaces a -`http.ResponseWriter` implements, returning a wrapped version implementing the -exact same set of interfaces. - -Additionally this package properly handles edge cases such as `WriteHeader` not -being called, or called more than once, as well as concurrent calls to -`http.ResponseWriter` methods, and even calls happening after the wrapped -`ServeHTTP` has already returned. - -Unfortunately this package is not perfect either. It's possible that it is -still missing some interfaces provided by the go core (let me know if you find -one), and it won't work for applications adding their own interfaces into the -mix. - -However, hopefully the explanation above has sufficiently scared you of rolling -your own solution to this problem. httpsnoop may still break your application, -but at least it tries to avoid it as much as possible. - -Anyway, the real problem here is that smuggling additional interfaces inside -`http.ResponseWriter` is a problematic design choice, but it probably goes as -deep as the Go language specification itself. But that's okay, I still prefer -Go over the alternatives ;). - -## Performance - -``` -BenchmarkBaseline-8 20000 94912 ns/op -BenchmarkCaptureMetrics-8 20000 95461 ns/op -``` - -As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an -overhead of ~500 ns per http request on my machine. However, the margin of -error appears to be larger than that, therefor it should be reasonable to -assume that the overhead introduced by `CaptureMetrics` is absolutely -negligible. 
- -## License - -MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go deleted file mode 100644 index 4c45b1a..0000000 --- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go +++ /dev/null @@ -1,84 +0,0 @@ -package httpsnoop - -import ( - "io" - "net/http" - "sync" - "time" -) - -// Metrics holds metrics captured from CaptureMetrics. -type Metrics struct { - // Code is the first http response code passed to the WriteHeader func of - // the ResponseWriter. If no such call is made, a default code of 200 is - // assumed instead. - Code int - // Duration is the time it took to execute the handler. - Duration time.Duration - // Written is the number of bytes successfully written by the Write or - // ReadFrom function of the ResponseWriter. ResponseWriters may also write - // data to their underlaying connection directly (e.g. headers), but those - // are not tracked. Therefor the number of Written bytes will usually match - // the size of the response body. - Written int64 -} - -// CaptureMetrics wraps the given hnd, executes it with the given w and r, and -// returns the metrics it captured from it. -func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { - return CaptureMetricsFn(w, func(ww http.ResponseWriter) { - hnd.ServeHTTP(ww, r) - }) -} - -// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the -// resulting metrics. This is very similar to CaptureMetrics (which is just -// sugar on top of this func), but is a more usable interface if your -// application doesn't use the Go http.Handler interface. -func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { - var ( - start = time.Now() - m = Metrics{Code: http.StatusOK} - headerWritten bool - lock sync.Mutex - hooks = Hooks{ - WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { - return func(code int) { - next(code) - lock.Lock() - defer lock.Unlock() - if !headerWritten { - m.Code = code - headerWritten = true - } - } - }, - - Write: func(next WriteFunc) WriteFunc { - return func(p []byte) (int, error) { - n, err := next(p) - lock.Lock() - defer lock.Unlock() - m.Written += int64(n) - headerWritten = true - return n, err - } - }, - - ReadFrom: func(next ReadFromFunc) ReadFromFunc { - return func(src io.Reader) (int64, error) { - n, err := next(src) - lock.Lock() - defer lock.Unlock() - headerWritten = true - m.Written += n - return n, err - } - }, - } - ) - - fn(Wrap(w, hooks)) - m.Duration = time.Since(start) - return m -} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go deleted file mode 100644 index 203c35b..0000000 --- a/vendor/github.com/felixge/httpsnoop/docs.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package httpsnoop provides an easy way to capture http related metrics (i.e. -// response time, bytes written, and http status code) from your application's -// http.Handlers. -// -// Doing this requires non-trivial wrapping of the http.ResponseWriter -// interface, which is also exposed for users interested in a more low-level -// API. 
-package httpsnoop - -//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go deleted file mode 100644 index 41a20da..0000000 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go +++ /dev/null @@ -1,385 +0,0 @@ -// +build go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT - -package httpsnoop - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// HeaderFunc is part of the http.ResponseWriter interface. -type HeaderFunc func() http.Header - -// WriteHeaderFunc is part of the http.ResponseWriter interface. -type WriteHeaderFunc func(code int) - -// WriteFunc is part of the http.ResponseWriter interface. -type WriteFunc func(b []byte) (int, error) - -// FlushFunc is part of the http.Flusher interface. -type FlushFunc func() - -// CloseNotifyFunc is part of the http.CloseNotifier interface. -type CloseNotifyFunc func() <-chan bool - -// HijackFunc is part of the http.Hijacker interface. -type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) - -// ReadFromFunc is part of the io.ReaderFrom interface. -type ReadFromFunc func(src io.Reader) (int64, error) - -// PushFunc is part of the http.Pusher interface. -type PushFunc func(target string, opts *http.PushOptions) error - -// Hooks defines a set of method interceptors for methods included in -// http.ResponseWriter as well as some others. You can think of them as -// middleware for the function calls they target. See Wrap for more details. -type Hooks struct { - Header func(HeaderFunc) HeaderFunc - WriteHeader func(WriteHeaderFunc) WriteHeaderFunc - Write func(WriteFunc) WriteFunc - Flush func(FlushFunc) FlushFunc - CloseNotify func(CloseNotifyFunc) CloseNotifyFunc - Hijack func(HijackFunc) HijackFunc - ReadFrom func(ReadFromFunc) ReadFromFunc - Push func(PushFunc) PushFunc -} - -// Wrap returns a wrapped version of w that provides the exact same interface -// as w. Specifically if w implements any combination of: -// -// - http.Flusher -// - http.CloseNotifier -// - http.Hijacker -// - io.ReaderFrom -// - http.Pusher -// -// The wrapped version will implement the exact same combination. If no hooks -// are set, the wrapped version also behaves exactly as w. Hooks targeting -// methods not supported by w are ignored. Any other hooks will intercept the -// method they target and may modify the call's arguments and/or return values. -// The CaptureMetrics implementation serves as a working example for how the -// hooks can be used. 
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { - rw := &rw{w: w, h: hooks} - _, i0 := w.(http.Flusher) - _, i1 := w.(http.CloseNotifier) - _, i2 := w.(http.Hijacker) - _, i3 := w.(io.ReaderFrom) - _, i4 := w.(http.Pusher) - switch { - // combination 1/32 - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{rw} - // combination 2/32 - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - }{rw, rw} - // combination 3/32 - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{rw, rw} - // combination 4/32 - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - http.Pusher - }{rw, rw, rw} - // combination 5/32 - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{rw, rw} - // combination 6/32 - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{rw, rw, rw} - // combination 7/32 - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{rw, rw, rw} - // combination 8/32 - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw} - // combination 9/32 - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{rw, rw} - // combination 10/32 - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{rw, rw, rw} - // combination 11/32 - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw} - // combination 12/32 - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw} - // combination 13/32 - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Hijacker - }{rw, rw, rw} - // combination 14/32 - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Hijacker - http.Pusher - }{rw, rw, rw, rw} - // combination 15/32 - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 16/32 - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 17/32 - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{rw, rw} - // combination 18/32 - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.Pusher - }{rw, rw, rw} - // combination 19/32 - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{rw, rw, rw} - // combination 20/32 - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw} - // combination 21/32 - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - http.Hijacker - }{rw, rw, rw} - // combination 22/32 - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.Hijacker - http.Pusher - }{rw, rw, rw, rw} 
- // combination 23/32 - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 24/32 - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 25/32 - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - }{rw, rw, rw} - // combination 26/32 - case i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Pusher - }{rw, rw, rw, rw} - // combination 27/32 - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 28/32 - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 29/32 - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw} - // combination 30/32 - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 31/32 - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 32/32 - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - } - panic("unreachable") -} - -type rw struct { - w http.ResponseWriter - h Hooks -} - -func (w *rw) Header() http.Header { - f := w.w.(http.ResponseWriter).Header - if w.h.Header != nil { - f = w.h.Header(f) - } - return f() -} - -func (w *rw) WriteHeader(code int) { - f := w.w.(http.ResponseWriter).WriteHeader - if w.h.WriteHeader != nil { - f = w.h.WriteHeader(f) - } - f(code) -} - -func (w *rw) Write(b []byte) (int, error) { - f := w.w.(http.ResponseWriter).Write - if w.h.Write != nil { - f = w.h.Write(f) - } - return f(b) -} - -func (w *rw) Flush() { - f := w.w.(http.Flusher).Flush - if w.h.Flush != nil { - f = w.h.Flush(f) - } - f() -} - -func (w *rw) CloseNotify() <-chan bool { - f := w.w.(http.CloseNotifier).CloseNotify - if w.h.CloseNotify != nil { - f = w.h.CloseNotify(f) - } - return f() -} - -func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { - f := w.w.(http.Hijacker).Hijack - if w.h.Hijack != nil { - f = w.h.Hijack(f) - } - return f() -} - -func (w *rw) ReadFrom(src io.Reader) (int64, error) { - f := w.w.(io.ReaderFrom).ReadFrom - if w.h.ReadFrom != nil { - f = w.h.ReadFrom(f) - } - return f(src) -} - -func (w *rw) Push(target string, opts *http.PushOptions) error { - f := w.w.(http.Pusher).Push - if w.h.Push != nil { - f = w.h.Push(f) - } - return f(target, opts) -} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go deleted file mode 100644 index 36bb59b..0000000 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go +++ /dev/null @@ -1,243 +0,0 @@ -// +build !go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT - -package httpsnoop - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// 
HeaderFunc is part of the http.ResponseWriter interface. -type HeaderFunc func() http.Header - -// WriteHeaderFunc is part of the http.ResponseWriter interface. -type WriteHeaderFunc func(code int) - -// WriteFunc is part of the http.ResponseWriter interface. -type WriteFunc func(b []byte) (int, error) - -// FlushFunc is part of the http.Flusher interface. -type FlushFunc func() - -// CloseNotifyFunc is part of the http.CloseNotifier interface. -type CloseNotifyFunc func() <-chan bool - -// HijackFunc is part of the http.Hijacker interface. -type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) - -// ReadFromFunc is part of the io.ReaderFrom interface. -type ReadFromFunc func(src io.Reader) (int64, error) - -// Hooks defines a set of method interceptors for methods included in -// http.ResponseWriter as well as some others. You can think of them as -// middleware for the function calls they target. See Wrap for more details. -type Hooks struct { - Header func(HeaderFunc) HeaderFunc - WriteHeader func(WriteHeaderFunc) WriteHeaderFunc - Write func(WriteFunc) WriteFunc - Flush func(FlushFunc) FlushFunc - CloseNotify func(CloseNotifyFunc) CloseNotifyFunc - Hijack func(HijackFunc) HijackFunc - ReadFrom func(ReadFromFunc) ReadFromFunc -} - -// Wrap returns a wrapped version of w that provides the exact same interface -// as w. Specifically if w implements any combination of: -// -// - http.Flusher -// - http.CloseNotifier -// - http.Hijacker -// - io.ReaderFrom -// -// The wrapped version will implement the exact same combination. If no hooks -// are set, the wrapped version also behaves exactly as w. Hooks targeting -// methods not supported by w are ignored. Any other hooks will intercept the -// method they target and may modify the call's arguments and/or return values. -// The CaptureMetrics implementation serves as a working example for how the -// hooks can be used. 
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { - rw := &rw{w: w, h: hooks} - _, i0 := w.(http.Flusher) - _, i1 := w.(http.CloseNotifier) - _, i2 := w.(http.Hijacker) - _, i3 := w.(io.ReaderFrom) - switch { - // combination 1/16 - case !i0 && !i1 && !i2 && !i3: - return struct { - http.ResponseWriter - }{rw} - // combination 2/16 - case !i0 && !i1 && !i2 && i3: - return struct { - http.ResponseWriter - io.ReaderFrom - }{rw, rw} - // combination 3/16 - case !i0 && !i1 && i2 && !i3: - return struct { - http.ResponseWriter - http.Hijacker - }{rw, rw} - // combination 4/16 - case !i0 && !i1 && i2 && i3: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{rw, rw, rw} - // combination 5/16 - case !i0 && i1 && !i2 && !i3: - return struct { - http.ResponseWriter - http.CloseNotifier - }{rw, rw} - // combination 6/16 - case !i0 && i1 && !i2 && i3: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw} - // combination 7/16 - case !i0 && i1 && i2 && !i3: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Hijacker - }{rw, rw, rw} - // combination 8/16 - case !i0 && i1 && i2 && i3: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 9/16 - case i0 && !i1 && !i2 && !i3: - return struct { - http.ResponseWriter - http.Flusher - }{rw, rw} - // combination 10/16 - case i0 && !i1 && !i2 && i3: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{rw, rw, rw} - // combination 11/16 - case i0 && !i1 && i2 && !i3: - return struct { - http.ResponseWriter - http.Flusher - http.Hijacker - }{rw, rw, rw} - // combination 12/16 - case i0 && !i1 && i2 && i3: - return struct { - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 13/16 - case i0 && i1 && !i2 && !i3: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - }{rw, rw, rw} - // combination 14/16 - case i0 && i1 && !i2 && i3: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 15/16 - case i0 && i1 && i2 && !i3: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw} - // combination 16/16 - case i0 && i1 && i2 && i3: - return struct { - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - } - panic("unreachable") -} - -type rw struct { - w http.ResponseWriter - h Hooks -} - -func (w *rw) Header() http.Header { - f := w.w.(http.ResponseWriter).Header - if w.h.Header != nil { - f = w.h.Header(f) - } - return f() -} - -func (w *rw) WriteHeader(code int) { - f := w.w.(http.ResponseWriter).WriteHeader - if w.h.WriteHeader != nil { - f = w.h.WriteHeader(f) - } - f(code) -} - -func (w *rw) Write(b []byte) (int, error) { - f := w.w.(http.ResponseWriter).Write - if w.h.Write != nil { - f = w.h.Write(f) - } - return f(b) -} - -func (w *rw) Flush() { - f := w.w.(http.Flusher).Flush - if w.h.Flush != nil { - f = w.h.Flush(f) - } - f() -} - -func (w *rw) CloseNotify() <-chan bool { - f := w.w.(http.CloseNotifier).CloseNotify - if w.h.CloseNotify != nil { - f = w.h.CloseNotify(f) - } - return f() -} - -func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { - f := w.w.(http.Hijacker).Hijack - if w.h.Hijack != nil { - f = w.h.Hijack(f) - } - return f() -} - -func (w *rw) ReadFrom(src io.Reader) (int64, 
error) { - f := w.w.(io.ReaderFrom).ReadFrom - if w.h.ReadFrom != nil { - f = w.h.ReadFrom(f) - } - return f(src) -} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad8958..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 4cd0cba..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c3016..0000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41..0000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index be4d7ea..0000000 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,317 +0,0 @@ -# Changelog - -## v1.4.7 / 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## v1.4.2 / 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## v1.4.1 / 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## v1.4.0 / 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## v1.3.1 / 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## v1.3.0 / 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## v1.2.10 / 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## v1.2.9 / 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## v1.2.8 / 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## v1.2.5 / 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## v1.2.1 / 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## v1.2.0 / 
2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. 
- * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. - -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] 
kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index 828a60b..0000000 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. 
-* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index e180c8f..0000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index b2629e5..0000000 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# File system notifications for Go - -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) - -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -\* Android and iOS are untested. 
- -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. - -## Usage - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). - -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** - -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. 
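The FAQ above points at the per-user inotify watch limit on Linux; hitting it is what surfaces as the "no space left on device" error. A hedged, Linux-only sketch for reading that limit at runtime, using the procfs path named in the FAQ:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Path taken from the FAQ above; only present on Linux.
	b, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		fmt.Println("could not read inotify limit (not Linux?):", err)
		return
	}
	limit, err := strconv.Atoi(strings.TrimSpace(string(b)))
	if err != nil {
		fmt.Println("unexpected contents:", err)
		return
	}
	fmt.Println("inotify max_user_watches:", limit)
}
```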
- -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 - -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md - -## Related Projects - -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) - diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb..0000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 89cab04..0000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." 
-func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index d9fd1b8..0000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. 
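As a quick illustration of the Op bit flags and the String() formatting defined in the removed fsnotify.go above (a sketch against that vendored version; newer fsnotify releases may render events differently):

```go
package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// An event may carry several operations at once; Op is a bit set.
	ev := fsnotify.Event{Name: "/tmp/foo", Op: fsnotify.Create | fsnotify.Write}

	// With the vendored version shown above this prints: "/tmp/foo": CREATE|WRITE
	fmt.Println(ev)

	// Individual operations are tested with a mask, as in the README example.
	fmt.Println(ev.Op&fsnotify.Write == fsnotify.Write) // true
}
```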
- if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... 
- for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b33f2b4..0000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. 
- err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 86e76a3..0000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
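The removed inotify poller above relies on a classic self-pipe wakeup: the reader goroutine blocks in epoll_wait on both the inotify descriptor and a pipe, and Close() unblocks it by writing a single byte to that pipe. A hedged, Linux-only sketch of just the wakeup mechanism, using the same golang.org/x/sys/unix calls that appear in the deleted code:

```go
//go:build linux

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	epfd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
	if err != nil {
		panic(err)
	}
	defer unix.Close(epfd)

	// pipe[0] is the read end registered with epoll; pipe[1] is the write end
	// used to wake a blocked epoll_wait.
	var pipe [2]int
	if err := unix.Pipe2(pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC); err != nil {
		panic(err)
	}

	ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: int32(pipe[0])}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, pipe[0], &ev); err != nil {
		panic(err)
	}

	go func() {
		time.Sleep(100 * time.Millisecond)
		unix.Write(pipe[1], []byte{0}) // the "wake" call
	}()

	events := make([]unix.EpollEvent, 1)
	n, err := unix.EpollWait(epfd, events, -1) // blocks until the byte arrives
	fmt.Println("epoll_wait returned:", n, err)
}
```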
-func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. 
There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 2306c46..0000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 870c4d6..0000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
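For completeness, a hedged sketch of the kevent registration pattern that the removed kqueue.go wraps in its register()/read() helpers: watch one file for writes, deletes, renames, and attribute changes, then block until a single event arrives. BSD/macOS only; the watched path is hypothetical and error handling is minimal.

```go
//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/tmp/watched-file") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	kq, err := unix.Kqueue()
	if err != nil {
		panic(err)
	}
	defer unix.Close(kq)

	// Register the descriptor for vnode events, mirroring register() above.
	var change unix.Kevent_t
	unix.SetKevent(&change, int(f.Fd()), unix.EVFILT_VNODE,
		unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
	change.Fflags = unix.NOTE_WRITE | unix.NOTE_DELETE | unix.NOTE_RENAME | unix.NOTE_ATTRIB

	if _, err := unix.Kevent(kq, []unix.Kevent_t{change}, nil, nil); err != nil {
		panic(err)
	}

	// A nil timeout blocks indefinitely, unlike readEvents above, which polls
	// with keventWaitTime so it can also notice the done channel.
	events := make([]unix.Kevent_t, 1)
	n, err := unix.Kevent(kq, nil, events, nil)
	if err != nil {
		panic(err)
	}
	if n > 0 {
		fmt.Printf("kevent fflags: %#x\n", events[0].Fflags)
	}
}
```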
- -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f3..0000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31..0000000 --- a/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6ed..0000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36..0000000 --- a/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75..0000000 --- a/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. 
This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. - var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 5860074..0000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. 
-type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. 
- nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
- kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. 
-func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054..0000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). 
So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. 
- jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore deleted file mode 100644 index ba22c99..0000000 --- a/vendor/github.com/go-chi/chi/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.idea -*.sw? -.vscode diff --git a/vendor/github.com/go-chi/chi/.travis.yml b/vendor/github.com/go-chi/chi/.travis.yml deleted file mode 100644 index 7b8e26b..0000000 --- a/vendor/github.com/go-chi/chi/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -go: - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - -script: - - go get -d -t ./... - - go vet ./... - - go test ./... 
- - > - go_version=$(go version); - if [ ${go_version:13:4} = "1.12" ]; then - go get -u golang.org/x/tools/cmd/goimports; - goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :; - fi - diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md deleted file mode 100644 index 901ebc1..0000000 --- a/vendor/github.com/go-chi/chi/CHANGELOG.md +++ /dev/null @@ -1,166 +0,0 @@ -# Changelog - -## v4.0.4 (2020-03-24) - -- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) -- a few minor improvements and fixes -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 - - -## v4.0.3 (2020-01-09) - -- core: fix regexp routing to include default value when param is not matched -- middleware: rewrite of middleware.Compress -- middleware: suppress http.ErrAbortHandler in middleware.Recoverer -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 - - -## v4.0.2 (2019-02-26) - -- Minor fixes -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 - - -## v4.0.1 (2019-01-21) - -- Fixes issue with compress middleware: #382 #385 -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 - - -## v4.0.0 (2019-01-10) - -- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 -- router: respond with 404 on router with no routes (#362) -- router: additional check to ensure wildcard is at the end of a url pattern (#333) -- middleware: deprecate use of http.CloseNotifier (#347) -- middleware: fix RedirectSlashes to include query params on redirect (#334) -- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 - - -## v3.3.4 (2019-01-07) - -- Minor middleware improvements. No changes to core library/router. Moving v3 into its -- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 -- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 - - -## v3.3.3 (2018-08-27) - -- Minor release -- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 - - -## v3.3.2 (2017-12-22) - -- Support to route trailing slashes on mounted sub-routers (#281) -- middleware: new `ContentCharset` to check matching charsets. Thank you - @csucu for your community contribution! - - -## v3.3.1 (2017-11-20) - -- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types -- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value -- Minor bug fixes - - -## v3.3.0 (2017-10-10) - -- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage -- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function - - -## v3.2.1 (2017-08-31) - -- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface - and `Mux`. 
Match searches the mux's routing tree for a handler that matches the method/path -- Add new `RouteMethod` to `*Context` -- Add new `Routes` pointer to `*Context` -- Add new `middleware.GetHead` to route missing HEAD requests to GET handler -- Updated benchmarks (see README) - - -## v3.1.5 (2017-08-02) - -- Setup golint and go vet for the project -- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler` - to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler` - - -## v3.1.0 (2017-07-10) - -- Fix a few minor issues after v3 release -- Move `docgen` sub-pkg to https://github.com/go-chi/docgen -- Move `render` sub-pkg to https://github.com/go-chi/render -- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime - suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in - https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage. - - -## v3.0.0 (2017-06-21) - -- Major update to chi library with many exciting updates, but also some *breaking changes* -- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as - `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the - same router -- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example: - `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")` -- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as - `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like - in `_examples/custom-handler` -- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their - own using file handler with the stdlib, see `_examples/fileserver` for an example -- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()` -- Moved the chi project to its own organization, to allow chi-related community packages to - be easily discovered and supported, at: https://github.com/go-chi -- *NOTE:* please update your import paths to `"github.com/go-chi/chi"` -- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2 - - -## v2.1.0 (2017-03-30) - -- Minor improvements and update to the chi core library -- Introduced a brand new `chi/render` sub-package to complete the story of building - APIs to offer a pattern for managing well-defined request / response payloads. Please - check out the updated `_examples/rest` example for how it works. -- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface - - -## v2.0.0 (2017-01-06) - -- After many months of v2 being in an RC state with many companies and users running it in - production, the inclusion of some improvements to the middlewares, we are very pleased to - announce v2.0.0 of chi. - - -## v2.0.0-rc1 (2016-07-26) - -- Huge update! chi v2 is a large refactor targetting Go 1.7+. As of Go 1.7, the popular - community `"net/context"` package has been included in the standard library as `"context"` and - utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other - request-scoped values. We're very excited about the new context addition and are proud to - introduce chi v2, a minimal and powerful routing package for building large HTTP services, - with zero external dependencies. Chi focuses on idiomatic design and encourages the use of - stdlib HTTP handlers and middlwares. 
-- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` -- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` -- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`, - which provides direct access to URL routing parameters, the routing path and the matching - routing patterns. -- Users upgrading from chi v1 to v2, need to: - 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to - the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` - 2. Use `chi.URLParam(r *http.Request, paramKey string) string` - or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value - - -## v1.0.0 (2016-07-01) - -- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older. - - -## v0.9.0 (2016-03-31) - -- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33) -- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters - has changed to: `chi.URLParam(ctx, "id")` diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md deleted file mode 100644 index c0ac2df..0000000 --- a/vendor/github.com/go-chi/chi/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing - -## Prerequisites - -1. [Install Go][go-install]. -2. Download the sources and switch the working directory: - - ```bash - go get -u -d github.com/go-chi/chi - cd $GOPATH/src/github.com/go-chi/chi - ``` - -## Submitting a Pull Request - -A typical workflow is: - -1. [Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip] -2. [Create a topic branch.][branch] -3. Add tests for your change. -4. Run `go test`. If your tests pass, return to the step 3. -5. Implement the change and ensure the steps from the previous step pass. -6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. -7. [Add, commit and push your changes.][git-help] -8. [Submit a pull request.][pull-req] - -[go-install]: https://golang.org/doc/install -[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html -[fork]: https://help.github.com/articles/fork-a-repo -[branch]: http://learn.github.com/p/branching.html -[git-help]: https://guides.github.com -[pull-req]: https://help.github.com/articles/using-pull-requests diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE deleted file mode 100644 index d99f02f..0000000 --- a/vendor/github.com/go-chi/chi/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md deleted file mode 100644 index 31b35a7..0000000 --- a/vendor/github.com/go-chi/chi/README.md +++ /dev/null @@ -1,436 +0,0 @@ -# chi - - -[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] - -`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's -especially good at helping you write large REST API services that are kept maintainable as your -project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to -handle signaling, cancelation and request-scoped values across a handler chain. - -The focus of the project has been to seek out an elegant and comfortable design for writing -REST API servers, written during the development of the Pressly API service that powers our -public API service, which in turn powers all of our client-side applications. - -The key considerations of chi's design are: project structure, maintainability, standard http -handlers (stdlib-only), developer productivity, and deconstructing a large system into many small -parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also -included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! - -## Install - -`go get -u github.com/go-chi/chi` - - -## Features - -* **Lightweight** - cloc'd in ~1000 LOC for the chi router -* **Fast** - yes, see [benchmarks](#benchmarks) -* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` -* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting -* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts -* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) -* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown -* **No external dependencies** - plain ol' Go stdlib + net/http - - -## Examples - -See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples. - - -**As easy as:** - -```go -package main - -import ( - "net/http" - "github.com/go-chi/chi" -) - -func main() { - r := chi.NewRouter() - r.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("welcome")) - }) - http.ListenAndServe(":3000", r) -} -``` - -**REST Preview:** - -Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs -in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in -Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). 
- -I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed -above, they will show you all the features of chi and serve as a good form of documentation. - -```go -import ( - //... - "context" - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" -) - -func main() { - r := chi.NewRouter() - - // A good base middleware stack - r.Use(middleware.RequestID) - r.Use(middleware.RealIP) - r.Use(middleware.Logger) - r.Use(middleware.Recoverer) - - // Set a timeout value on the request context (ctx), that will signal - // through ctx.Done() that the request has timed out and further - // processing should be stopped. - r.Use(middleware.Timeout(60 * time.Second)) - - r.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hi")) - }) - - // RESTy routes for "articles" resource - r.Route("/articles", func(r chi.Router) { - r.With(paginate).Get("/", listArticles) // GET /articles - r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017 - - r.Post("/", createArticle) // POST /articles - r.Get("/search", searchArticles) // GET /articles/search - - // Regexp url parameters: - r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto - - // Subrouters: - r.Route("/{articleID}", func(r chi.Router) { - r.Use(ArticleCtx) - r.Get("/", getArticle) // GET /articles/123 - r.Put("/", updateArticle) // PUT /articles/123 - r.Delete("/", deleteArticle) // DELETE /articles/123 - }) - }) - - // Mount the admin sub-router - r.Mount("/admin", adminRouter()) - - http.ListenAndServe(":3333", r) -} - -func ArticleCtx(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - articleID := chi.URLParam(r, "articleID") - article, err := dbGetArticle(articleID) - if err != nil { - http.Error(w, http.StatusText(404), 404) - return - } - ctx := context.WithValue(r.Context(), "article", article) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -func getArticle(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - article, ok := ctx.Value("article").(*Article) - if !ok { - http.Error(w, http.StatusText(422), 422) - return - } - w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) -} - -// A completely separate router for administrator routes -func adminRouter() http.Handler { - r := chi.NewRouter() - r.Use(AdminOnly) - r.Get("/", adminIndex) - r.Get("/accounts", adminListAccounts) - return r -} - -func AdminOnly(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - perm, ok := ctx.Value("acl.permission").(YourPermissionType) - if !ok || !perm.IsAdmin() { - http.Error(w, http.StatusText(403), 403) - return - } - next.ServeHTTP(w, r) - }) -} -``` - - -## Router design - -chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). -The router is fully compatible with `net/http`. - -Built on top of the tree is the `Router` interface: - -```go -// Router consisting of the core routing methods used by chi's Mux, -// using only the standard net/http. -type Router interface { - http.Handler - Routes - - // Use appends one or more middlewares onto the Router stack. - Use(middlewares ...func(http.Handler) http.Handler) - - // With adds inline middlewares for an endpoint handler. 
- With(middlewares ...func(http.Handler) http.Handler) Router - - // Group adds a new inline-Router along the current routing - // path, with a fresh middleware stack for the inline-Router. - Group(fn func(r Router)) Router - - // Route mounts a sub-Router along a `pattern`` string. - Route(pattern string, fn func(r Router)) Router - - // Mount attaches another http.Handler along ./pattern/* - Mount(pattern string, h http.Handler) - - // Handle and HandleFunc adds routes for `pattern` that matches - // all HTTP methods. - Handle(pattern string, h http.Handler) - HandleFunc(pattern string, h http.HandlerFunc) - - // Method and MethodFunc adds routes for `pattern` that matches - // the `method` HTTP method. - Method(method, pattern string, h http.Handler) - MethodFunc(method, pattern string, h http.HandlerFunc) - - // HTTP-method routing along `pattern` - Connect(pattern string, h http.HandlerFunc) - Delete(pattern string, h http.HandlerFunc) - Get(pattern string, h http.HandlerFunc) - Head(pattern string, h http.HandlerFunc) - Options(pattern string, h http.HandlerFunc) - Patch(pattern string, h http.HandlerFunc) - Post(pattern string, h http.HandlerFunc) - Put(pattern string, h http.HandlerFunc) - Trace(pattern string, h http.HandlerFunc) - - // NotFound defines a handler to respond whenever a route could - // not be found. - NotFound(h http.HandlerFunc) - - // MethodNotAllowed defines a handler to respond whenever a method is - // not allowed. - MethodNotAllowed(h http.HandlerFunc) -} - -// Routes interface adds two methods for router traversal, which is also -// used by the github.com/go-chi/docgen package to generate documentation for Routers. -type Routes interface { - // Routes returns the routing tree in an easily traversable structure. - Routes() []Route - - // Middlewares returns the list of middlewares in use by the router. - Middlewares() Middlewares - - // Match searches the routing tree for a handler that matches - // the method/path - similar to routing a http request, but without - // executing the handler thereafter. - Match(rctx *Context, method, path string) bool -} -``` - -Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern -supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters -can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters -and `chi.URLParam(r, "*")` for a wildcard parameter. - - -### Middleware handlers - -chi's middlewares are just stdlib net/http middleware handlers. There is nothing special -about them, which means the router and all the tooling is designed to be compatible and -friendly with any middleware in the community. This offers much better extensibility and reuse -of packages and is at the heart of chi's purpose. - -Here is an example of a standard net/http middleware handler using the new request context -available in Go. This middleware sets a hypothetical user identifier on the request -context and calls the next handler in the chain. - -```go -// HTTP middleware setting a value on the request context -func MyMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := context.WithValue(r.Context(), "user", "123") - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} -``` - - -### Request handlers - -chi uses standard net/http request handlers. 
This little snippet is an example of a http.Handler -func that reads a user identifier from the request context - hypothetically, identifying -the user sending an authenticated request, validated+set by a previous middleware handler. - -```go -// HTTP handler accessing data from the request context. -func MyRequestHandler(w http.ResponseWriter, r *http.Request) { - user := r.Context().Value("user").(string) - w.Write([]byte(fmt.Sprintf("hi %s", user))) -} -``` - - -### URL parameters - -chi's router parses and stores URL parameters right onto the request context. Here is -an example of how to access URL params in your net/http handlers. And of course, middlewares -are able to access the same information. - -```go -// HTTP handler accessing the url routing parameters. -func MyRequestHandler(w http.ResponseWriter, r *http.Request) { - userID := chi.URLParam(r, "userID") // from a route like /users/{userID} - - ctx := r.Context() - key := ctx.Value("key").(string) - - w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key))) -} -``` - - -## Middlewares - -chi comes equipped with an optional `middleware` package, providing a suite of standard -`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible -with `net/http` can be used with chi's mux. - -### Core middlewares - ------------------------------------------------------------------------------------------------------------ -| chi/middleware Handler | description | -|:----------------------|:--------------------------------------------------------------------------------- -| AllowContentType | Explicit whitelist of accepted request Content-Types | -| BasicAuth | Basic HTTP authentication | -| Compress | Gzip compression for clients that accept compressed responses | -| GetHead | Automatically route undefined HEAD requests to GET handlers | -| Heartbeat | Monitoring endpoint to check the servers pulse | -| Logger | Logs the start and end of each request with the elapsed processing time | -| NoCache | Sets response headers to prevent clients from caching | -| Profiler | Easily attach net/http/pprof to your routers | -| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP | -| Recoverer | Gracefully absorb panics and prints the stack trace | -| RequestID | Injects a request ID into the context of each request | -| RedirectSlashes | Redirect slashes on routing paths | -| SetHeader | Short-hand middleware to set a response header key/value | -| StripSlashes | Strip slashes on routing paths | -| Throttle | Puts a ceiling on the number of concurrent requests | -| Timeout | Signals to the request context when the timeout deadline is reached | -| URLFormat | Parse extension from url and put it on request context | -| WithValue | Short-hand middleware to set a key/value on the request context | ------------------------------------------------------------------------------------------------------------ - -### Auxiliary middlewares & packages - -Please see https://github.com/go-chi for additional packages. 
- --------------------------------------------------------------------------------------------------------------------- -| package | description | -|:---------------------------------------------------|:------------------------------------------------------------- -| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) | -| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime | -| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication | -| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing | -| [httpcoala](https://github.com/go-chi/httpcoala) | HTTP request coalescer | -| [chi-authz](https://github.com/casbin/chi-authz) | Request ACL via https://github.com/hsluoyz/casbin | -| [phi](https://github.com/fate-lovely/phi) | Port chi to [fasthttp](https://github.com/valyala/fasthttp) | --------------------------------------------------------------------------------------------------------------------- - -please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware - - -## context? - -`context` is a tiny pkg that provides simple interface to signal context across call stacks -and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani) -and is available in stdlib since go1.7. - -Learn more at https://blog.golang.org/context - -and.. -* Docs: https://golang.org/pkg/context -* Source: https://github.com/golang/go/tree/master/src/context - - -## Benchmarks - -The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark - -Results as of Jan 9, 2019 with Go 1.11.4 on Linux X1 Carbon laptop - -```shell -BenchmarkChi_Param 3000000 475 ns/op 432 B/op 3 allocs/op -BenchmarkChi_Param5 2000000 696 ns/op 432 B/op 3 allocs/op -BenchmarkChi_Param20 1000000 1275 ns/op 432 B/op 3 allocs/op -BenchmarkChi_ParamWrite 3000000 505 ns/op 432 B/op 3 allocs/op -BenchmarkChi_GithubStatic 3000000 508 ns/op 432 B/op 3 allocs/op -BenchmarkChi_GithubParam 2000000 669 ns/op 432 B/op 3 allocs/op -BenchmarkChi_GithubAll 10000 134627 ns/op 87699 B/op 609 allocs/op -BenchmarkChi_GPlusStatic 3000000 402 ns/op 432 B/op 3 allocs/op -BenchmarkChi_GPlusParam 3000000 500 ns/op 432 B/op 3 allocs/op -BenchmarkChi_GPlus2Params 3000000 586 ns/op 432 B/op 3 allocs/op -BenchmarkChi_GPlusAll 200000 7237 ns/op 5616 B/op 39 allocs/op -BenchmarkChi_ParseStatic 3000000 408 ns/op 432 B/op 3 allocs/op -BenchmarkChi_ParseParam 3000000 488 ns/op 432 B/op 3 allocs/op -BenchmarkChi_Parse2Params 3000000 551 ns/op 432 B/op 3 allocs/op -BenchmarkChi_ParseAll 100000 13508 ns/op 11232 B/op 78 allocs/op -BenchmarkChi_StaticAll 20000 81933 ns/op 67826 B/op 471 allocs/op -``` - -Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc - -NOTE: the allocs in the benchmark above are from the calls to http.Request's -`WithContext(context.Context)` method that clones the http.Request, sets the `Context()` -on the duplicated (alloc'd) request and returns it the new request object. This is just -how setting context on a request in Go works. - - -## Credits - -* Carl Jackson for https://github.com/zenazn/goji - * Parts of chi's thinking comes from goji, and chi's middleware package - sources from goji. -* Armon Dadgar for https://github.com/armon/go-radix -* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) - -We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! 
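> Editor's aside (illustration only; not part of the vendored chi files being removed by this diff): the benchmark NOTE above attributes chi's per-route allocations to `http.Request.WithContext`, which clones the request when a value is attached to its context. A minimal, self-contained Go sketch of that behaviour follows; the `ctxKey` type and the example URL are made up for this sketch.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// ctxKey is a private key type to avoid collisions with string keys,
// mirroring the contextKey pattern used in the chi sources above
// (hypothetical name, for illustration only).
type ctxKey string

func main() {
	r, err := http.NewRequest(http.MethodGet, "http://example.com/articles/123", nil)
	if err != nil {
		panic(err)
	}

	// WithContext returns a shallow copy of the request carrying the new
	// context; this clone is the allocation the benchmark NOTE refers to,
	// not anything specific to chi's route matching.
	ctx := context.WithValue(r.Context(), ctxKey("articleID"), "123")
	r2 := r.WithContext(ctx)

	fmt.Println(r == r2)                                  // false: a new *http.Request was allocated
	fmt.Println(r2.Context().Value(ctxKey("articleID"))) // 123
}
```

As the NOTE says, this is simply how setting context values on a request works in net/http, so any router or middleware that stores per-request values pays the same cost.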
- - -## Beyond REST - -chi is just a http router that lets you decompose request handling into many smaller layers. -Many companies use chi to write REST services for their public APIs. But, REST is just a convention -for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server -system or network of microservices. - -Looking beyond REST, I also recommend some newer works in the field: -* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen -* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs -* [graphql](https://github.com/99designs/gqlgen) - Declarative query language -* [NATS](https://nats.io) - lightweight pub-sub - - -## License - -Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) - -Licensed under [MIT License](./LICENSE) - -[GoDoc]: https://godoc.org/github.com/go-chi/chi -[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg -[Travis]: https://travis-ci.org/go-chi/chi -[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go deleted file mode 100644 index 88e6846..0000000 --- a/vendor/github.com/go-chi/chi/chain.go +++ /dev/null @@ -1,49 +0,0 @@ -package chi - -import "net/http" - -// Chain returns a Middlewares type from a slice of middleware handlers. -func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { - return Middlewares(middlewares) -} - -// Handler builds and returns a http.Handler from the chain of middlewares, -// with `h http.Handler` as the final handler. -func (mws Middlewares) Handler(h http.Handler) http.Handler { - return &ChainHandler{mws, h, chain(mws, h)} -} - -// HandlerFunc builds and returns a http.Handler from the chain of middlewares, -// with `h http.Handler` as the final handler. -func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { - return &ChainHandler{mws, h, chain(mws, h)} -} - -// ChainHandler is a http.Handler with support for handler composition and -// execution. -type ChainHandler struct { - Middlewares Middlewares - Endpoint http.Handler - chain http.Handler -} - -func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - c.chain.ServeHTTP(w, r) -} - -// chain builds a http.Handler composed of an inline middleware stack and endpoint -// handler in the order they are passed. -func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { - // Return ahead of time if there aren't any middlewares for the chain - if len(middlewares) == 0 { - return endpoint - } - - // Wrap the end handler with the middleware chain - h := middlewares[len(middlewares)-1](endpoint) - for i := len(middlewares) - 2; i >= 0; i-- { - h = middlewares[i](h) - } - - return h -} diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go deleted file mode 100644 index b7063dc..0000000 --- a/vendor/github.com/go-chi/chi/chi.go +++ /dev/null @@ -1,134 +0,0 @@ -// -// Package chi is a small, idiomatic and composable router for building HTTP services. -// -// chi requires Go 1.10 or newer. 
-// -// Example: -// package main -// -// import ( -// "net/http" -// -// "github.com/go-chi/chi" -// "github.com/go-chi/chi/middleware" -// ) -// -// func main() { -// r := chi.NewRouter() -// r.Use(middleware.Logger) -// r.Use(middleware.Recoverer) -// -// r.Get("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("root.")) -// }) -// -// http.ListenAndServe(":3333", r) -// } -// -// See github.com/go-chi/chi/_examples/ for more in-depth examples. -// -// URL patterns allow for easy matching of path components in HTTP -// requests. The matching components can then be accessed using -// chi.URLParam(). All patterns must begin with a slash. -// -// A simple named placeholder {name} matches any sequence of characters -// up to the next / or the end of the URL. Trailing slashes on paths must -// be handled explicitly. -// -// A placeholder with a name followed by a colon allows a regular -// expression match, for example {number:\\d+}. The regular expression -// syntax is Go's normal regexp RE2 syntax, except that regular expressions -// including { or } are not supported, and / will never be -// matched. An anonymous regexp pattern is allowed, using an empty string -// before the colon in the placeholder, such as {:\\d+} -// -// The special placeholder of asterisk matches the rest of the requested -// URL. Any trailing characters in the pattern are ignored. This is the only -// placeholder which will match / characters. -// -// Examples: -// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" -// "/user/{name}/info" matches "/user/jsmith/info" -// "/page/*" matches "/page/intro/latest" -// "/page/*/index" also matches "/page/intro/latest" -// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" -// -package chi - -import "net/http" - -// NewRouter returns a new Mux object that implements the Router interface. -func NewRouter() *Mux { - return NewMux() -} - -// Router consisting of the core routing methods used by chi's Mux, -// using only the standard net/http. -type Router interface { - http.Handler - Routes - - // Use appends one or more middlewares onto the Router stack. - Use(middlewares ...func(http.Handler) http.Handler) - - // With adds inline middlewares for an endpoint handler. - With(middlewares ...func(http.Handler) http.Handler) Router - - // Group adds a new inline-Router along the current routing - // path, with a fresh middleware stack for the inline-Router. - Group(fn func(r Router)) Router - - // Route mounts a sub-Router along a `pattern`` string. - Route(pattern string, fn func(r Router)) Router - - // Mount attaches another http.Handler along ./pattern/* - Mount(pattern string, h http.Handler) - - // Handle and HandleFunc adds routes for `pattern` that matches - // all HTTP methods. - Handle(pattern string, h http.Handler) - HandleFunc(pattern string, h http.HandlerFunc) - - // Method and MethodFunc adds routes for `pattern` that matches - // the `method` HTTP method. 
- Method(method, pattern string, h http.Handler) - MethodFunc(method, pattern string, h http.HandlerFunc) - - // HTTP-method routing along `pattern` - Connect(pattern string, h http.HandlerFunc) - Delete(pattern string, h http.HandlerFunc) - Get(pattern string, h http.HandlerFunc) - Head(pattern string, h http.HandlerFunc) - Options(pattern string, h http.HandlerFunc) - Patch(pattern string, h http.HandlerFunc) - Post(pattern string, h http.HandlerFunc) - Put(pattern string, h http.HandlerFunc) - Trace(pattern string, h http.HandlerFunc) - - // NotFound defines a handler to respond whenever a route could - // not be found. - NotFound(h http.HandlerFunc) - - // MethodNotAllowed defines a handler to respond whenever a method is - // not allowed. - MethodNotAllowed(h http.HandlerFunc) -} - -// Routes interface adds two methods for router traversal, which is also -// used by the `docgen` subpackage to generation documentation for Routers. -type Routes interface { - // Routes returns the routing tree in an easily traversable structure. - Routes() []Route - - // Middlewares returns the list of middlewares in use by the router. - Middlewares() Middlewares - - // Match searches the routing tree for a handler that matches - // the method/path - similar to routing a http request, but without - // executing the handler thereafter. - Match(rctx *Context, method, path string) bool -} - -// Middlewares type is a slice of standard middleware handlers with methods -// to compose middleware chains and http.Handler's. -type Middlewares []func(http.Handler) http.Handler diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go deleted file mode 100644 index 112e87a..0000000 --- a/vendor/github.com/go-chi/chi/context.go +++ /dev/null @@ -1,162 +0,0 @@ -package chi - -import ( - "context" - "net" - "net/http" - "strings" -) - -var ( - // RouteCtxKey is the context.Context key to store the request context. - RouteCtxKey = &contextKey{"RouteContext"} -) - -// Context is the default routing context set on the root node of a -// request context to track route patterns, URL parameters and -// an optional routing path. -type Context struct { - Routes Routes - - // Routing path/method override used during the route search. - // See Mux#routeHTTP method. - RoutePath string - RouteMethod string - - // Routing pattern stack throughout the lifecycle of the request, - // across all connected routers. It is a record of all matching - // patterns across a stack of sub-routers. - RoutePatterns []string - - // URLParams are the stack of routeParams captured during the - // routing lifecycle across a stack of sub-routers. - URLParams RouteParams - - // The endpoint routing pattern that matched the request URI path - // or `RoutePath` of the current sub-router. This value will update - // during the lifecycle of a request passing through a stack of - // sub-routers. - routePattern string - - // Route parameters matched for the current sub-router. It is - // intentionally unexported so it cant be tampered. - routeParams RouteParams - - // methodNotAllowed hint - methodNotAllowed bool -} - -// NewRouteContext returns a new routing Context object. -func NewRouteContext() *Context { - return &Context{} -} - -// Reset a routing context to its initial state. 
-func (x *Context) Reset() { - x.Routes = nil - x.RoutePath = "" - x.RouteMethod = "" - x.RoutePatterns = x.RoutePatterns[:0] - x.URLParams.Keys = x.URLParams.Keys[:0] - x.URLParams.Values = x.URLParams.Values[:0] - - x.routePattern = "" - x.routeParams.Keys = x.routeParams.Keys[:0] - x.routeParams.Values = x.routeParams.Values[:0] - x.methodNotAllowed = false -} - -// URLParam returns the corresponding URL parameter value from the request -// routing context. -func (x *Context) URLParam(key string) string { - for k := len(x.URLParams.Keys) - 1; k >= 0; k-- { - if x.URLParams.Keys[k] == key { - return x.URLParams.Values[k] - } - } - return "" -} - -// RoutePattern builds the routing pattern string for the particular -// request, at the particular point during routing. This means, the value -// will change throughout the execution of a request in a router. That is -// why its advised to only use this value after calling the next handler. -// -// For example, -// -// func Instrument(next http.Handler) http.Handler { -// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// next.ServeHTTP(w, r) -// routePattern := chi.RouteContext(r.Context()).RoutePattern() -// measure(w, r, routePattern) -// }) -// } -func (x *Context) RoutePattern() string { - routePattern := strings.Join(x.RoutePatterns, "") - return strings.Replace(routePattern, "/*/", "/", -1) -} - -// RouteContext returns chi's routing Context object from a -// http.Request Context. -func RouteContext(ctx context.Context) *Context { - val, _ := ctx.Value(RouteCtxKey).(*Context) - return val -} - -// URLParam returns the url parameter from a http.Request object. -func URLParam(r *http.Request, key string) string { - if rctx := RouteContext(r.Context()); rctx != nil { - return rctx.URLParam(key) - } - return "" -} - -// URLParamFromCtx returns the url parameter from a http.Request Context. -func URLParamFromCtx(ctx context.Context, key string) string { - if rctx := RouteContext(ctx); rctx != nil { - return rctx.URLParam(key) - } - return "" -} - -// RouteParams is a structure to track URL routing parameters efficiently. -type RouteParams struct { - Keys, Values []string -} - -// Add will append a URL parameter to the end of the route param -func (s *RouteParams) Add(key, value string) { - s.Keys = append(s.Keys, key) - s.Values = append(s.Values, value) -} - -// ServerBaseContext wraps an http.Handler to set the request context to the -// `baseCtx`. -func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler { - fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - baseCtx := baseCtx - - // Copy over default net/http server context keys - if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok { - baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v) - } - if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok { - baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v) - } - - h.ServeHTTP(w, r.WithContext(baseCtx)) - }) - return fn -} - -// contextKey is a value for use with context.WithValue. It's used as -// a pointer so it fits in an interface{} without allocation. This technique -// for defining context keys was copied from Go 1.7's new use of context in net/http. 
-type contextKey struct { - name string -} - -func (k *contextKey) String() string { - return "chi context value " + k.name -} diff --git a/vendor/github.com/go-chi/chi/middleware/basic_auth.go b/vendor/github.com/go-chi/chi/middleware/basic_auth.go deleted file mode 100644 index 87b2641..0000000 --- a/vendor/github.com/go-chi/chi/middleware/basic_auth.go +++ /dev/null @@ -1,32 +0,0 @@ -package middleware - -import ( - "fmt" - "net/http" -) - -// BasicAuth implements a simple middleware handler for adding basic http auth to a route. -func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - user, pass, ok := r.BasicAuth() - if !ok { - basicAuthFailed(w, realm) - return - } - - credPass, credUserOk := creds[user] - if !credUserOk || pass != credPass { - basicAuthFailed(w, realm) - return - } - - next.ServeHTTP(w, r) - }) - } -} - -func basicAuthFailed(w http.ResponseWriter, realm string) { - w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm)) - w.WriteHeader(http.StatusUnauthorized) -} diff --git a/vendor/github.com/go-chi/chi/middleware/compress.go b/vendor/github.com/go-chi/chi/middleware/compress.go deleted file mode 100644 index 2f40cc1..0000000 --- a/vendor/github.com/go-chi/chi/middleware/compress.go +++ /dev/null @@ -1,399 +0,0 @@ -package middleware - -import ( - "bufio" - "compress/flate" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "strings" - "sync" -) - -var defaultCompressibleContentTypes = []string{ - "text/html", - "text/css", - "text/plain", - "text/javascript", - "application/javascript", - "application/x-javascript", - "application/json", - "application/atom+xml", - "application/rss+xml", - "image/svg+xml", -} - -// Compress is a middleware that compresses response -// body of a given content types to a data format based -// on Accept-Encoding request header. It uses a given -// compression level. -// -// NOTE: make sure to set the Content-Type header on your response -// otherwise this middleware will not compress the response body. For ex, in -// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody)) -// or set it manually. -// -// Passing a compression level of 5 is sensible value -func Compress(level int, types ...string) func(next http.Handler) http.Handler { - compressor := NewCompressor(level, types...) - return compressor.Handler -} - -// Compressor represents a set of encoding configurations. -type Compressor struct { - level int // The compression level. - // The mapping of encoder names to encoder functions. - encoders map[string]EncoderFunc - // The mapping of pooled encoders to pools. - pooledEncoders map[string]*sync.Pool - // The set of content types allowed to be compressed. - allowedTypes map[string]struct{} - allowedWildcards map[string]struct{} - // The list of encoders in order of decreasing precedence. - encodingPrecedence []string -} - -// NewCompressor creates a new Compressor that will handle encoding responses. -// -// The level should be one of the ones defined in the flate package. -// The types are the content types that are allowed to be compressed. -func NewCompressor(level int, types ...string) *Compressor { - // If types are provided, set those as the allowed types. If none are - // provided, use the default list. 
- allowedTypes := make(map[string]struct{}) - allowedWildcards := make(map[string]struct{}) - if len(types) > 0 { - for _, t := range types { - if strings.Contains(strings.TrimSuffix(t, "/*"), "*") { - panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t)) - } - if strings.HasSuffix(t, "/*") { - allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{} - } else { - allowedTypes[t] = struct{}{} - } - } - } else { - for _, t := range defaultCompressibleContentTypes { - allowedTypes[t] = struct{}{} - } - } - - c := &Compressor{ - level: level, - encoders: make(map[string]EncoderFunc), - pooledEncoders: make(map[string]*sync.Pool), - allowedTypes: allowedTypes, - allowedWildcards: allowedWildcards, - } - - // Set the default encoders. The precedence order uses the reverse - // ordering that the encoders were added. This means adding new encoders - // will move them to the front of the order. - // - // TODO: - // lzma: Opera. - // sdch: Chrome, Android. Gzip output + dictionary header. - // br: Brotli, see https://github.com/go-chi/chi/pull/326 - - // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951) - // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32 - // checksum compared to CRC-32 used in "gzip" and thus is faster. - // - // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect - // raw DEFLATE data only, without the mentioned zlib wrapper. - // Because of this major confusion, most modern browsers try it - // both ways, first looking for zlib headers. - // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548 - // - // The list of browsers having problems is quite big, see: - // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression - // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results - // - // That's why we prefer gzip over deflate. It's just more reliable - // and not significantly slower than gzip. - c.SetEncoder("deflate", encoderDeflate) - - // TODO: Exception for old MSIE browsers that can't handle non-HTML? - // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression - c.SetEncoder("gzip", encoderGzip) - - // NOTE: Not implemented, intentionally: - // case "compress": // LZW. Deprecated. - // case "bzip2": // Too slow on-the-fly. - // case "zopfli": // Too slow on-the-fly. - // case "xz": // Too slow on-the-fly. - return c -} - -// SetEncoder can be used to set the implementation of a compression algorithm. -// -// The encoding should be a standardised identifier. See: -// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding -// -// For example, add the Brotli algortithm: -// -// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc" -// -// compressor := middleware.NewCompressor(5, "text/html") -// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer { -// params := brotli_enc.NewBrotliParams() -// params.SetQuality(level) -// return brotli_enc.NewBrotliWriter(params, w) -// }) -func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) { - encoding = strings.ToLower(encoding) - if encoding == "" { - panic("the encoding can not be empty") - } - if fn == nil { - panic("attempted to set a nil encoder function") - } - - // If we are adding a new encoder that is already registered, we have to - // clear that one out first. 
- if _, ok := c.pooledEncoders[encoding]; ok { - delete(c.pooledEncoders, encoding) - } - if _, ok := c.encoders[encoding]; ok { - delete(c.encoders, encoding) - } - - // If the encoder supports Resetting (IoReseterWriter), then it can be pooled. - encoder := fn(ioutil.Discard, c.level) - if encoder != nil { - if _, ok := encoder.(ioResetterWriter); ok { - pool := &sync.Pool{ - New: func() interface{} { - return fn(ioutil.Discard, c.level) - }, - } - c.pooledEncoders[encoding] = pool - } - } - // If the encoder is not in the pooledEncoders, add it to the normal encoders. - if _, ok := c.pooledEncoders[encoding]; !ok { - c.encoders[encoding] = fn - } - - for i, v := range c.encodingPrecedence { - if v == encoding { - c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...) - } - } - - c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...) -} - -// Handler returns a new middleware that will compress the response based on the -// current Compressor. -func (c *Compressor) Handler(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - encoder, encoding, cleanup := c.selectEncoder(r.Header, w) - - cw := &compressResponseWriter{ - ResponseWriter: w, - w: w, - contentTypes: c.allowedTypes, - contentWildcards: c.allowedWildcards, - encoding: encoding, - compressable: false, // determined in post-handler - } - if encoder != nil { - cw.w = encoder - } - // Re-add the encoder to the pool if applicable. - defer cleanup() - defer cw.Close() - - next.ServeHTTP(cw, r) - }) -} - -// selectEncoder returns the encoder, the name of the encoder, and a closer function. -func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) { - header := h.Get("Accept-Encoding") - - // Parse the names of all accepted algorithms from the header. - accepted := strings.Split(strings.ToLower(header), ",") - - // Find supported encoder by accepted list by precedence - for _, name := range c.encodingPrecedence { - if matchAcceptEncoding(accepted, name) { - if pool, ok := c.pooledEncoders[name]; ok { - encoder := pool.Get().(ioResetterWriter) - cleanup := func() { - pool.Put(encoder) - } - encoder.Reset(w) - return encoder, name, cleanup - - } - if fn, ok := c.encoders[name]; ok { - return fn(w, c.level), name, func() {} - } - } - - } - - // No encoder found to match the accepted encoding - return nil, "", func() {} -} - -func matchAcceptEncoding(accepted []string, encoding string) bool { - for _, v := range accepted { - if strings.Contains(v, encoding) { - return true - } - } - return false -} - -// An EncoderFunc is a function that wraps the provided io.Writer with a -// streaming compression algorithm and returns it. -// -// In case of failure, the function should return nil. -type EncoderFunc func(w io.Writer, level int) io.Writer - -// Interface for types that allow resetting io.Writers. -type ioResetterWriter interface { - io.Writer - Reset(w io.Writer) -} - -type compressResponseWriter struct { - http.ResponseWriter - - // The streaming encoder writer to be used if there is one. Otherwise, - // this is just the normal writer. - w io.Writer - encoding string - contentTypes map[string]struct{} - contentWildcards map[string]struct{} - wroteHeader bool - compressable bool -} - -func (cw *compressResponseWriter) isCompressable() bool { - // Parse the first part of the Content-Type response header. 
- contentType := cw.Header().Get("Content-Type") - if idx := strings.Index(contentType, ";"); idx >= 0 { - contentType = contentType[0:idx] - } - - // Is the content type compressable? - if _, ok := cw.contentTypes[contentType]; ok { - return true - } - if idx := strings.Index(contentType, "/"); idx > 0 { - contentType = contentType[0:idx] - _, ok := cw.contentWildcards[contentType] - return ok - } - return false -} - -func (cw *compressResponseWriter) WriteHeader(code int) { - if cw.wroteHeader { - cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate. - return - } - cw.wroteHeader = true - defer cw.ResponseWriter.WriteHeader(code) - - // Already compressed data? - if cw.Header().Get("Content-Encoding") != "" { - return - } - - if !cw.isCompressable() { - cw.compressable = false - return - } - - if cw.encoding != "" { - cw.compressable = true - cw.Header().Set("Content-Encoding", cw.encoding) - cw.Header().Set("Vary", "Accept-Encoding") - - // The content-length after compression is unknown - cw.Header().Del("Content-Length") - } -} - -func (cw *compressResponseWriter) Write(p []byte) (int, error) { - if !cw.wroteHeader { - cw.WriteHeader(http.StatusOK) - } - - return cw.writer().Write(p) -} - -func (cw *compressResponseWriter) writer() io.Writer { - if cw.compressable { - return cw.w - } else { - return cw.ResponseWriter - } -} - -type compressFlusher interface { - Flush() error -} - -func (cw *compressResponseWriter) Flush() { - if f, ok := cw.writer().(http.Flusher); ok { - f.Flush() - } - // If the underlying writer has a compression flush signature, - // call this Flush() method instead - if f, ok := cw.writer().(compressFlusher); ok { - f.Flush() - - // Also flush the underlying response writer - if f, ok := cw.ResponseWriter.(http.Flusher); ok { - f.Flush() - } - } -} - -func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if hj, ok := cw.writer().(http.Hijacker); ok { - return hj.Hijack() - } - return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer") -} - -func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error { - if ps, ok := cw.writer().(http.Pusher); ok { - return ps.Push(target, opts) - } - return errors.New("chi/middleware: http.Pusher is unavailable on the writer") -} - -func (cw *compressResponseWriter) Close() error { - if c, ok := cw.writer().(io.WriteCloser); ok { - return c.Close() - } - return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer") -} - -func encoderGzip(w io.Writer, level int) io.Writer { - gw, err := gzip.NewWriterLevel(w, level) - if err != nil { - return nil - } - return gw -} - -func encoderDeflate(w io.Writer, level int) io.Writer { - dw, err := flate.NewWriter(w, level) - if err != nil { - return nil - } - return dw -} diff --git a/vendor/github.com/go-chi/chi/middleware/content_charset.go b/vendor/github.com/go-chi/chi/middleware/content_charset.go deleted file mode 100644 index 07b5ce6..0000000 --- a/vendor/github.com/go-chi/chi/middleware/content_charset.go +++ /dev/null @@ -1,51 +0,0 @@ -package middleware - -import ( - "net/http" - "strings" -) - -// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match. -// An empty charset will allow requests with no Content-Type header or no specified charset. 
-func ContentCharset(charsets ...string) func(next http.Handler) http.Handler { - for i, c := range charsets { - charsets[i] = strings.ToLower(c) - } - - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !contentEncoding(r.Header.Get("Content-Type"), charsets...) { - w.WriteHeader(http.StatusUnsupportedMediaType) - return - } - - next.ServeHTTP(w, r) - }) - } -} - -// Check the content encoding against a list of acceptable values. -func contentEncoding(ce string, charsets ...string) bool { - _, ce = split(strings.ToLower(ce), ";") - _, ce = split(ce, "charset=") - ce, _ = split(ce, ";") - for _, c := range charsets { - if ce == c { - return true - } - } - - return false -} - -// Split a string in two parts, cleaning any whitespace. -func split(str, sep string) (string, string) { - var a, b string - var parts = strings.SplitN(str, sep, 2) - a = strings.TrimSpace(parts[0]) - if len(parts) == 2 { - b = strings.TrimSpace(parts[1]) - } - - return a, b -} diff --git a/vendor/github.com/go-chi/chi/middleware/content_encoding.go b/vendor/github.com/go-chi/chi/middleware/content_encoding.go deleted file mode 100644 index e0b9ccc..0000000 --- a/vendor/github.com/go-chi/chi/middleware/content_encoding.go +++ /dev/null @@ -1,34 +0,0 @@ -package middleware - -import ( - "net/http" - "strings" -) - -// AllowContentEncoding enforces a whitelist of request Content-Encoding otherwise responds -// with a 415 Unsupported Media Type status. -func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler { - allowedEncodings := make(map[string]struct{}, len(contentEncoding)) - for _, encoding := range contentEncoding { - allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{} - } - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - requestEncodings := r.Header["Content-Encoding"] - // skip check for empty content body or no Content-Encoding - if r.ContentLength == 0 { - next.ServeHTTP(w, r) - return - } - // All encodings in the request must be allowed - for _, encoding := range requestEncodings { - if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok { - w.WriteHeader(http.StatusUnsupportedMediaType) - return - } - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} diff --git a/vendor/github.com/go-chi/chi/middleware/content_type.go b/vendor/github.com/go-chi/chi/middleware/content_type.go deleted file mode 100644 index ee49578..0000000 --- a/vendor/github.com/go-chi/chi/middleware/content_type.go +++ /dev/null @@ -1,51 +0,0 @@ -package middleware - -import ( - "net/http" - "strings" -) - -// SetHeader is a convenience handler to set a response header key/value -func SetHeader(key, value string) func(next http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - w.Header().Set(key, value) - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} - -// AllowContentType enforces a whitelist of request Content-Types otherwise responds -// with a 415 Unsupported Media Type status. 
-func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler { - cT := []string{} - for _, t := range contentTypes { - cT = append(cT, strings.ToLower(t)) - } - - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - if r.ContentLength == 0 { - // skip check for empty content body - next.ServeHTTP(w, r) - return - } - - s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type"))) - if i := strings.Index(s, ";"); i > -1 { - s = s[0:i] - } - - for _, t := range cT { - if t == s { - next.ServeHTTP(w, r) - return - } - } - - w.WriteHeader(http.StatusUnsupportedMediaType) - } - return http.HandlerFunc(fn) - } -} diff --git a/vendor/github.com/go-chi/chi/middleware/get_head.go b/vendor/github.com/go-chi/chi/middleware/get_head.go deleted file mode 100644 index 86068a9..0000000 --- a/vendor/github.com/go-chi/chi/middleware/get_head.go +++ /dev/null @@ -1,39 +0,0 @@ -package middleware - -import ( - "net/http" - - "github.com/go-chi/chi" -) - -// GetHead automatically route undefined HEAD requests to GET handlers. -func GetHead(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "HEAD" { - rctx := chi.RouteContext(r.Context()) - routePath := rctx.RoutePath - if routePath == "" { - if r.URL.RawPath != "" { - routePath = r.URL.RawPath - } else { - routePath = r.URL.Path - } - } - - // Temporary routing context to look-ahead before routing the request - tctx := chi.NewRouteContext() - - // Attempt to find a HEAD handler for the routing path, if not found, traverse - // the router as through its a GET route, but proceed with the request - // with the HEAD method. - if !rctx.Routes.Match(tctx, "HEAD", routePath) { - rctx.RouteMethod = "GET" - rctx.RoutePath = routePath - next.ServeHTTP(w, r) - return - } - } - - next.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/go-chi/chi/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/middleware/heartbeat.go deleted file mode 100644 index fe822fb..0000000 --- a/vendor/github.com/go-chi/chi/middleware/heartbeat.go +++ /dev/null @@ -1,26 +0,0 @@ -package middleware - -import ( - "net/http" - "strings" -) - -// Heartbeat endpoint middleware useful to setting up a path like -// `/ping` that load balancers or uptime testing external services -// can make a request before hitting any routes. It's also convenient -// to place this above ACL middlewares as well. -func Heartbeat(endpoint string) func(http.Handler) http.Handler { - f := func(h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) { - w.Header().Set("Content-Type", "text/plain") - w.WriteHeader(http.StatusOK) - w.Write([]byte(".")) - return - } - h.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } - return f -} diff --git a/vendor/github.com/go-chi/chi/middleware/logger.go b/vendor/github.com/go-chi/chi/middleware/logger.go deleted file mode 100644 index 6aa2d7b..0000000 --- a/vendor/github.com/go-chi/chi/middleware/logger.go +++ /dev/null @@ -1,155 +0,0 @@ -package middleware - -import ( - "bytes" - "context" - "log" - "net/http" - "os" - "time" -) - -var ( - // LogEntryCtxKey is the context.Context key to store the request log entry. - LogEntryCtxKey = &contextKey{"LogEntry"} - - // DefaultLogger is called by the Logger middleware handler to log each request. 
- // Its made a package-level variable so that it can be reconfigured for custom - // logging configurations. - DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false}) -) - -// Logger is a middleware that logs the start and end of each request, along -// with some useful data about what was requested, what the response status was, -// and how long it took to return. When standard output is a TTY, Logger will -// print in color, otherwise it will print in black and white. Logger prints a -// request ID if one is provided. -// -// Alternatively, look at https://github.com/pressly/lg and the `lg.RequestLogger` -// middleware pkg. -func Logger(next http.Handler) http.Handler { - return DefaultLogger(next) -} - -// RequestLogger returns a logger handler using a custom LogFormatter. -func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - entry := f.NewLogEntry(r) - ww := NewWrapResponseWriter(w, r.ProtoMajor) - - t1 := time.Now() - defer func() { - entry.Write(ww.Status(), ww.BytesWritten(), time.Since(t1)) - }() - - next.ServeHTTP(ww, WithLogEntry(r, entry)) - } - return http.HandlerFunc(fn) - } -} - -// LogFormatter initiates the beginning of a new LogEntry per request. -// See DefaultLogFormatter for an example implementation. -type LogFormatter interface { - NewLogEntry(r *http.Request) LogEntry -} - -// LogEntry records the final log when a request completes. -// See defaultLogEntry for an example implementation. -type LogEntry interface { - Write(status, bytes int, elapsed time.Duration) - Panic(v interface{}, stack []byte) -} - -// GetLogEntry returns the in-context LogEntry for a request. -func GetLogEntry(r *http.Request) LogEntry { - entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry) - return entry -} - -// WithLogEntry sets the in-context LogEntry for a request. -func WithLogEntry(r *http.Request, entry LogEntry) *http.Request { - r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry)) - return r -} - -// LoggerInterface accepts printing to stdlib logger or compatible logger. -type LoggerInterface interface { - Print(v ...interface{}) -} - -// DefaultLogFormatter is a simple logger that implements a LogFormatter. -type DefaultLogFormatter struct { - Logger LoggerInterface - NoColor bool -} - -// NewLogEntry creates a new LogEntry for the request. 
-func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry { - useColor := !l.NoColor - entry := &defaultLogEntry{ - DefaultLogFormatter: l, - request: r, - buf: &bytes.Buffer{}, - useColor: useColor, - } - - reqID := GetReqID(r.Context()) - if reqID != "" { - cW(entry.buf, useColor, nYellow, "[%s] ", reqID) - } - cW(entry.buf, useColor, nCyan, "\"") - cW(entry.buf, useColor, bMagenta, "%s ", r.Method) - - scheme := "http" - if r.TLS != nil { - scheme = "https" - } - cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto) - - entry.buf.WriteString("from ") - entry.buf.WriteString(r.RemoteAddr) - entry.buf.WriteString(" - ") - - return entry -} - -type defaultLogEntry struct { - *DefaultLogFormatter - request *http.Request - buf *bytes.Buffer - useColor bool -} - -func (l *defaultLogEntry) Write(status, bytes int, elapsed time.Duration) { - switch { - case status < 200: - cW(l.buf, l.useColor, bBlue, "%03d", status) - case status < 300: - cW(l.buf, l.useColor, bGreen, "%03d", status) - case status < 400: - cW(l.buf, l.useColor, bCyan, "%03d", status) - case status < 500: - cW(l.buf, l.useColor, bYellow, "%03d", status) - default: - cW(l.buf, l.useColor, bRed, "%03d", status) - } - - cW(l.buf, l.useColor, bBlue, " %dB", bytes) - - l.buf.WriteString(" in ") - if elapsed < 500*time.Millisecond { - cW(l.buf, l.useColor, nGreen, "%s", elapsed) - } else if elapsed < 5*time.Second { - cW(l.buf, l.useColor, nYellow, "%s", elapsed) - } else { - cW(l.buf, l.useColor, nRed, "%s", elapsed) - } - - l.Logger.Print(l.buf.String()) -} - -func (l *defaultLogEntry) Panic(v interface{}, stack []byte) { - PrintPrettyStack(v) -} diff --git a/vendor/github.com/go-chi/chi/middleware/middleware.go b/vendor/github.com/go-chi/chi/middleware/middleware.go deleted file mode 100644 index be6a44f..0000000 --- a/vendor/github.com/go-chi/chi/middleware/middleware.go +++ /dev/null @@ -1,12 +0,0 @@ -package middleware - -// contextKey is a value for use with context.WithValue. It's used as -// a pointer so it fits in an interface{} without allocation. This technique -// for defining context keys was copied from Go 1.7's new use of context in net/http. -type contextKey struct { - name string -} - -func (k *contextKey) String() string { - return "chi/middleware context value " + k.name -} diff --git a/vendor/github.com/go-chi/chi/middleware/nocache.go b/vendor/github.com/go-chi/chi/middleware/nocache.go deleted file mode 100644 index 2412829..0000000 --- a/vendor/github.com/go-chi/chi/middleware/nocache.go +++ /dev/null @@ -1,58 +0,0 @@ -package middleware - -// Ported from Goji's middleware, source: -// https://github.com/zenazn/goji/tree/master/web/middleware - -import ( - "net/http" - "time" -) - -// Unix epoch time -var epoch = time.Unix(0, 0).Format(time.RFC1123) - -// Taken from https://github.com/mytrile/nocache -var noCacheHeaders = map[string]string{ - "Expires": epoch, - "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0", - "Pragma": "no-cache", - "X-Accel-Expires": "0", -} - -var etagHeaders = []string{ - "ETag", - "If-Modified-Since", - "If-Match", - "If-None-Match", - "If-Range", - "If-Unmodified-Since", -} - -// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent -// a router (or subrouter) from being cached by an upstream proxy and/or client. 
-// -// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets: -// Expires: Thu, 01 Jan 1970 00:00:00 UTC -// Cache-Control: no-cache, private, max-age=0 -// X-Accel-Expires: 0 -// Pragma: no-cache (for HTTP/1.0 proxies/clients) -func NoCache(h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - - // Delete any ETag headers that may have been set - for _, v := range etagHeaders { - if r.Header.Get(v) != "" { - r.Header.Del(v) - } - } - - // Set our NoCache headers - for k, v := range noCacheHeaders { - w.Header().Set(k, v) - } - - h.ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} diff --git a/vendor/github.com/go-chi/chi/middleware/profiler.go b/vendor/github.com/go-chi/chi/middleware/profiler.go deleted file mode 100644 index 1d44b82..0000000 --- a/vendor/github.com/go-chi/chi/middleware/profiler.go +++ /dev/null @@ -1,55 +0,0 @@ -package middleware - -import ( - "expvar" - "fmt" - "net/http" - "net/http/pprof" - - "github.com/go-chi/chi" -) - -// Profiler is a convenient subrouter used for mounting net/http/pprof. ie. -// -// func MyService() http.Handler { -// r := chi.NewRouter() -// // ..middlewares -// r.Mount("/debug", middleware.Profiler()) -// // ..routes -// return r -// } -func Profiler() http.Handler { - r := chi.NewRouter() - r.Use(NoCache) - - r.Get("/", func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, r.RequestURI+"/pprof/", 301) - }) - r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, r.RequestURI+"/", 301) - }) - - r.HandleFunc("/pprof/*", pprof.Index) - r.HandleFunc("/pprof/cmdline", pprof.Cmdline) - r.HandleFunc("/pprof/profile", pprof.Profile) - r.HandleFunc("/pprof/symbol", pprof.Symbol) - r.HandleFunc("/pprof/trace", pprof.Trace) - r.HandleFunc("/vars", expVars) - - return r -} - -// Replicated from expvar.go as not public. -func expVars(w http.ResponseWriter, r *http.Request) { - first := true - w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, "{\n") - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/vendor/github.com/go-chi/chi/middleware/realip.go b/vendor/github.com/go-chi/chi/middleware/realip.go deleted file mode 100644 index 72db6ca..0000000 --- a/vendor/github.com/go-chi/chi/middleware/realip.go +++ /dev/null @@ -1,54 +0,0 @@ -package middleware - -// Ported from Goji's middleware, source: -// https://github.com/zenazn/goji/tree/master/web/middleware - -import ( - "net/http" - "strings" -) - -var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") -var xRealIP = http.CanonicalHeaderKey("X-Real-IP") - -// RealIP is a middleware that sets a http.Request's RemoteAddr to the results -// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that -// order). -// -// This middleware should be inserted fairly early in the middleware stack to -// ensure that subsequent layers (e.g., request loggers) which examine the -// RemoteAddr will see the intended value. -// -// You should only use this middleware if you can trust the headers passed to -// you (in particular, the two headers this middleware uses), for example -// because you have placed a reverse proxy like HAProxy or nginx in front of -// chi. 
If your reverse proxies are configured to pass along arbitrary header -// values from the client, or if you use this middleware without a reverse -// proxy, malicious clients will be able to make you very sad (or, depending on -// how you're using RemoteAddr, vulnerable to an attack of some sort). -func RealIP(h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - if rip := realIP(r); rip != "" { - r.RemoteAddr = rip - } - h.ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -func realIP(r *http.Request) string { - var ip string - - if xrip := r.Header.Get(xRealIP); xrip != "" { - ip = xrip - } else if xff := r.Header.Get(xForwardedFor); xff != "" { - i := strings.Index(xff, ", ") - if i == -1 { - i = len(xff) - } - ip = xff[:i] - } - - return ip -} diff --git a/vendor/github.com/go-chi/chi/middleware/recoverer.go b/vendor/github.com/go-chi/chi/middleware/recoverer.go deleted file mode 100644 index 7d110b4..0000000 --- a/vendor/github.com/go-chi/chi/middleware/recoverer.go +++ /dev/null @@ -1,179 +0,0 @@ -package middleware - -// The original work was derived from Goji's middleware, source: -// https://github.com/zenazn/goji/tree/master/web/middleware - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "os" - "runtime/debug" - "strings" -) - -// Recoverer is a middleware that recovers from panics, logs the panic (and a -// backtrace), and returns a HTTP 500 (Internal Server Error) status if -// possible. Recoverer prints a request ID if one is provided. -// -// Alternatively, look at https://github.com/pressly/lg middleware pkgs. -func Recoverer(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - defer func() { - if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler { - - logEntry := GetLogEntry(r) - if logEntry != nil { - logEntry.Panic(rvr, debug.Stack()) - } else { - PrintPrettyStack(rvr) - } - - w.WriteHeader(http.StatusInternalServerError) - } - }() - - next.ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -func PrintPrettyStack(rvr interface{}) { - debugStack := debug.Stack() - s := prettyStack{} - out, err := s.parse(debugStack, rvr) - if err == nil { - os.Stderr.Write(out) - } else { - // print stdlib output as a fallback - os.Stderr.Write(debugStack) - } -} - -type prettyStack struct { -} - -func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) { - var err error - useColor := true - buf := &bytes.Buffer{} - - cW(buf, useColor, bBlue, "\n>>") - cW(buf, useColor, bRed, " panic: ") - cW(buf, useColor, bMagenta, "%v", rvr) - cW(buf, useColor, bBlue, " <<\n\n") - - // process debug stack info - stack := strings.Split(string(debugStack), "\n") - lines := []string{} - - // locate panic line, as we may have nested panics - for i := len(stack) - 1; i > 0; i-- { - lines = append(lines, stack[i]) - if strings.HasPrefix(stack[i], "panic(0x") { - lines = lines[0 : len(lines)-2] // remove boilerplate - break - } - } - - if strings.HasPrefix(stack[0], "goroutine") { - cW(buf, useColor, bBlue, "%s\n\n", stack[0]) - } - - // reverse - for i := len(lines)/2 - 1; i >= 0; i-- { - opp := len(lines) - 1 - i - lines[i], lines[opp] = lines[opp], lines[i] - } - - // decorate - for i, line := range lines { - lines[i], err = s.decorateLine(line, useColor, i) - if err != nil { - return nil, err - } - } - - for _, l := range lines { - fmt.Fprintf(buf, "%s", l) - } - return buf.Bytes(), nil -} - -func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) { - 
line = strings.TrimSpace(line) - if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") { - return s.decorateSourceLine(line, useColor, num) - } else if strings.HasSuffix(line, ")") { - return s.decorateFuncCallLine(line, useColor, num) - } else { - if strings.HasPrefix(line, "\t") { - return strings.Replace(line, "\t", " ", 1), nil - } else { - return fmt.Sprintf(" %s\n", line), nil - } - } -} - -func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) { - idx := strings.LastIndex(line, "(") - if idx < 0 { - return "", errors.New("not a func call line") - } - - buf := &bytes.Buffer{} - pkg := line[0:idx] - // addr := line[idx:] - method := "" - - idx = strings.LastIndex(pkg, string(os.PathSeparator)) - if idx < 0 { - idx = strings.Index(pkg, ".") - method = pkg[idx:] - pkg = pkg[0:idx] - } else { - method = pkg[idx+1:] - pkg = pkg[0 : idx+1] - idx = strings.Index(method, ".") - pkg += method[0:idx] - method = method[idx:] - } - - cW(buf, useColor, nYellow, " %s", pkg) - cW(buf, useColor, bGreen, "%s\n", method) - // cW(buf, useColor, nBlack, "%s", addr) - return buf.String(), nil -} - -func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) { - idx := strings.LastIndex(line, ".go:") - if idx < 0 { - return "", errors.New("not a source line") - } - - buf := &bytes.Buffer{} - path := line[0 : idx+3] - lineno := line[idx+3:] - - idx = strings.LastIndex(path, string(os.PathSeparator)) - dir := path[0 : idx+1] - file := path[idx+1:] - - idx = strings.Index(lineno, " ") - if idx > 0 { - lineno = lineno[0:idx] - } - - cW(buf, useColor, bWhite, " %s", dir) - cW(buf, useColor, bCyan, "%s", file) - cW(buf, useColor, bGreen, "%s", lineno) - - if num == 1 { - cW(buf, useColor, bRed, " <--") - } - cW(buf, useColor, bWhite, "\n\n") - - return buf.String(), nil -} diff --git a/vendor/github.com/go-chi/chi/middleware/request_id.go b/vendor/github.com/go-chi/chi/middleware/request_id.go deleted file mode 100644 index 4903ecc..0000000 --- a/vendor/github.com/go-chi/chi/middleware/request_id.go +++ /dev/null @@ -1,96 +0,0 @@ -package middleware - -// Ported from Goji's middleware, source: -// https://github.com/zenazn/goji/tree/master/web/middleware - -import ( - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "net/http" - "os" - "strings" - "sync/atomic" -) - -// Key to use when setting the request ID. -type ctxKeyRequestID int - -// RequestIDKey is the key that holds the unique request ID in a request context. -const RequestIDKey ctxKeyRequestID = 0 - -// RequestIDHeader is the name of the HTTP Header which contains the request id. -// Exported so that it can be changed by developers -var RequestIDHeader = "X-Request-Id" - -var prefix string -var reqid uint64 - -// A quick note on the statistics here: we're trying to calculate the chance that -// two randomly generated base62 prefixes will collide. 
We use the formula from -// http://en.wikipedia.org/wiki/Birthday_problem -// -// P[m, n] \approx 1 - e^{-m^2/2n} -// -// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server -// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$ -// -// For a $k$ character base-62 identifier, we have $n(k) = 62^k$ -// -// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for -// our purposes, and is surely more than anyone would ever need in practice -- a -// process that is rebooted a handful of times a day for a hundred years has less -// than a millionth of a percent chance of generating two colliding IDs. - -func init() { - hostname, err := os.Hostname() - if hostname == "" || err != nil { - hostname = "localhost" - } - var buf [12]byte - var b64 string - for len(b64) < 10 { - rand.Read(buf[:]) - b64 = base64.StdEncoding.EncodeToString(buf[:]) - b64 = strings.NewReplacer("+", "", "/", "").Replace(b64) - } - - prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10]) -} - -// RequestID is a middleware that injects a request ID into the context of each -// request. A request ID is a string of the form "host.example.com/random-0001", -// where "random" is a base62 random string that uniquely identifies this go -// process, and where the last number is an atomically incremented request -// counter. -func RequestID(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - requestID := r.Header.Get(RequestIDHeader) - if requestID == "" { - myid := atomic.AddUint64(&reqid, 1) - requestID = fmt.Sprintf("%s-%06d", prefix, myid) - } - ctx = context.WithValue(ctx, RequestIDKey, requestID) - next.ServeHTTP(w, r.WithContext(ctx)) - } - return http.HandlerFunc(fn) -} - -// GetReqID returns a request ID from the given context if one is present. -// Returns the empty string if a request ID cannot be found. -func GetReqID(ctx context.Context) string { - if ctx == nil { - return "" - } - if reqID, ok := ctx.Value(RequestIDKey).(string); ok { - return reqID - } - return "" -} - -// NextRequestID generates the next request ID in the sequence. -func NextRequestID() uint64 { - return atomic.AddUint64(&reqid, 1) -} diff --git a/vendor/github.com/go-chi/chi/middleware/strip.go b/vendor/github.com/go-chi/chi/middleware/strip.go deleted file mode 100644 index 2b8b184..0000000 --- a/vendor/github.com/go-chi/chi/middleware/strip.go +++ /dev/null @@ -1,56 +0,0 @@ -package middleware - -import ( - "fmt" - "net/http" - - "github.com/go-chi/chi" -) - -// StripSlashes is a middleware that will match request paths with a trailing -// slash, strip it from the path and continue routing through the mux, if a route -// matches, then it will serve the handler. -func StripSlashes(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - var path string - rctx := chi.RouteContext(r.Context()) - if rctx.RoutePath != "" { - path = rctx.RoutePath - } else { - path = r.URL.Path - } - if len(path) > 1 && path[len(path)-1] == '/' { - rctx.RoutePath = path[:len(path)-1] - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -// RedirectSlashes is a middleware that will match request paths with a trailing -// slash and redirect to the same path, less the trailing slash. 
-// -// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer, -// see https://github.com/go-chi/chi/issues/343 -func RedirectSlashes(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - var path string - rctx := chi.RouteContext(r.Context()) - if rctx.RoutePath != "" { - path = rctx.RoutePath - } else { - path = r.URL.Path - } - if len(path) > 1 && path[len(path)-1] == '/' { - if r.URL.RawQuery != "" { - path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery) - } else { - path = path[:len(path)-1] - } - http.Redirect(w, r, path, 301) - return - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} diff --git a/vendor/github.com/go-chi/chi/middleware/terminal.go b/vendor/github.com/go-chi/chi/middleware/terminal.go deleted file mode 100644 index a5d4241..0000000 --- a/vendor/github.com/go-chi/chi/middleware/terminal.go +++ /dev/null @@ -1,63 +0,0 @@ -package middleware - -// Ported from Goji's middleware, source: -// https://github.com/zenazn/goji/tree/master/web/middleware - -import ( - "fmt" - "io" - "os" -) - -var ( - // Normal colors - nBlack = []byte{'\033', '[', '3', '0', 'm'} - nRed = []byte{'\033', '[', '3', '1', 'm'} - nGreen = []byte{'\033', '[', '3', '2', 'm'} - nYellow = []byte{'\033', '[', '3', '3', 'm'} - nBlue = []byte{'\033', '[', '3', '4', 'm'} - nMagenta = []byte{'\033', '[', '3', '5', 'm'} - nCyan = []byte{'\033', '[', '3', '6', 'm'} - nWhite = []byte{'\033', '[', '3', '7', 'm'} - // Bright colors - bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'} - bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'} - bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'} - bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'} - bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'} - bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'} - bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'} - bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'} - - reset = []byte{'\033', '[', '0', 'm'} -) - -var isTTY bool - -func init() { - // This is sort of cheating: if stdout is a character device, we assume - // that means it's a TTY. Unfortunately, there are many non-TTY - // character devices, but fortunately stdout is rarely set to any of - // them. - // - // We could solve this properly by pulling in a dependency on - // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a - // heuristic for whether to print in color or in black-and-white, I'd - // really rather not. - fi, err := os.Stdout.Stat() - if err == nil { - m := os.ModeDevice | os.ModeCharDevice - isTTY = fi.Mode()&m == m - } -} - -// colorWrite -func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) { - if isTTY && useColor { - w.Write(color) - } - fmt.Fprintf(w, s, args...) - if isTTY && useColor { - w.Write(reset) - } -} diff --git a/vendor/github.com/go-chi/chi/middleware/throttle.go b/vendor/github.com/go-chi/chi/middleware/throttle.go deleted file mode 100644 index fdedd3c..0000000 --- a/vendor/github.com/go-chi/chi/middleware/throttle.go +++ /dev/null @@ -1,132 +0,0 @@ -package middleware - -import ( - "net/http" - "strconv" - "time" -) - -const ( - errCapacityExceeded = "Server capacity exceeded." - errTimedOut = "Timed out while waiting for a pending request to complete." - errContextCanceled = "Context was canceled." -) - -var ( - defaultBacklogTimeout = time.Second * 60 -) - -// ThrottleOpts represents a set of throttling options. 
-type ThrottleOpts struct { - Limit int - BacklogLimit int - BacklogTimeout time.Duration - RetryAfterFn func(ctxDone bool) time.Duration -} - -// Throttle is a middleware that limits number of currently processed requests -// at a time across all users. Note: Throttle is not a rate-limiter per user, -// instead it just puts a ceiling on the number of currentl in-flight requests -// being processed from the point from where the Throttle middleware is mounted. -func Throttle(limit int) func(http.Handler) http.Handler { - return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout}) -} - -// ThrottleBacklog is a middleware that limits number of currently processed -// requests at a time and provides a backlog for holding a finite number of -// pending requests. -func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler { - return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout}) -} - -// ThrottleWithOpts is a middleware that limits number of currently processed requests using passed ThrottleOpts. -func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler { - if opts.Limit < 1 { - panic("chi/middleware: Throttle expects limit > 0") - } - - if opts.BacklogLimit < 0 { - panic("chi/middleware: Throttle expects backlogLimit to be positive") - } - - t := throttler{ - tokens: make(chan token, opts.Limit), - backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit), - backlogTimeout: opts.BacklogTimeout, - retryAfterFn: opts.RetryAfterFn, - } - - // Filling tokens. - for i := 0; i < opts.Limit+opts.BacklogLimit; i++ { - if i < opts.Limit { - t.tokens <- token{} - } - t.backlogTokens <- token{} - } - - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - select { - - case <-ctx.Done(): - t.setRetryAfterHeaderIfNeeded(w, true) - http.Error(w, errContextCanceled, http.StatusServiceUnavailable) - return - - case btok := <-t.backlogTokens: - timer := time.NewTimer(t.backlogTimeout) - - defer func() { - t.backlogTokens <- btok - }() - - select { - case <-timer.C: - t.setRetryAfterHeaderIfNeeded(w, false) - http.Error(w, errTimedOut, http.StatusServiceUnavailable) - return - case <-ctx.Done(): - timer.Stop() - t.setRetryAfterHeaderIfNeeded(w, true) - http.Error(w, errContextCanceled, http.StatusServiceUnavailable) - return - case tok := <-t.tokens: - defer func() { - timer.Stop() - t.tokens <- tok - }() - next.ServeHTTP(w, r) - } - return - - default: - t.setRetryAfterHeaderIfNeeded(w, false) - http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable) - return - } - } - - return http.HandlerFunc(fn) - } -} - -// token represents a request that is being processed. -type token struct{} - -// throttler limits number of currently processed requests at a time. -type throttler struct { - tokens chan token - backlogTokens chan token - backlogTimeout time.Duration - retryAfterFn func(ctxDone bool) time.Duration -} - -// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized. 
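For reference, a minimal sketch of how the Throttle/ThrottleBacklog middleware being dropped from the vendored go-chi/chi tree above is typically mounted. Only the ThrottleBacklog signature comes from the deleted file; the package layout, port, route, and handler body below are illustrative assumptions, not part of this change.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	// Process at most 50 requests concurrently and queue up to 20 more,
	// each waiting at most 5s before receiving 503 Service Unavailable.
	r.Use(middleware.ThrottleBacklog(50, 20, 5*time.Second))
	r.Get("/", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}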
-func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) { - if t.retryAfterFn == nil { - return - } - w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds()))) -} diff --git a/vendor/github.com/go-chi/chi/middleware/timeout.go b/vendor/github.com/go-chi/chi/middleware/timeout.go deleted file mode 100644 index 8e37353..0000000 --- a/vendor/github.com/go-chi/chi/middleware/timeout.go +++ /dev/null @@ -1,49 +0,0 @@ -package middleware - -import ( - "context" - "net/http" - "time" -) - -// Timeout is a middleware that cancels ctx after a given timeout and return -// a 504 Gateway Timeout error to the client. -// -// It's required that you select the ctx.Done() channel to check for the signal -// if the context has reached its deadline and return, otherwise the timeout -// signal will be just ignored. -// -// ie. a route/handler may look like: -// -// r.Get("/long", func(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// processTime := time.Duration(rand.Intn(4)+1) * time.Second -// -// select { -// case <-ctx.Done(): -// return -// -// case <-time.After(processTime): -// // The above channel simulates some hard work. -// } -// -// w.Write([]byte("done")) -// }) -// -func Timeout(timeout time.Duration) func(next http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithTimeout(r.Context(), timeout) - defer func() { - cancel() - if ctx.Err() == context.DeadlineExceeded { - w.WriteHeader(http.StatusGatewayTimeout) - } - }() - - r = r.WithContext(ctx) - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} diff --git a/vendor/github.com/go-chi/chi/middleware/url_format.go b/vendor/github.com/go-chi/chi/middleware/url_format.go deleted file mode 100644 index 5749e4f..0000000 --- a/vendor/github.com/go-chi/chi/middleware/url_format.go +++ /dev/null @@ -1,72 +0,0 @@ -package middleware - -import ( - "context" - "net/http" - "strings" - - "github.com/go-chi/chi" -) - -var ( - // URLFormatCtxKey is the context.Context key to store the URL format data - // for a request. - URLFormatCtxKey = &contextKey{"URLFormat"} -) - -// URLFormat is a middleware that parses the url extension from a request path and stores it -// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will -// trim the suffix from the routing path and continue routing. -// -// Routers should not include a url parameter for the suffix when using this middleware. -// -// Sample usage.. 
for url paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml` -// -// func routes() http.Handler { -// r := chi.NewRouter() -// r.Use(middleware.URLFormat) -// -// r.Get("/articles/{id}", ListArticles) -// -// return r -// } -// -// func ListArticles(w http.ResponseWriter, r *http.Request) { -// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string) -// -// switch urlFormat { -// case "json": -// render.JSON(w, r, articles) -// case "xml:" -// render.XML(w, r, articles) -// default: -// render.JSON(w, r, articles) -// } -// } -// -func URLFormat(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - var format string - path := r.URL.Path - - if strings.Index(path, ".") > 0 { - base := strings.LastIndex(path, "/") - idx := strings.Index(path[base:], ".") - - if idx > 0 { - idx += base - format = path[idx+1:] - - rctx := chi.RouteContext(r.Context()) - rctx.RoutePath = path[:idx] - } - } - - r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format)) - - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} diff --git a/vendor/github.com/go-chi/chi/middleware/value.go b/vendor/github.com/go-chi/chi/middleware/value.go deleted file mode 100644 index fbbd039..0000000 --- a/vendor/github.com/go-chi/chi/middleware/value.go +++ /dev/null @@ -1,17 +0,0 @@ -package middleware - -import ( - "context" - "net/http" -) - -// WithValue is a middleware that sets a given key/value in a context chain. -func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - r = r.WithContext(context.WithValue(r.Context(), key, val)) - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} diff --git a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go deleted file mode 100644 index 445c8e2..0000000 --- a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go +++ /dev/null @@ -1,183 +0,0 @@ -package middleware - -// The original work was derived from Goji's middleware, source: -// https://github.com/zenazn/goji/tree/master/web/middleware - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to -// hook into various parts of the response process. -func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { - _, fl := w.(http.Flusher) - - bw := basicWriter{ResponseWriter: w} - - if protoMajor == 2 { - _, ps := w.(http.Pusher) - if fl && ps { - return &http2FancyWriter{bw} - } - } else { - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - if fl && hj && rf { - return &httpFancyWriter{bw} - } - } - if fl { - return &flushWriter{bw} - } - - return &bw -} - -// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook -// into various parts of the response process. -type WrapResponseWriter interface { - http.ResponseWriter - // Status returns the HTTP status of the request, or 0 if one has not - // yet been sent. - Status() int - // BytesWritten returns the total number of bytes sent to the client. - BytesWritten() int - // Tee causes the response body to be written to the given io.Writer in - // addition to proxying the writes through. Only one io.Writer can be - // tee'd to at once: setting a second one will overwrite the first. 
- // Writes will be sent to the proxy before being written to this - // io.Writer. It is illegal for the tee'd writer to be modified - // concurrently with writes. - Tee(io.Writer) - // Unwrap returns the original proxied target. - Unwrap() http.ResponseWriter -} - -// basicWriter wraps a http.ResponseWriter that implements the minimal -// http.ResponseWriter interface. -type basicWriter struct { - http.ResponseWriter - wroteHeader bool - code int - bytes int - tee io.Writer -} - -func (b *basicWriter) WriteHeader(code int) { - if !b.wroteHeader { - b.code = code - b.wroteHeader = true - } - b.ResponseWriter.WriteHeader(code) -} - -func (b *basicWriter) Write(buf []byte) (int, error) { - b.maybeWriteHeader() - n, err := b.ResponseWriter.Write(buf) - if b.tee != nil { - _, err2 := b.tee.Write(buf[:n]) - // Prefer errors generated by the proxied writer. - if err == nil { - err = err2 - } - } - b.bytes += n - return n, err -} - -func (b *basicWriter) maybeWriteHeader() { - if !b.wroteHeader { - b.WriteHeader(http.StatusOK) - } -} - -func (b *basicWriter) Status() int { - return b.code -} - -func (b *basicWriter) BytesWritten() int { - return b.bytes -} - -func (b *basicWriter) Tee(w io.Writer) { - b.tee = w -} - -func (b *basicWriter) Unwrap() http.ResponseWriter { - return b.ResponseWriter -} - -type flushWriter struct { - basicWriter -} - -func (f *flushWriter) Flush() { - f.wroteHeader = true - - fl := f.basicWriter.ResponseWriter.(http.Flusher) - fl.Flush() -} - -var _ http.Flusher = &flushWriter{} - -// httpFancyWriter is a HTTP writer that additionally satisfies -// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case -// of wrapping the http.ResponseWriter that package http gives you, in order to -// make the proxied object support the full method set of the proxied object. -type httpFancyWriter struct { - basicWriter -} - -func (f *httpFancyWriter) Flush() { - f.wroteHeader = true - - fl := f.basicWriter.ResponseWriter.(http.Flusher) - fl.Flush() -} - -func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hj := f.basicWriter.ResponseWriter.(http.Hijacker) - return hj.Hijack() -} - -func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error { - return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts) -} - -func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) { - if f.basicWriter.tee != nil { - n, err := io.Copy(&f.basicWriter, r) - f.basicWriter.bytes += int(n) - return n, err - } - rf := f.basicWriter.ResponseWriter.(io.ReaderFrom) - f.basicWriter.maybeWriteHeader() - n, err := rf.ReadFrom(r) - f.basicWriter.bytes += int(n) - return n, err -} - -var _ http.Flusher = &httpFancyWriter{} -var _ http.Hijacker = &httpFancyWriter{} -var _ http.Pusher = &http2FancyWriter{} -var _ io.ReaderFrom = &httpFancyWriter{} - -// http2FancyWriter is a HTTP2 writer that additionally satisfies -// http.Flusher, and io.ReaderFrom. It exists for the common case -// of wrapping the http.ResponseWriter that package http gives you, in order to -// make the proxied object support the full method set of the proxied object. 
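As a rough illustration of the WrapResponseWriter proxy removed above: a wrapping middleware can observe the status code and byte count after the inner handler runs. The statusLogger name, the example program, and the listen address are hypothetical; only NewWrapResponseWriter, Status, and BytesWritten are taken from the deleted file.

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi/middleware"
)

// statusLogger is a hypothetical middleware that wraps the ResponseWriter
// and logs the status and bytes written once the next handler has finished.
func statusLogger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
		next.ServeHTTP(ww, r)
		log.Printf("%s %s -> %d (%d bytes)", r.Method, r.URL.Path, ww.Status(), ww.BytesWritten())
	})
}

func main() {
	h := statusLogger(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ok"))
	}))
	log.Fatal(http.ListenAndServe(":8080", h))
}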
-type http2FancyWriter struct { - basicWriter -} - -func (f *http2FancyWriter) Flush() { - f.wroteHeader = true - - fl := f.basicWriter.ResponseWriter.(http.Flusher) - fl.Flush() -} - -var _ http.Flusher = &http2FancyWriter{} diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go deleted file mode 100644 index 52950e9..0000000 --- a/vendor/github.com/go-chi/chi/mux.go +++ /dev/null @@ -1,466 +0,0 @@ -package chi - -import ( - "context" - "fmt" - "net/http" - "strings" - "sync" -) - -var _ Router = &Mux{} - -// Mux is a simple HTTP route multiplexer that parses a request path, -// records any URL params, and executes an end handler. It implements -// the http.Handler interface and is friendly with the standard library. -// -// Mux is designed to be fast, minimal and offer a powerful API for building -// modular and composable HTTP services with a large set of handlers. It's -// particularly useful for writing large REST API services that break a handler -// into many smaller parts composed of middlewares and end handlers. -type Mux struct { - // The radix trie router - tree *node - - // The middleware stack - middlewares []func(http.Handler) http.Handler - - // Controls the behaviour of middleware chain generation when a mux - // is registered as an inline group inside another mux. - inline bool - parent *Mux - - // The computed mux handler made of the chained middleware stack and - // the tree router - handler http.Handler - - // Routing context pool - pool *sync.Pool - - // Custom route not found handler - notFoundHandler http.HandlerFunc - - // Custom method not allowed handler - methodNotAllowedHandler http.HandlerFunc -} - -// NewMux returns a newly initialized Mux object that implements the Router -// interface. -func NewMux() *Mux { - mux := &Mux{tree: &node{}, pool: &sync.Pool{}} - mux.pool.New = func() interface{} { - return NewRouteContext() - } - return mux -} - -// ServeHTTP is the single method of the http.Handler interface that makes -// Mux interoperable with the standard library. It uses a sync.Pool to get and -// reuse routing contexts for each request. -func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Ensure the mux has some routes defined on the mux - if mx.handler == nil { - mx.NotFoundHandler().ServeHTTP(w, r) - return - } - - // Check if a routing context already exists from a parent router. - rctx, _ := r.Context().Value(RouteCtxKey).(*Context) - if rctx != nil { - mx.handler.ServeHTTP(w, r) - return - } - - // Fetch a RouteContext object from the sync pool, and call the computed - // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. - // Once the request is finished, reset the routing context and put it back - // into the pool for reuse from another request. - rctx = mx.pool.Get().(*Context) - rctx.Reset() - rctx.Routes = mx - - // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation - r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) - - // Serve the request and once its done, put the request context back in the sync pool - mx.handler.ServeHTTP(w, r) - mx.pool.Put(rctx) -} - -// Use appends a middleware handler to the Mux middleware stack. -// -// The middleware stack for any Mux will execute before searching for a matching -// route to a specific handler, which provides opportunity to respond early, -// change the course of the request execution, or set request-scoped values for -// the next http.Handler. 
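A short sketch of the Mux behaviour described above: middlewares registered with Use run before route matching, and must all be registered before the first route. The route, handlers, and port below are illustrative; the quoted panic message is from the deleted mux.go.

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	// Use must be called before any route is registered; the mux panics
	// otherwise ("chi: all middlewares must be defined before routes on a mux").
	r.Use(middleware.RequestID, middleware.RealIP, middleware.Recoverer)

	r.Get("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}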
-func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { - if mx.handler != nil { - panic("chi: all middlewares must be defined before routes on a mux") - } - mx.middlewares = append(mx.middlewares, middlewares...) -} - -// Handle adds the route `pattern` that matches any http method to -// execute the `handler` http.Handler. -func (mx *Mux) Handle(pattern string, handler http.Handler) { - mx.handle(mALL, pattern, handler) -} - -// HandleFunc adds the route `pattern` that matches any http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mALL, pattern, handlerFn) -} - -// Method adds the route `pattern` that matches `method` http method to -// execute the `handler` http.Handler. -func (mx *Mux) Method(method, pattern string, handler http.Handler) { - m, ok := methodMap[strings.ToUpper(method)] - if !ok { - panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) - } - mx.handle(m, pattern, handler) -} - -// MethodFunc adds the route `pattern` that matches `method` http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { - mx.Method(method, pattern, handlerFn) -} - -// Connect adds the route `pattern` that matches a CONNECT http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mCONNECT, pattern, handlerFn) -} - -// Delete adds the route `pattern` that matches a DELETE http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mDELETE, pattern, handlerFn) -} - -// Get adds the route `pattern` that matches a GET http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mGET, pattern, handlerFn) -} - -// Head adds the route `pattern` that matches a HEAD http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mHEAD, pattern, handlerFn) -} - -// Options adds the route `pattern` that matches a OPTIONS http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mOPTIONS, pattern, handlerFn) -} - -// Patch adds the route `pattern` that matches a PATCH http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mPATCH, pattern, handlerFn) -} - -// Post adds the route `pattern` that matches a POST http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mPOST, pattern, handlerFn) -} - -// Put adds the route `pattern` that matches a PUT http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mPUT, pattern, handlerFn) -} - -// Trace adds the route `pattern` that matches a TRACE http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mTRACE, pattern, handlerFn) -} - -// NotFound sets a custom http.HandlerFunc for routing paths that could -// not be found. The default 404 handler is `http.NotFound`. 
-func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { - // Build NotFound handler chain - m := mx - hFn := handlerFn - if mx.inline && mx.parent != nil { - m = mx.parent - hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP - } - - // Update the notFoundHandler from this point forward - m.notFoundHandler = hFn - m.updateSubRoutes(func(subMux *Mux) { - if subMux.notFoundHandler == nil { - subMux.NotFound(hFn) - } - }) -} - -// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the -// method is unresolved. The default handler returns a 405 with an empty body. -func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { - // Build MethodNotAllowed handler chain - m := mx - hFn := handlerFn - if mx.inline && mx.parent != nil { - m = mx.parent - hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP - } - - // Update the methodNotAllowedHandler from this point forward - m.methodNotAllowedHandler = hFn - m.updateSubRoutes(func(subMux *Mux) { - if subMux.methodNotAllowedHandler == nil { - subMux.MethodNotAllowed(hFn) - } - }) -} - -// With adds inline middlewares for an endpoint handler. -func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { - // Similarly as in handle(), we must build the mux handler once additional - // middleware registration isn't allowed for this stack, like now. - if !mx.inline && mx.handler == nil { - mx.buildRouteHandler() - } - - // Copy middlewares from parent inline muxs - var mws Middlewares - if mx.inline { - mws = make(Middlewares, len(mx.middlewares)) - copy(mws, mx.middlewares) - } - mws = append(mws, middlewares...) - - im := &Mux{ - pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws, - notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler, - } - - return im -} - -// Group creates a new inline-Mux with a fresh middleware stack. It's useful -// for a group of handlers along the same routing path that use an additional -// set of middlewares. See _examples/. -func (mx *Mux) Group(fn func(r Router)) Router { - im := mx.With().(*Mux) - if fn != nil { - fn(im) - } - return im -} - -// Route creates a new Mux with a fresh middleware stack and mounts it -// along the `pattern` as a subrouter. Effectively, this is a short-hand -// call to Mount. See _examples/. -func (mx *Mux) Route(pattern string, fn func(r Router)) Router { - subRouter := NewRouter() - if fn != nil { - fn(subRouter) - } - mx.Mount(pattern, subRouter) - return subRouter -} - -// Mount attaches another http.Handler or chi Router as a subrouter along a routing -// path. It's very useful to split up a large API as many independent routers and -// compose them as a single service using Mount. See _examples/. -// -// Note that Mount() simply sets a wildcard along the `pattern` that will continue -// routing at the `handler`, which in most cases is another chi.Router. As a result, -// if you define two Mount() routes on the exact same pattern the mount will panic. -func (mx *Mux) Mount(pattern string, handler http.Handler) { - // Provide runtime safety for ensuring a pattern isn't mounted on an existing - // routing pattern. - if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") { - panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) - } - - // Assign sub-Router's with the parent not found & method not allowed handler if not specified. 
- subr, ok := handler.(*Mux) - if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { - subr.NotFound(mx.notFoundHandler) - } - if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { - subr.MethodNotAllowed(mx.methodNotAllowedHandler) - } - - mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - rctx := RouteContext(r.Context()) - rctx.RoutePath = mx.nextRoutePath(rctx) - handler.ServeHTTP(w, r) - }) - - if pattern == "" || pattern[len(pattern)-1] != '/' { - mx.handle(mALL|mSTUB, pattern, mountHandler) - mx.handle(mALL|mSTUB, pattern+"/", mountHandler) - pattern += "/" - } - - method := mALL - subroutes, _ := handler.(Routes) - if subroutes != nil { - method |= mSTUB - } - n := mx.handle(method, pattern+"*", mountHandler) - - if subroutes != nil { - n.subroutes = subroutes - } -} - -// Routes returns a slice of routing information from the tree, -// useful for traversing available routes of a router. -func (mx *Mux) Routes() []Route { - return mx.tree.routes() -} - -// Middlewares returns a slice of middleware handler functions. -func (mx *Mux) Middlewares() Middlewares { - return mx.middlewares -} - -// Match searches the routing tree for a handler that matches the method/path. -// It's similar to routing a http request, but without executing the handler -// thereafter. -// -// Note: the *Context state is updated during execution, so manage -// the state carefully or make a NewRouteContext(). -func (mx *Mux) Match(rctx *Context, method, path string) bool { - m, ok := methodMap[method] - if !ok { - return false - } - - node, _, h := mx.tree.FindRoute(rctx, m, path) - - if node != nil && node.subroutes != nil { - rctx.RoutePath = mx.nextRoutePath(rctx) - return node.subroutes.Match(rctx, method, rctx.RoutePath) - } - - return h != nil -} - -// NotFoundHandler returns the default Mux 404 responder whenever a route -// cannot be found. -func (mx *Mux) NotFoundHandler() http.HandlerFunc { - if mx.notFoundHandler != nil { - return mx.notFoundHandler - } - return http.NotFound -} - -// MethodNotAllowedHandler returns the default Mux 405 responder whenever -// a method cannot be resolved for a route. -func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { - if mx.methodNotAllowedHandler != nil { - return mx.methodNotAllowedHandler - } - return methodNotAllowedHandler -} - -// buildRouteHandler builds the single mux handler that is a chain of the middleware -// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this -// point, no other middlewares can be registered on this Mux's stack. But you can still -// compose additional middlewares via Group()'s or using a chained middleware handler. -func (mx *Mux) buildRouteHandler() { - mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) -} - -// handle registers a http.Handler in the routing tree for a particular http method -// and routing pattern. -func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { - if len(pattern) == 0 || pattern[0] != '/' { - panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) - } - - // Build the computed routing handler for this routing pattern. 
- if !mx.inline && mx.handler == nil { - mx.buildRouteHandler() - } - - // Build endpoint handler with inline middlewares for the route - var h http.Handler - if mx.inline { - mx.handler = http.HandlerFunc(mx.routeHTTP) - h = Chain(mx.middlewares...).Handler(handler) - } else { - h = handler - } - - // Add the endpoint to the tree and return the node - return mx.tree.InsertRoute(method, pattern, h) -} - -// routeHTTP routes a http.Request through the Mux routing tree to serve -// the matching handler for a particular http method. -func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { - // Grab the route context object - rctx := r.Context().Value(RouteCtxKey).(*Context) - - // The request routing path - routePath := rctx.RoutePath - if routePath == "" { - if r.URL.RawPath != "" { - routePath = r.URL.RawPath - } else { - routePath = r.URL.Path - } - } - - // Check if method is supported by chi - if rctx.RouteMethod == "" { - rctx.RouteMethod = r.Method - } - method, ok := methodMap[rctx.RouteMethod] - if !ok { - mx.MethodNotAllowedHandler().ServeHTTP(w, r) - return - } - - // Find the route - if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil { - h.ServeHTTP(w, r) - return - } - if rctx.methodNotAllowed { - mx.MethodNotAllowedHandler().ServeHTTP(w, r) - } else { - mx.NotFoundHandler().ServeHTTP(w, r) - } -} - -func (mx *Mux) nextRoutePath(rctx *Context) string { - routePath := "/" - nx := len(rctx.routeParams.Keys) - 1 // index of last param in list - if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { - routePath = "/" + rctx.routeParams.Values[nx] - } - return routePath -} - -// Recursively update data on child routers. -func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { - for _, r := range mx.tree.routes() { - subMux, ok := r.SubRoutes.(*Mux) - if !ok { - continue - } - fn(subMux) - } -} - -// methodNotAllowedHandler is a helper function to respond with a 405, -// method not allowed. -func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(405) - w.Write(nil) -} diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go deleted file mode 100644 index fa9bd4b..0000000 --- a/vendor/github.com/go-chi/chi/tree.go +++ /dev/null @@ -1,846 +0,0 @@ -package chi - -// Radix tree implementation below is a based on the original work by -// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go -// (MIT licensed). It's been heavily modified for use as a HTTP routing tree. 
- -import ( - "fmt" - "math" - "net/http" - "regexp" - "sort" - "strconv" - "strings" -) - -type methodTyp int - -const ( - mSTUB methodTyp = 1 << iota - mCONNECT - mDELETE - mGET - mHEAD - mOPTIONS - mPATCH - mPOST - mPUT - mTRACE -) - -var mALL = mCONNECT | mDELETE | mGET | mHEAD | - mOPTIONS | mPATCH | mPOST | mPUT | mTRACE - -var methodMap = map[string]methodTyp{ - http.MethodConnect: mCONNECT, - http.MethodDelete: mDELETE, - http.MethodGet: mGET, - http.MethodHead: mHEAD, - http.MethodOptions: mOPTIONS, - http.MethodPatch: mPATCH, - http.MethodPost: mPOST, - http.MethodPut: mPUT, - http.MethodTrace: mTRACE, -} - -// RegisterMethod adds support for custom HTTP method handlers, available -// via Router#Method and Router#MethodFunc -func RegisterMethod(method string) { - if method == "" { - return - } - method = strings.ToUpper(method) - if _, ok := methodMap[method]; ok { - return - } - n := len(methodMap) - if n > strconv.IntSize { - panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) - } - mt := methodTyp(math.Exp2(float64(n))) - methodMap[method] = mt - mALL |= mt -} - -type nodeTyp uint8 - -const ( - ntStatic nodeTyp = iota // /home - ntRegexp // /{id:[0-9]+} - ntParam // /{user} - ntCatchAll // /api/v1/* -) - -type node struct { - // node type: static, regexp, param, catchAll - typ nodeTyp - - // first byte of the prefix - label byte - - // first byte of the child prefix - tail byte - - // prefix is the common prefix we ignore - prefix string - - // regexp matcher for regexp nodes - rex *regexp.Regexp - - // HTTP handler endpoints on the leaf node - endpoints endpoints - - // subroutes on the leaf node - subroutes Routes - - // child nodes should be stored in-order for iteration, - // in groups of the node type. - children [ntCatchAll + 1]nodes -} - -// endpoints is a mapping of http method constants to handlers -// for a given route. -type endpoints map[methodTyp]*endpoint - -type endpoint struct { - // endpoint handler - handler http.Handler - - // pattern is the routing pattern for handler nodes - pattern string - - // parameter keys recorded on handler nodes - paramKeys []string -} - -func (s endpoints) Value(method methodTyp) *endpoint { - mh, ok := s[method] - if !ok { - mh = &endpoint{} - s[method] = mh - } - return mh -} - -func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { - var parent *node - search := pattern - - for { - // Handle key exhaustion - if len(search) == 0 { - // Insert or update the node's leaf handler - n.setEndpoint(method, handler, pattern) - return n - } - - // We're going to be searching for a wild node next, - // in this case, we need to get the tail - var label = search[0] - var segTail byte - var segEndIdx int - var segTyp nodeTyp - var segRexpat string - if label == '{' || label == '*' { - segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search) - } - - var prefix string - if segTyp == ntRegexp { - prefix = segRexpat - } - - // Look for the edge to attach to - parent = n - n = n.getEdge(segTyp, label, segTail, prefix) - - // No edge, create one - if n == nil { - child := &node{label: label, tail: segTail, prefix: search} - hn := parent.addChild(child, search) - hn.setEndpoint(method, handler, pattern) - - return hn - } - - // Found an edge to match the pattern - - if n.typ > ntStatic { - // We found a param node, trim the param from the search path and continue. 
- // This param/wild pattern segment would already be on the tree from a previous - // call to addChild when creating a new node. - search = search[segEndIdx:] - continue - } - - // Static nodes fall below here. - // Determine longest prefix of the search key on match. - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - // the common prefix is as long as the current node's prefix we're attempting to insert. - // keep the search going. - search = search[commonPrefix:] - continue - } - - // Split the node - child := &node{ - typ: ntStatic, - prefix: search[:commonPrefix], - } - parent.replaceChild(search[0], segTail, child) - - // Restore the existing node - n.label = n.prefix[commonPrefix] - n.prefix = n.prefix[commonPrefix:] - child.addChild(n, n.prefix) - - // If the new key is a subset, set the method/handler on this node and finish. - search = search[commonPrefix:] - if len(search) == 0 { - child.setEndpoint(method, handler, pattern) - return child - } - - // Create a new edge for the node - subchild := &node{ - typ: ntStatic, - label: search[0], - prefix: search, - } - hn := child.addChild(subchild, search) - hn.setEndpoint(method, handler, pattern) - return hn - } -} - -// addChild appends the new `child` node to the tree using the `pattern` as the trie key. -// For a URL router like chi's, we split the static, param, regexp and wildcard segments -// into different nodes. In addition, addChild will recursively call itself until every -// pattern segment is added to the url pattern tree as individual nodes, depending on type. -func (n *node) addChild(child *node, prefix string) *node { - search := prefix - - // handler leaf node added to the tree is the child. - // this may be overridden later down the flow - hn := child - - // Parse next segment - segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search) - - // Add child depending on next up segment - switch segTyp { - - case ntStatic: - // Search prefix is all static (that is, has no params in path) - // noop - - default: - // Search prefix contains a param, regexp or wildcard - - if segTyp == ntRegexp { - rex, err := regexp.Compile(segRexpat) - if err != nil { - panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat)) - } - child.prefix = segRexpat - child.rex = rex - } - - if segStartIdx == 0 { - // Route starts with a param - child.typ = segTyp - - if segTyp == ntCatchAll { - segStartIdx = -1 - } else { - segStartIdx = segEndIdx - } - if segStartIdx < 0 { - segStartIdx = len(search) - } - child.tail = segTail // for params, we set the tail - - if segStartIdx != len(search) { - // add static edge for the remaining part, split the end. - // its not possible to have adjacent param nodes, so its certainly - // going to be a static node next. 
- - search = search[segStartIdx:] // advance search position - - nn := &node{ - typ: ntStatic, - label: search[0], - prefix: search, - } - hn = child.addChild(nn, search) - } - - } else if segStartIdx > 0 { - // Route has some param - - // starts with a static segment - child.typ = ntStatic - child.prefix = search[:segStartIdx] - child.rex = nil - - // add the param edge node - search = search[segStartIdx:] - - nn := &node{ - typ: segTyp, - label: search[0], - tail: segTail, - } - hn = child.addChild(nn, search) - - } - } - - n.children[child.typ] = append(n.children[child.typ], child) - n.children[child.typ].Sort() - return hn -} - -func (n *node) replaceChild(label, tail byte, child *node) { - for i := 0; i < len(n.children[child.typ]); i++ { - if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail { - n.children[child.typ][i] = child - n.children[child.typ][i].label = label - n.children[child.typ][i].tail = tail - return - } - } - panic("chi: replacing missing child") -} - -func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node { - nds := n.children[ntyp] - for i := 0; i < len(nds); i++ { - if nds[i].label == label && nds[i].tail == tail { - if ntyp == ntRegexp && nds[i].prefix != prefix { - continue - } - return nds[i] - } - } - return nil -} - -func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) { - // Set the handler for the method type on the node - if n.endpoints == nil { - n.endpoints = make(endpoints) - } - - paramKeys := patParamKeys(pattern) - - if method&mSTUB == mSTUB { - n.endpoints.Value(mSTUB).handler = handler - } - if method&mALL == mALL { - h := n.endpoints.Value(mALL) - h.handler = handler - h.pattern = pattern - h.paramKeys = paramKeys - for _, m := range methodMap { - h := n.endpoints.Value(m) - h.handler = handler - h.pattern = pattern - h.paramKeys = paramKeys - } - } else { - h := n.endpoints.Value(method) - h.handler = handler - h.pattern = pattern - h.paramKeys = paramKeys - } -} - -func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) { - // Reset the context routing pattern and params - rctx.routePattern = "" - rctx.routeParams.Keys = rctx.routeParams.Keys[:0] - rctx.routeParams.Values = rctx.routeParams.Values[:0] - - // Find the routing handlers for the path - rn := n.findRoute(rctx, method, path) - if rn == nil { - return nil, nil, nil - } - - // Record the routing params in the request lifecycle - rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...) - rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...) - - // Record the routing pattern in the request lifecycle - if rn.endpoints[method].pattern != "" { - rctx.routePattern = rn.endpoints[method].pattern - rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern) - } - - return rn, rn.endpoints, rn.endpoints[method].handler -} - -// Recursive edge traversal by checking all nodeTyp groups along the way. -// It's like searching through a multi-dimensional radix trie. 
-func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node { - nn := n - search := path - - for t, nds := range nn.children { - ntyp := nodeTyp(t) - if len(nds) == 0 { - continue - } - - var xn *node - xsearch := search - - var label byte - if search != "" { - label = search[0] - } - - switch ntyp { - case ntStatic: - xn = nds.findEdge(label) - if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) { - continue - } - xsearch = xsearch[len(xn.prefix):] - - case ntParam, ntRegexp: - // short-circuit and return no matching route for empty param values - if xsearch == "" { - continue - } - - found := false - - // serially loop through each node grouped by the tail delimiter - for idx := 0; idx < len(nds); idx++ { - xn = nds[idx] - - // label for param nodes is the delimiter byte - p := strings.IndexByte(xsearch, xn.tail) - - if p < 0 { - if xn.tail == '/' { - p = len(xsearch) - } else { - continue - } - } - - if ntyp == ntRegexp && xn.rex != nil { - if !xn.rex.Match([]byte(xsearch[:p])) { - continue - } - } else if strings.IndexByte(xsearch[:p], '/') != -1 { - // avoid a match across path segments - continue - } - - rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p]) - xsearch = xsearch[p:] - found = true - break - } - - if !found { - rctx.routeParams.Values = append(rctx.routeParams.Values, "") - } - - default: - // catch-all nodes - rctx.routeParams.Values = append(rctx.routeParams.Values, search) - xn = nds[0] - xsearch = "" - } - - if xn == nil { - continue - } - - // did we find it yet? - if len(xsearch) == 0 { - if xn.isLeaf() { - h := xn.endpoints[method] - if h != nil && h.handler != nil { - rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) - return xn - } - - // flag that the routing context found a route, but not a corresponding - // supported method - rctx.methodNotAllowed = true - } - } - - // recursively find the next node.. 
- fin := xn.findRoute(rctx, method, xsearch) - if fin != nil { - return fin - } - - // Did not find final handler, let's remove the param here if it was set - if xn.typ > ntStatic { - if len(rctx.routeParams.Values) > 0 { - rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1] - } - } - - } - - return nil -} - -func (n *node) findEdge(ntyp nodeTyp, label byte) *node { - nds := n.children[ntyp] - num := len(nds) - idx := 0 - - switch ntyp { - case ntStatic, ntParam, ntRegexp: - i, j := 0, num-1 - for i <= j { - idx = i + (j-i)/2 - if label > nds[idx].label { - i = idx + 1 - } else if label < nds[idx].label { - j = idx - 1 - } else { - i = num // breaks cond - } - } - if nds[idx].label != label { - return nil - } - return nds[idx] - - default: // catch all - return nds[idx] - } -} - -func (n *node) isLeaf() bool { - return n.endpoints != nil -} - -func (n *node) findPattern(pattern string) bool { - nn := n - for _, nds := range nn.children { - if len(nds) == 0 { - continue - } - - n = nn.findEdge(nds[0].typ, pattern[0]) - if n == nil { - continue - } - - var idx int - var xpattern string - - switch n.typ { - case ntStatic: - idx = longestPrefix(pattern, n.prefix) - if idx < len(n.prefix) { - continue - } - - case ntParam, ntRegexp: - idx = strings.IndexByte(pattern, '}') + 1 - - case ntCatchAll: - idx = longestPrefix(pattern, "*") - - default: - panic("chi: unknown node type") - } - - xpattern = pattern[idx:] - if len(xpattern) == 0 { - return true - } - - return n.findPattern(xpattern) - } - return false -} - -func (n *node) routes() []Route { - rts := []Route{} - - n.walk(func(eps endpoints, subroutes Routes) bool { - if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil { - return false - } - - // Group methodHandlers by unique patterns - pats := make(map[string]endpoints) - - for mt, h := range eps { - if h.pattern == "" { - continue - } - p, ok := pats[h.pattern] - if !ok { - p = endpoints{} - pats[h.pattern] = p - } - p[mt] = h - } - - for p, mh := range pats { - hs := make(map[string]http.Handler) - if mh[mALL] != nil && mh[mALL].handler != nil { - hs["*"] = mh[mALL].handler - } - - for mt, h := range mh { - if h.handler == nil { - continue - } - m := methodTypString(mt) - if m == "" { - continue - } - hs[m] = h.handler - } - - rt := Route{p, hs, subroutes} - rts = append(rts, rt) - } - - return false - }) - - return rts -} - -func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { - // Visit the leaf values if any - if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { - return true - } - - // Recurse on the children - for _, ns := range n.children { - for _, cn := range ns { - if cn.walk(fn) { - return true - } - } - } - return false -} - -// patNextSegment returns the next segment details from a pattern: -// node type, param key, regexp string, param tail byte, param starting index, param ending index -func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) { - ps := strings.Index(pattern, "{") - ws := strings.Index(pattern, "*") - - if ps < 0 && ws < 0 { - return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing - } - - // Sanity check - if ps >= 0 && ws >= 0 && ws < ps { - panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'") - } - - var tail byte = '/' // Default endpoint tail to / byte - - if ps >= 0 { - // Param/Regexp pattern is next - nt := ntParam - - // Read to closing } taking into account opens and closes in 
curl count (cc) - cc := 0 - pe := ps - for i, c := range pattern[ps:] { - if c == '{' { - cc++ - } else if c == '}' { - cc-- - if cc == 0 { - pe = ps + i - break - } - } - } - if pe == ps { - panic("chi: route param closing delimiter '}' is missing") - } - - key := pattern[ps+1 : pe] - pe++ // set end to next position - - if pe < len(pattern) { - tail = pattern[pe] - } - - var rexpat string - if idx := strings.Index(key, ":"); idx >= 0 { - nt = ntRegexp - rexpat = key[idx+1:] - key = key[:idx] - } - - if len(rexpat) > 0 { - if rexpat[0] != '^' { - rexpat = "^" + rexpat - } - if rexpat[len(rexpat)-1] != '$' { - rexpat += "$" - } - } - - return nt, key, rexpat, tail, ps, pe - } - - // Wildcard pattern as finale - if ws < len(pattern)-1 { - panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead") - } - return ntCatchAll, "*", "", 0, ws, len(pattern) -} - -func patParamKeys(pattern string) []string { - pat := pattern - paramKeys := []string{} - for { - ptyp, paramKey, _, _, _, e := patNextSegment(pat) - if ptyp == ntStatic { - return paramKeys - } - for i := 0; i < len(paramKeys); i++ { - if paramKeys[i] == paramKey { - panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey)) - } - } - paramKeys = append(paramKeys, paramKey) - pat = pat[e:] - } -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -func methodTypString(method methodTyp) string { - for s, t := range methodMap { - if method == t { - return s - } - } - return "" -} - -type nodes []*node - -// Sort the list of nodes by label -func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() } -func (ns nodes) Len() int { return len(ns) } -func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } -func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } - -// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes. -// The list order determines the traversal order. -func (ns nodes) tailSort() { - for i := len(ns) - 1; i >= 0; i-- { - if ns[i].typ > ntStatic && ns[i].tail == '/' { - ns.Swap(i, len(ns)-1) - return - } - } -} - -func (ns nodes) findEdge(label byte) *node { - num := len(ns) - idx := 0 - i, j := 0, num-1 - for i <= j { - idx = i + (j-i)/2 - if label > ns[idx].label { - i = idx + 1 - } else if label < ns[idx].label { - j = idx - 1 - } else { - i = num // breaks cond - } - } - if ns[idx].label != label { - return nil - } - return ns[idx] -} - -// Route describes the details of a routing handler. -// Handlers map key is an HTTP method -type Route struct { - Pattern string - Handlers map[string]http.Handler - SubRoutes Routes -} - -// WalkFunc is the type of the function called for each method and route visited by Walk. -type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error - -// Walk walks any router tree that implements Routes interface. -func Walk(r Routes, walkFn WalkFunc) error { - return walk(r, walkFn, "") -} - -func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error { - for _, route := range r.Routes() { - mws := make([]func(http.Handler) http.Handler, len(parentMw)) - copy(mws, parentMw) - mws = append(mws, r.Middlewares()...) 
- - if route.SubRoutes != nil { - if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil { - return err - } - continue - } - - for method, handler := range route.Handlers { - if method == "*" { - // Ignore a "catchAll" method, since we pass down all the specific methods for each route. - continue - } - - fullRoute := parentRoute + route.Pattern - fullRoute = strings.Replace(fullRoute, "/*/", "/", -1) - - if chain, ok := handler.(*ChainHandler); ok { - if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil { - return err - } - } else { - if err := walkFn(method, fullRoute, handler, mws...); err != nil { - return err - } - } - } - } - - return nil -} diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md index a201a3d..5492dd9 100644 --- a/vendor/github.com/go-kit/kit/log/README.md +++ b/vendor/github.com/go-kit/kit/log/README.md @@ -1,5 +1,14 @@ # package log +**Deprecation notice:** The core Go kit log packages (log, log/level, log/term, and +log/syslog) have been moved to their own repository at github.com/go-kit/log. +The corresponding packages in this directory remain for backwards compatibility. +Their types alias the types and their functions call the functions provided by +the new repository. Using either import path should be equivalent. Prefer the +new import path when practical. + +______ + `package log` provides a minimal interface for structured logging in services. It may be wrapped to encode conventions, enforce type-safety, provide leveled logging, and so on. It can be used for both typical application log events, diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go index 918c0af..c9873f4 100644 --- a/vendor/github.com/go-kit/kit/log/doc.go +++ b/vendor/github.com/go-kit/kit/log/doc.go @@ -1,5 +1,7 @@ // Package log provides a structured logger. // +// Deprecated: Use github.com/go-kit/log instead. +// // Structured logging produces logs easily consumed later by humans or // machines. Humans might be interested in debugging errors, or tracing // specific requests. Machines might be interested in counting interesting @@ -39,8 +41,8 @@ // // A contextual logger stores keyvals that it includes in all log events. // Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With and WithPrefix add context to -// a logger. We can use With to improve the RunTask example. +// consistency in the resulting log output. With, WithPrefix, and WithSuffix +// add context to a logger. We can use With to improve the RunTask example. // // func RunTask(task Task, logger log.Logger) string { // logger = log.With(logger, "taskID", task.ID) diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go index 0cedbf8..edfde2f 100644 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ b/vendor/github.com/go-kit/kit/log/json_logger.go @@ -1,91 +1,15 @@ package log import ( - "encoding" - "encoding/json" - "fmt" "io" - "reflect" -) -type jsonLogger struct { - io.Writer -} + "github.com/go-kit/log" +) // NewJSONLogger returns a Logger that encodes keyvals to the Writer as a // single JSON object. Each log event produces no more than one call to // w.Write. The passed Writer must be safe for concurrent use by multiple // goroutines if the returned Logger will be used concurrently. 
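Since the updated vendor copy above turns the go-kit/kit/log package into thin forwarders to github.com/go-kit/log, existing call sites keep compiling unchanged. A minimal sketch under that assumption; the message fields below are illustrative.

package main

import (
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	// NewJSONLogger now simply delegates to github.com/go-kit/log, so the old
	// import path behaves the same as the new one.
	logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stderr))
	logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC, "caller", kitlog.DefaultCaller)
	logger.Log("msg", "hello from the old import path")
}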
func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - enc := json.NewEncoder(l.Writer) - enc.SetEscapeHTML(false) - return enc.Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. - switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - panic(panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - panic(panicVal) - } - } - }() - s = err.Error() - return + return log.NewJSONLogger(w) } diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go index 505d307..7baf870 100644 --- a/vendor/github.com/go-kit/kit/log/level/doc.go +++ b/vendor/github.com/go-kit/kit/log/level/doc.go @@ -1,6 +1,9 @@ -// Package level implements leveled logging on top of Go kit's log package. To -// use the level package, create a logger as per normal in your func main, and -// wrap it with level.NewFilter. +// Package level implements leveled logging on top of Go kit's log package. +// +// Deprecated: Use github.com/go-kit/log/level instead. +// +// To use the level package, create a logger as per normal in your func main, +// and wrap it with level.NewFilter. // // var logger log.Logger // logger = log.NewLogfmtLogger(os.Stderr) diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go index fceafc4..803e8b9 100644 --- a/vendor/github.com/go-kit/kit/log/level/level.go +++ b/vendor/github.com/go-kit/kit/log/level/level.go @@ -1,25 +1,28 @@ package level -import "github.com/go-kit/kit/log" +import ( + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) // Error returns a logger that includes a Key/ErrorValue pair. func Error(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), ErrorValue()) + return level.Error(logger) } // Warn returns a logger that includes a Key/WarnValue pair. func Warn(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), WarnValue()) + return level.Warn(logger) } // Info returns a logger that includes a Key/InfoValue pair. func Info(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), InfoValue()) + return level.Info(logger) } // Debug returns a logger that includes a Key/DebugValue pair. 
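// An illustrative sketch (the messages are made up): since each helper above
// now forwards to github.com/go-kit/log/level, the behaviour should be
// unchanged — every helper prepends a "level" key to the event.

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	_ = level.Info(logger).Log("msg", "starting token refresher")
	_ = level.Error(logger).Log("msg", "token refresh failed", "err", "timeout")
	// Prints something like:
	// level=info msg="starting token refresher"
	// level=error msg="token refresh failed" err=timeout
}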
func Debug(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), DebugValue()) + return level.Debug(logger) } // NewFilter wraps next and implements level filtering. See the commentary on @@ -28,76 +31,40 @@ func Debug(logger log.Logger) log.Logger { // Info, Warn or Error helper methods are squelched and non-leveled log // events are passed to next unmodified. func NewFilter(next log.Logger, options ...Option) log.Logger { - l := &logger{ - next: next, - } - for _, option := range options { - option(l) - } - return l -} - -type logger struct { - next log.Logger - allowed level - squelchNoLevel bool - errNotAllowed error - errNoLevel error -} - -func (l *logger) Log(keyvals ...interface{}) error { - var hasLevel, levelAllowed bool - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(*levelValue); ok { - hasLevel = true - levelAllowed = l.allowed&v.level != 0 - break - } - } - if !hasLevel && l.squelchNoLevel { - return l.errNoLevel - } - if hasLevel && !levelAllowed { - return l.errNotAllowed - } - return l.next.Log(keyvals...) + return level.NewFilter(next, options...) } // Option sets a parameter for the leveled logger. -type Option func(*logger) +type Option = level.Option // AllowAll is an alias for AllowDebug. func AllowAll() Option { - return AllowDebug() + return level.AllowAll() } // AllowDebug allows error, warn, info and debug level log events to pass. func AllowDebug() Option { - return allowed(levelError | levelWarn | levelInfo | levelDebug) + return level.AllowDebug() } // AllowInfo allows error, warn and info level log events to pass. func AllowInfo() Option { - return allowed(levelError | levelWarn | levelInfo) + return level.AllowInfo() } // AllowWarn allows error and warn level log events to pass. func AllowWarn() Option { - return allowed(levelError | levelWarn) + return level.AllowWarn() } // AllowError allows only error level log events to pass. func AllowError() Option { - return allowed(levelError) + return level.AllowError() } // AllowNone allows no leveled log events to pass. func AllowNone() Option { - return allowed(0) -} - -func allowed(allowed level) Option { - return func(l *logger) { l.allowed = allowed } + return level.AllowNone() } // ErrNotAllowed sets the error to return from Log when it squelches a log @@ -105,7 +72,7 @@ func allowed(allowed level) Option { // ErrNotAllowed is nil; in this case the log event is squelched with no // error. func ErrNotAllowed(err error) Option { - return func(l *logger) { l.errNotAllowed = err } + return level.ErrNotAllowed(err) } // SquelchNoLevel instructs Log to squelch log events with no level, so that @@ -113,93 +80,41 @@ func ErrNotAllowed(err error) Option { // to true and a log event is squelched in this way, the error value // configured with ErrNoLevel is returned to the caller. func SquelchNoLevel(squelch bool) Option { - return func(l *logger) { l.squelchNoLevel = squelch } + return level.SquelchNoLevel(squelch) } // ErrNoLevel sets the error to return from Log when it squelches a log event // with no level. By default, ErrNoLevel is nil; in this case the log event is // squelched with no error. func ErrNoLevel(err error) Option { - return func(l *logger) { l.errNoLevel = err } + return level.ErrNoLevel(err) } // NewInjector wraps next and returns a logger that adds a Key/level pair to // the beginning of log events that don't already contain a level. In effect, // this gives a default level to logs without a level. 
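// An illustrative sketch of the filtering options re-exported above (the
// message strings are made up): NewFilter drops events below the allowed
// level and passes the rest through unchanged.

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	// Only warn and error events pass; debug and info are squelched.
	logger = level.NewFilter(logger, level.AllowWarn())

	_ = level.Debug(logger).Log("msg", "dropped") // filtered out, returns nil
	_ = level.Error(logger).Log("msg", "emitted") // written to stderr
}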
-func NewInjector(next log.Logger, level Value) log.Logger { - return &injector{ - next: next, - level: level, - } -} - -type injector struct { - next log.Logger - level interface{} -} - -func (l *injector) Log(keyvals ...interface{}) error { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(*levelValue); ok { - return l.next.Log(keyvals...) - } - } - kvs := make([]interface{}, len(keyvals)+2) - kvs[0], kvs[1] = key, l.level - copy(kvs[2:], keyvals) - return l.next.Log(kvs...) +func NewInjector(next log.Logger, lvl Value) log.Logger { + return level.NewInjector(next, lvl) } // Value is the interface that each of the canonical level values implement. // It contains unexported methods that prevent types from other packages from // implementing it and guaranteeing that NewFilter can distinguish the levels // defined in this package from all other values. -type Value interface { - String() string - levelVal() -} +type Value = level.Value // Key returns the unique key added to log events by the loggers in this // package. -func Key() interface{} { return key } +func Key() interface{} { return level.Key() } // ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return errorValue } +func ErrorValue() Value { return level.ErrorValue() } // WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return warnValue } +func WarnValue() Value { return level.WarnValue() } // InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return infoValue } - -// DebugValue returns the unique value added to log events by Warn. -func DebugValue() Value { return debugValue } - -var ( - // key is of type interface{} so that it allocates once during package - // initialization and avoids allocating every time the value is added to a - // []interface{} later. - key interface{} = "level" - - errorValue = &levelValue{level: levelError, name: "error"} - warnValue = &levelValue{level: levelWarn, name: "warn"} - infoValue = &levelValue{level: levelInfo, name: "info"} - debugValue = &levelValue{level: levelDebug, name: "debug"} -) - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelWarn - levelError -) - -type levelValue struct { - name string - level -} +func InfoValue() Value { return level.InfoValue() } -func (v *levelValue) String() string { return v.name } -func (v *levelValue) levelVal() {} +// DebugValue returns the unique value added to log events by Debug. +func DebugValue() Value { return level.DebugValue() } diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go index 66a9e2f..164a4f9 100644 --- a/vendor/github.com/go-kit/kit/log/log.go +++ b/vendor/github.com/go-kit/kit/log/log.go @@ -1,135 +1,51 @@ package log -import "errors" +import ( + "github.com/go-kit/log" +) // Logger is the fundamental interface for all log operations. Log creates a // log event from keyvals, a variadic sequence of alternating keys and values. // Implementations must be safe for concurrent use by multiple goroutines. In // particular, any implementation of Logger that appends to keyvals or // modifies or retains any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} +type Logger = log.Logger // ErrMissingValue is appended to keyvals slices with odd length to substitute // the missing value. 
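// An illustrative sketch (keyvals are made up) of the NewInjector forwarding
// above: events without an explicit level get a default one prepended.

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	logger = level.NewInjector(logger, level.InfoValue())

	_ = logger.Log("msg", "no explicit level")     // emitted as level=info ...
	_ = level.Warn(logger).Log("msg", "unchanged") // keeps level=warn
}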
-var ErrMissingValue = errors.New("(MISSING)") +var ErrMissingValue = log.ErrMissingValue // With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With or -// WithPrefix, keyvals is appended to the existing context. +// to calls to Log. If logger is also a contextual logger created by With, +// WithPrefix, or WithSuffix, keyvals is appended to the existing context. // // The returned Logger replaces all value elements (odd indexes) containing a // Valuer with their generated value for each call to its Log method. func With(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - } + return log.With(logger, keyvals...) } // WithPrefix returns a new contextual logger with keyvals prepended to those // passed to calls to Log. If logger is also a contextual logger created by -// With or WithPrefix, keyvals is prepended to the existing context. +// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. // // The returned Logger replaces all value elements (odd indexes) containing a // Valuer with their generated value for each call to its Log method. func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) - return &context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - } + return log.WithPrefix(logger, keyvals...) } -// context is the Logger implementation returned by With and WithPrefix. It -// wraps a Logger and holds keyvals that it includes in all log events. Its -// Log method calls bindValues to generate values for each Valuer in the -// context keyvals. -// -// A context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Two implementation details provide the needed stack depth consistency. +// WithSuffix returns a new contextual logger with keyvals appended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. // -// 1. 
newContext avoids introducing an additional layer when asked to -// wrap another context. -// 2. With and WithPrefix avoid introducing an additional layer by -// returning a newly constructed context with a merged keyvals rather -// than simply wrapping the existing context. -type context struct { - logger Logger - keyvals []interface{} - hasValuer bool -} - -func newContext(logger Logger) *context { - if c, ok := logger.(*context); ok { - return c - } - return &context{logger: logger} -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:len(l.keyvals)]) - } - return l.logger.Log(kvs...) +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithSuffix(logger Logger, keyvals ...interface{}) Logger { + return log.WithSuffix(logger, keyvals...) } // LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If // f is a function with the appropriate signature, LoggerFunc(f) is a Logger // object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) -} +type LoggerFunc = log.LoggerFunc diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go index a003052..51cde2c 100644 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go @@ -1,62 +1,15 @@ package log import ( - "bytes" "io" - "sync" - "github.com/go-logfmt/logfmt" + "github.com/go-kit/log" ) -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - // NewLogfmtLogger returns a logger that encodes keyvals to the Writer in // logfmt format. Each log event produces no more than one call to w.Write. // The passed Writer must be safe for concurrent use by multiple goroutines if // the returned Logger will be used concurrently. func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. 
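// An illustrative sketch (the component name and keyvals are made up) of the
// contextual helpers aliased above: With, WithPrefix, and WithSuffix attach
// keyvals to every subsequent Log call.

package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stdout)
	logger = log.WithPrefix(logger, "ts", log.DefaultTimestampUTC) // always first
	logger = log.With(logger, "component", "token-refresher")

	_ = logger.Log("msg", "context attached")
	// Prints something like:
	// ts=2025-01-01T12:00:00Z component=token-refresher msg="context attached"
}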
- if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil + return log.NewLogfmtLogger(w) } diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go index 1047d62..b02c686 100644 --- a/vendor/github.com/go-kit/kit/log/nop_logger.go +++ b/vendor/github.com/go-kit/kit/log/nop_logger.go @@ -1,8 +1,8 @@ package log -type nopLogger struct{} +import "github.com/go-kit/log" // NewNopLogger returns a logger that doesn't do anything. -func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } +func NewNopLogger() Logger { + return log.NewNopLogger() +} diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go index ff96b5d..cb604a7 100644 --- a/vendor/github.com/go-kit/kit/log/stdlib.go +++ b/vendor/github.com/go-kit/kit/log/stdlib.go @@ -2,9 +2,8 @@ package log import ( "io" - "log" - "regexp" - "strings" + + "github.com/go-kit/log" ) // StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's @@ -13,104 +12,43 @@ import ( // // If you have any choice in the matter, you shouldn't use this. Prefer to // redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. -func (w StdlibWriter) Write(p []byte) (int, error) { - log.Print(strings.TrimSpace(string(p))) - return len(p), nil -} +type StdlibWriter = log.StdlibWriter // StdlibAdapter wraps a Logger and allows it to be passed to the stdlib // logger's SetOutput. It will extract date/timestamps, filenames, and // messages, and place them under relevant keys. -type StdlibAdapter struct { - Logger - timestampKey string - fileKey string - messageKey string -} +type StdlibAdapter = log.StdlibAdapter // StdlibAdapterOption sets a parameter for the StdlibAdapter. -type StdlibAdapterOption func(*StdlibAdapter) +type StdlibAdapterOption = log.StdlibAdapterOption // TimestampKey sets the key for the timestamp field. By default, it's "ts". func TimestampKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.timestampKey = key } + return log.TimestampKey(key) } // FileKey sets the key for the file and line field. By default, it's "caller". func FileKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.fileKey = key } + return log.FileKey(key) } // MessageKey sets the key for the actual log message. By default, it's "msg". func MessageKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.messageKey = key } + return log.MessageKey(key) +} + +// Prefix configures the adapter to parse a prefix from stdlib log events. If +// you provide a non-empty prefix to the stdlib logger, then your should provide +// that same prefix to the adapter via this option. +// +// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to +// true if you want to include the parsed prefix in the msg. +func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption { + return log.Prefix(prefix, joinPrefixToMsg) } // NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed // logger. It's designed to be passed to log.SetOutput. 
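// An illustrative sketch (the log message is made up) of NewStdlibAdapter as
// re-exported above: the standard library logger writes through a Go kit
// logger, with date/time, caller, and message parsed into structured keys.

package main

import (
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	logger := kitlog.NewLogfmtLogger(os.Stderr)

	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger, kitlog.MessageKey("msg")))
	stdlog.SetFlags(stdlog.LstdFlags | stdlog.Lshortfile)

	stdlog.Println("hello from the stdlib logger")
	// Prints something like:
	// ts="2025/01/01 12:00:00" caller=main.go:17 msg="hello from the stdlib logger"
}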
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - a := StdlibAdapter{ - Logger: logger, - timestampKey: "ts", - fileKey: "caller", - messageKey: "msg", - } - for _, option := range options { - option(&a) - } - return a -} - -func (a StdlibAdapter) Write(p []byte) (int, error) { - result := subexps(p) - keyvals := []interface{}{} - var timestamp string - if date, ok := result["date"]; ok && date != "" { - timestamp = date - } - if time, ok := result["time"]; ok && time != "" { - if timestamp != "" { - timestamp += " " - } - timestamp += time - } - if timestamp != "" { - keyvals = append(keyvals, a.timestampKey, timestamp) - } - if file, ok := result["file"]; ok && file != "" { - keyvals = append(keyvals, a.fileKey, file) - } - if msg, ok := result["msg"]; ok { - keyvals = append(keyvals, a.messageKey, msg) - } - if err := a.Logger.Log(keyvals...); err != nil { - return 0, err - } - return len(p), nil -} - -const ( - logRegexpDate = `(?P[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` - logRegexpTime = `(?P