From 33143ba6c06457a8b17267f6351d42339ee0066a Mon Sep 17 00:00:00 2001 From: Nathan LaFreniere Date: Thu, 21 Jul 2016 17:27:25 -0700 Subject: [PATCH] update for drone 0.5 --- .drone.yml | 7 +- Dockerfile | 10 +- Makefile | 34 - README.md | 122 +- aws.go | 116 +- main.go | 325 +- main_test.go | 41 - plugin.go | 203 + types.go | 109 +- vendor/github.com/aws/aws-sdk-go/Gemfile | 6 + vendor/github.com/aws/aws-sdk-go/Makefile | 152 + vendor/github.com/aws/aws-sdk-go/README.md | 116 + .../aws/aws-sdk-go/aws/awserr/error.go | 46 +- .../aws/aws-sdk-go/aws/awserr/types.go | 67 +- .../aws/aws-sdk-go/aws/awsutil/prettify.go | 4 + .../aws-sdk-go/aws/client/default_retryer.go | 53 +- .../github.com/aws/aws-sdk-go/aws/config.go | 54 +- .../aws/aws-sdk-go/aws/convert_types.go | 24 +- .../aws-sdk-go/aws/corehandlers/handlers.go | 53 +- .../aws/corehandlers/param_validator.go | 139 +- .../aws-sdk-go/aws/credentials/credentials.go | 3 + .../ec2rolecreds/ec2_role_provider.go | 18 +- .../aws/credentials/endpointcreds/provider.go | 191 + .../aws/credentials/env_provider.go | 8 +- .../shared_credentials_provider.go | 16 +- .../aws/credentials/static_provider.go | 8 +- .../aws/aws-sdk-go/aws/defaults/defaults.go | 45 +- .../aws/aws-sdk-go/aws/ec2metadata/api.go | 99 +- .../aws/aws-sdk-go/aws/ec2metadata/service.go | 23 +- .../github.com/aws/aws-sdk-go/aws/logger.go | 14 + .../aws/aws-sdk-go/aws/request/handlers.go | 53 +- .../aws-sdk-go/aws/request/http_request.go | 33 + .../aws/request/http_request_1_4.go | 31 + .../aws-sdk-go/aws/request/offset_reader.go | 49 + .../aws/aws-sdk-go/aws/request/request.go | 111 +- .../aws/aws-sdk-go/aws/request/retryer.go | 23 +- .../aws/aws-sdk-go/aws/request/validation.go | 234 ++ .../aws/aws-sdk-go/aws/session/session.go | 2 +- .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 644 ++++ vendor/github.com/aws/aws-sdk-go/aws/types.go | 30 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/private/README.md | 4 + .../private/endpoints/endpoints.json | 39 +- .../private/endpoints/endpoints_map.go | 36 +- .../private/protocol/query/build.go | 3 + .../protocol/query/queryutil/queryutil.go | 15 +- .../private/protocol/query/unmarshal.go | 6 + .../private/protocol/query/unmarshal_error.go | 41 +- .../aws-sdk-go/private/protocol/rest/build.go | 3 +- .../private/protocol/xml/xmlutil/build.go | 16 +- .../aws/aws-sdk-go/private/signer/v4/v4.go | 365 -- vendor/github.com/aws/aws-sdk-go/sdk.go | 7 + .../service/{cloudFront => cloudfront}/api.go | 1545 +++++++- .../{cloudFront => cloudfront}/service.go | 4 +- .../{cloudFront => cloudfront}/waiters.go | 0 .../aws/aws-sdk-go/service/generate.go | 5 + .../aws/aws-sdk-go/service/s3/api.go | 3396 ++++++++++++++++- .../aws-sdk-go/service/s3/customizations.go | 57 +- .../service/s3/host_style_bucket.go | 165 +- .../service/s3/platform_handlers.go | 8 + .../service/s3/platform_handlers_go1.6.go | 28 + .../aws/aws-sdk-go/service/s3/service.go | 12 +- .../aws-sdk-go/service/s3/unmarshal_error.go | 2 + .../aws/aws-sdk-go/service/s3/waiters.go | 6 + .../github.com/codegangsta/cli/CHANGELOG.md | 334 ++ vendor/github.com/codegangsta/cli/LICENSE | 21 + vendor/github.com/codegangsta/cli/README.md | 1307 +++++++ vendor/github.com/codegangsta/cli/app.go | 503 +++ .../github.com/codegangsta/cli/appveyor.yml | 25 + vendor/github.com/codegangsta/cli/category.go | 44 + vendor/github.com/codegangsta/cli/cli.go | 21 + vendor/github.com/codegangsta/cli/command.go | 279 ++ 
vendor/github.com/codegangsta/cli/context.go | 213 ++ vendor/github.com/codegangsta/cli/errors.go | 92 + .../codegangsta/cli/flag-types.json | 93 + vendor/github.com/codegangsta/cli/flag.go | 621 +++ .../codegangsta/cli/flag_generated.go | 627 +++ vendor/github.com/codegangsta/cli/funcs.go | 28 + .../codegangsta/cli/generate-flag-types | 244 ++ vendor/github.com/codegangsta/cli/help.go | 267 ++ vendor/github.com/codegangsta/cli/runtests | 118 + vendor/github.com/davecgh/go-spew/LICENSE | 13 - .../github.com/davecgh/go-spew/spew/bypass.go | 151 - .../davecgh/go-spew/spew/bypasssafe.go | 37 - .../github.com/davecgh/go-spew/spew/common.go | 341 -- .../github.com/davecgh/go-spew/spew/config.go | 297 -- vendor/github.com/davecgh/go-spew/spew/doc.go | 202 - .../github.com/davecgh/go-spew/spew/dump.go | 509 --- .../github.com/davecgh/go-spew/spew/format.go | 419 -- .../github.com/davecgh/go-spew/spew/spew.go | 148 - vendor/github.com/drone/drone-go/LICENSE | 202 - .../github.com/drone/drone-go/drone/client.go | 404 -- .../github.com/drone/drone-go/drone/const.go | 48 - .../drone/drone-go/drone/interface.go | 95 - .../github.com/drone/drone-go/drone/types.go | 209 - .../drone/drone-go/drone/types_json.go | 127 - .../drone/drone-go/plugin/plugin.go | 134 - vendor/github.com/go-ini/ini/Makefile | 12 + vendor/github.com/go-ini/ini/README.md | 121 +- vendor/github.com/go-ini/ini/README_ZH.md | 115 +- vendor/github.com/go-ini/ini/ini.go | 947 +---- vendor/github.com/go-ini/ini/key.go | 625 +++ vendor/github.com/go-ini/ini/parser.go | 314 ++ vendor/github.com/go-ini/ini/section.go | 202 + vendor/github.com/go-ini/ini/struct.go | 149 +- .../github.com/jmespath/go-jmespath/Makefile | 2 +- vendor/github.com/jmespath/go-jmespath/api.go | 37 + .../jmespath/go-jmespath/functions.go | 128 +- .../github.com/jmespath/go-jmespath/parser.go | 4 +- vendor/github.com/joho/godotenv/LICENCE | 23 + vendor/github.com/joho/godotenv/README.md | 127 + .../joho/godotenv/autoload/autoload.go | 15 + vendor/github.com/joho/godotenv/godotenv.go | 229 ++ vendor/github.com/joho/godotenv/wercker.yml | 1 + vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../pmezard/go-difflib/difflib/difflib.go | 772 ---- vendor/github.com/ryanuber/go-glob/glob.go | 32 +- vendor/github.com/stretchr/testify/LICENSE | 22 - .../testify/assert/assertion_forward.go | 387 -- .../testify/assert/assertion_forward.go.tmpl | 4 - .../stretchr/testify/assert/assertions.go | 1004 ----- .../github.com/stretchr/testify/assert/doc.go | 45 - .../stretchr/testify/assert/errors.go | 10 - .../testify/assert/forward_assertions.go | 16 - .../testify/assert/http_assertions.go | 106 - vendor/golang.org/x/net/context/context.go | 447 --- vendor/golang.org/x/oauth2/AUTHORS | 3 - vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 - vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - vendor/golang.org/x/oauth2/LICENSE | 27 - vendor/golang.org/x/oauth2/README.md | 64 - .../golang.org/x/oauth2/client_appengine.go | 25 - vendor/golang.org/x/oauth2/internal/oauth2.go | 76 - vendor/golang.org/x/oauth2/internal/token.go | 221 -- .../golang.org/x/oauth2/internal/transport.go | 69 - vendor/golang.org/x/oauth2/oauth2.go | 337 -- vendor/golang.org/x/oauth2/token.go | 158 - vendor/golang.org/x/oauth2/transport.go | 132 - vendor/vendor.json | 178 - 140 files changed, 14677 insertions(+), 9906 deletions(-) delete mode 100644 Makefile delete mode 100644 main_test.go create mode 100644 plugin.go create mode 100644 vendor/github.com/aws/aws-sdk-go/Gemfile create mode 100644 
vendor/github.com/aws/aws-sdk-go/Makefile create mode 100644 vendor/github.com/aws/aws-sdk-go/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/README.md delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/sdk.go rename vendor/github.com/aws/aws-sdk-go/service/{cloudFront => cloudfront}/api.go (70%) rename vendor/github.com/aws/aws-sdk-go/service/{cloudFront => cloudfront}/service.go (96%) rename vendor/github.com/aws/aws-sdk-go/service/{cloudFront => cloudfront}/waiters.go (100%) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/generate.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go create mode 100644 vendor/github.com/codegangsta/cli/CHANGELOG.md create mode 100644 vendor/github.com/codegangsta/cli/LICENSE create mode 100644 vendor/github.com/codegangsta/cli/README.md create mode 100644 vendor/github.com/codegangsta/cli/app.go create mode 100644 vendor/github.com/codegangsta/cli/appveyor.yml create mode 100644 vendor/github.com/codegangsta/cli/category.go create mode 100644 vendor/github.com/codegangsta/cli/cli.go create mode 100644 vendor/github.com/codegangsta/cli/command.go create mode 100644 vendor/github.com/codegangsta/cli/context.go create mode 100644 vendor/github.com/codegangsta/cli/errors.go create mode 100644 vendor/github.com/codegangsta/cli/flag-types.json create mode 100644 vendor/github.com/codegangsta/cli/flag.go create mode 100644 vendor/github.com/codegangsta/cli/flag_generated.go create mode 100644 vendor/github.com/codegangsta/cli/funcs.go create mode 100755 vendor/github.com/codegangsta/cli/generate-flag-types create mode 100644 vendor/github.com/codegangsta/cli/help.go create mode 100755 vendor/github.com/codegangsta/cli/runtests delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 vendor/github.com/drone/drone-go/LICENSE delete mode 100644 vendor/github.com/drone/drone-go/drone/client.go delete mode 100644 vendor/github.com/drone/drone-go/drone/const.go delete mode 100644 vendor/github.com/drone/drone-go/drone/interface.go delete mode 100644 vendor/github.com/drone/drone-go/drone/types.go delete mode 100644 vendor/github.com/drone/drone-go/drone/types_json.go delete mode 100644 
vendor/github.com/drone/drone-go/plugin/plugin.go create mode 100644 vendor/github.com/go-ini/ini/Makefile create mode 100644 vendor/github.com/go-ini/ini/key.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/section.go create mode 100644 vendor/github.com/joho/godotenv/LICENCE create mode 100644 vendor/github.com/joho/godotenv/README.md create mode 100644 vendor/github.com/joho/godotenv/autoload/autoload.go create mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/github.com/joho/godotenv/wercker.yml delete mode 100644 vendor/github.com/pmezard/go-difflib/LICENSE delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go delete mode 100644 vendor/github.com/stretchr/testify/LICENSE delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go delete mode 100644 vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl delete mode 100644 vendor/github.com/stretchr/testify/assert/assertions.go delete mode 100644 vendor/github.com/stretchr/testify/assert/doc.go delete mode 100644 vendor/github.com/stretchr/testify/assert/errors.go delete mode 100644 vendor/github.com/stretchr/testify/assert/forward_assertions.go delete mode 100644 vendor/github.com/stretchr/testify/assert/http_assertions.go delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/oauth2/LICENSE delete mode 100644 vendor/golang.org/x/oauth2/README.md delete mode 100644 vendor/golang.org/x/oauth2/client_appengine.go delete mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/internal/token.go delete mode 100644 vendor/golang.org/x/oauth2/internal/transport.go delete mode 100644 vendor/golang.org/x/oauth2/oauth2.go delete mode 100644 vendor/golang.org/x/oauth2/token.go delete mode 100644 vendor/golang.org/x/oauth2/transport.go delete mode 100644 vendor/vendor.json diff --git a/.drone.yml b/.drone.yml index 639501c..123f471 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,16 +1,11 @@ workspace: base: /go - path: src/github.com/drone-plugins/drone-s3-sync pipeline: build: image: golang:1.6 - environment: - - CGO_ENABLED=0 commands: - - make vet - - make build - - make test + - CGO_ENABLED=0 go build docker: repo: plugins/drone-s3-sync diff --git a/Dockerfile b/Dockerfile index 23167c4..83aea37 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,13 +3,7 @@ # CGO_ENABLED=0 go build -a -tags netgo # docker build --rm=true -t plugins/drone-s3-sync . -FROM alpine:3.1 - -RUN apk update && \ - apk add \ - ca-certificates \ - mailcap && \ - rm -rf /var/cache/apk/* - +FROM alpine:3.3 +RUN apk update && apk add ca-certificates mailcap && rm -rf /var/cache/apk/* ADD drone-s3-sync /bin/ ENTRYPOINT ["/bin/drone-s3-sync"] diff --git a/Makefile b/Makefile deleted file mode 100644 index 08ec003..0000000 --- a/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -.PHONY: all clean deps fmt vet test docker - -EXECUTABLE ?= drone-s3-sync -IMAGE ?= plugins/$(EXECUTABLE) -COMMIT ?= $(shell git rev-parse --short HEAD) - -LDFLAGS = -X "main.buildCommit=$(COMMIT)" -PACKAGES = $(shell go list ./... | grep -v /vendor/) - -all: deps build test - -clean: - go clean -i ./... - -deps: - go get -t ./... 
- -fmt: - go fmt $(PACKAGES) - -vet: - go vet $(PACKAGES) - -test: - @for PKG in $(PACKAGES); do go test -cover -coverprofile $$GOPATH/src/$$PKG/coverage.out $$PKG || exit 1; done; - -docker: - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags '-s -w $(LDFLAGS)' - docker build --rm -t $(IMAGE) . - -$(EXECUTABLE): $(wildcard *.go) - go build -ldflags '-s -w $(LDFLAGS)' - -build: $(EXECUTABLE) diff --git a/README.md b/README.md index ed9b5e0..31907f2 100644 --- a/README.md +++ b/README.md @@ -1,114 +1,50 @@ # drone-s3-sync [![Build Status](http://beta.drone.io/api/badges/drone-plugins/drone-s3-sync/status.svg)](http://beta.drone.io/drone-plugins/drone-s3-sync) -[![Coverage Status](https://aircover.co/badges/drone-plugins/drone-s3-sync/coverage.svg)](https://aircover.co/drone-plugins/drone-s3-sync) [![](https://badge.imagelayers.io/plugins/drone-s3-sync:latest.svg)](https://imagelayers.io/?images=plugins/drone-s3-sync:latest 'Get your own badge on imagelayers.io') Drone plugin to synchronize a directory with an Amazon S3 Bucket. For the usage information and a listing of the available options please take a look at [the docs](DOCS.md). -## Binary +## Build -Build the binary using `make`: +Build the binary with the following commands: ``` -make deps build -``` +export GO15VENDOREXPERIMENT=1 +export GOOS=linux +export GOARCH=amd64 +export CGO_ENABLED=0 -### Example - -```sh -./drone-s3-sync < \ + -e PLUGIN_TARGET= \ + -e PLUGIN_BUCKET= \ + -e AWS_ACCESS_KEY_ID= \ + -e AWS_SECRET_ACCESS_KEY= \ + -v $(pwd):$(pwd) \ + -w $(pwd) \ + plugins/s3-sync ``` diff --git a/aws.go b/aws.go index 7d114f3..495e2df 100644 --- a/aws.go +++ b/aws.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/cloudFront" + "github.com/aws/aws-sdk-go/service/cloudfront" "github.com/aws/aws-sdk-go/service/s3" "github.com/ryanuber/go-glob" ) @@ -23,23 +23,24 @@ type AWS struct { cfClient *cloudfront.CloudFront remote []string local []string - vargs PluginArgs + plugin *Plugin } -func NewAWS(vargs PluginArgs) AWS { +func NewAWS(p *Plugin) AWS { sess := session.New(&aws.Config{ - Credentials: credentials.NewStaticCredentials(vargs.Key, vargs.Secret, ""), - Region: aws.String(vargs.Region), + Credentials: credentials.NewStaticCredentials(p.Key, p.Secret, ""), + Region: aws.String(p.Region), }) c := s3.New(sess) cf := cloudfront.New(sess) r := make([]string, 1, 1) l := make([]string, 1, 1) - return AWS{c, cf, r, l, vargs} + return AWS{c, cf, r, l, p} } func (a *AWS) Upload(local, remote string) error { + p := a.plugin if local == "" { return nil } @@ -52,15 +53,10 @@ func (a *AWS) Upload(local, remote string) error { defer file.Close() access := "" - if a.vargs.Access.IsString() { - access = a.vargs.Access.String() - } else if !a.vargs.Access.IsEmpty() { - accessMap := a.vargs.Access.Map() - for pattern := range accessMap { - if match := glob.Glob(pattern, local); match == true { - access = accessMap[pattern] - break - } + for pattern := range p.Access { + if match := glob.Glob(pattern, local); match == true { + access = p.Access[pattern] + break } } @@ -69,42 +65,12 @@ func (a *AWS) Upload(local, remote string) error { } fileExt := filepath.Ext(local) + var contentType string - if a.vargs.ContentType.IsString() { - contentType = a.vargs.ContentType.String() - } else if !a.vargs.ContentType.IsEmpty() { - contentMap := a.vargs.ContentType.Map() - for patternExt := range contentMap { - if 
patternExt == fileExt { - contentType = contentMap[patternExt] - break - } - } - } - - var contentEncoding string - if a.vargs.ContentEncoding.IsString() { - contentEncoding = a.vargs.ContentEncoding.String() - } else if !a.vargs.ContentEncoding.IsEmpty() { - encodingMap := a.vargs.ContentEncoding.Map() - for patternExt := range encodingMap { - if patternExt == fileExt { - contentEncoding = encodingMap[patternExt] - break - } - } - } - - metadata := map[string]*string{} - vmap := a.vargs.Metadata.Map() - if len(vmap) > 0 { - for pattern := range vmap { - if match := glob.Glob(pattern, local); match == true { - for k, v := range vmap[pattern] { - metadata[k] = aws.String(v) - } - break - } + for patternExt := range p.ContentType { + if patternExt == fileExt { + contentType = p.ContentType[patternExt] + break } } @@ -112,8 +78,26 @@ func (a *AWS) Upload(local, remote string) error { contentType = mime.TypeByExtension(fileExt) } + var contentEncoding string + for patternExt := range p.ContentEncoding { + if patternExt == fileExt { + contentEncoding = p.ContentEncoding[patternExt] + break + } + } + + metadata := map[string]*string{} + for pattern := range p.Metadata { + if match := glob.Glob(pattern, local); match == true { + for k, v := range p.Metadata[pattern] { + metadata[k] = aws.String(v) + } + break + } + } + head, err := a.client.HeadObject(&s3.HeadObjectInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(remote), }) if err != nil && err.(awserr.Error).Code() != "404" { @@ -123,7 +107,7 @@ func (a *AWS) Upload(local, remote string) error { debug("\"%s\" not found in bucket, uploading with Content-Type \"%s\" and permissions \"%s\"", local, contentType, access) var putObject = &s3.PutObjectInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(remote), Body: file, ContentType: aws.String(contentType), @@ -131,7 +115,7 @@ func (a *AWS) Upload(local, remote string) error { Metadata: metadata, } - if(len(contentEncoding) > 0) { + if len(contentEncoding) > 0 { putObject.ContentEncoding = aws.String(contentEncoding) } @@ -185,7 +169,7 @@ func (a *AWS) Upload(local, remote string) error { if !shouldCopy { grant, err := a.client.GetObjectAcl(&s3.GetObjectAclInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(remote), }) if err != nil { @@ -223,16 +207,16 @@ func (a *AWS) Upload(local, remote string) error { debug("Updating metadata for \"%s\" Content-Type: \"%s\", ACL: \"%s\"", local, contentType, access) var copyObject = &s3.CopyObjectInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(remote), - CopySource: aws.String(fmt.Sprintf("%s/%s", a.vargs.Bucket, remote)), + CopySource: aws.String(fmt.Sprintf("%s/%s", p.Bucket, remote)), ACL: aws.String(access), ContentType: aws.String(contentType), Metadata: metadata, MetadataDirective: aws.String("REPLACE"), } - if(len(contentEncoding) > 0) { + if len(contentEncoding) > 0 { copyObject.ContentEncoding = aws.String(contentEncoding) } @@ -246,7 +230,7 @@ func (a *AWS) Upload(local, remote string) error { debug("Uploading \"%s\" with Content-Type \"%s\" and permissions \"%s\"", local, contentType, access) var putObject = &s3.PutObjectInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(remote), Body: file, ContentType: aws.String(contentType), @@ -254,7 +238,7 @@ func (a *AWS) Upload(local, remote string) error { Metadata: metadata, } - 
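The rewritten `Upload` above resolves the per-file ACL, Content-Type, Content-Encoding, and metadata by matching the local path against glob patterns (or the file extension) from the plugin's maps. A minimal sketch of those two lookups, using the same `ryanuber/go-glob` dependency the patch imports; the helper names are illustrative and not part of the patch:

```go
package main

import (
	"path/filepath"

	"github.com/ryanuber/go-glob"
)

// matchGlob returns the first value whose pattern glob-matches the path,
// e.g. {"public/*": "public-read"} applied to "public/index.html".
// Like the plugin's loop, map iteration order decides ties.
func matchGlob(rules map[string]string, path string) string {
	for pattern, value := range rules {
		if glob.Glob(pattern, path) {
			return value
		}
	}
	return ""
}

// matchExt returns the value registered for the file's extension,
// e.g. {".js": "application/javascript"} applied to "app.js".
func matchExt(rules map[string]string, path string) string {
	return rules[filepath.Ext(path)]
}
```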
if(len(contentEncoding) > 0){ + if len(contentEncoding) > 0 { putObject.ContentEncoding = aws.String(contentEncoding) } @@ -264,9 +248,10 @@ func (a *AWS) Upload(local, remote string) error { } func (a *AWS) Redirect(path, location string) error { + p := a.plugin debug("Adding redirect from \"%s\" to \"%s\"", path, location) _, err := a.client.PutObject(&s3.PutObjectInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(path), ACL: aws.String("public-read"), WebsiteRedirectLocation: aws.String(location), @@ -275,18 +260,20 @@ func (a *AWS) Redirect(path, location string) error { } func (a *AWS) Delete(remote string) error { + p := a.plugin debug("Removing remote file \"%s\"", remote) _, err := a.client.DeleteObject(&s3.DeleteObjectInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Key: aws.String(remote), }) return err } func (a *AWS) List(path string) ([]string, error) { + p := a.plugin remote := make([]string, 1, 1) resp, err := a.client.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Prefix: aws.String(path), }) if err != nil { @@ -299,7 +286,7 @@ func (a *AWS) List(path string) ([]string, error) { for *resp.IsTruncated { resp, err = a.client.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(a.vargs.Bucket), + Bucket: aws.String(p.Bucket), Prefix: aws.String(path), Marker: aws.String(remote[len(remote)-1]), }) @@ -317,9 +304,10 @@ func (a *AWS) List(path string) ([]string, error) { } func (a *AWS) Invalidate(invalidatePath string) error { + p := a.plugin debug("Invalidating \"%s\"", invalidatePath) _, err := a.cfClient.CreateInvalidation(&cloudfront.CreateInvalidationInput{ - DistributionId: aws.String(a.vargs.CloudFrontDistribution), + DistributionId: aws.String(p.CloudFrontDistribution), InvalidationBatch: &cloudfront.InvalidationBatch{ CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)), Paths: &cloudfront.Paths{ diff --git a/main.go b/main.go index 4db6f88..87289b0 100644 --- a/main.go +++ b/main.go @@ -1,239 +1,118 @@ package main import ( - "errors" - "fmt" + "log" "os" - "path/filepath" - "strings" - "github.com/drone/drone-go/drone" - "github.com/drone/drone-go/plugin" + "github.com/codegangsta/cli" + _ "github.com/joho/godotenv/autoload" ) -const maxConcurrent = 100 - -type job struct { - local string - remote string - action string -} - -type result struct { - j job - err error -} - -type app struct { - vargs *PluginArgs - workspace *drone.Workspace - client AWS - jobs []job -} - -var ( - buildCommit string -) - -var MissingAwsValuesMessage = "Must set access_key, secret_key, and bucket" +var version string // build number set at compile-time func main() { - fmt.Printf("Drone S3 Sync Plugin built from %s\n", buildCommit) - - a := newApp() - - err := a.loadVargs() - if err != nil { - fmt.Println(err) - os.Exit(1) + app := cli.NewApp() + app.Name = "s3 sync plugin" + app.Usage = "s3 sync plugin" + app.Action = run + app.Version = version + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "access-key", + Usage: "aws access key", + EnvVar: "PLUGIN_ACCESS_KEY,AWS_ACCESS_KEY_ID", + }, + cli.StringFlag{ + Name: "secret-key", + Usage: "aws secret key", + EnvVar: "PLUGIN_SECRET_KEY,AWS_SECRET_ACCESS_KEY", + }, + cli.StringFlag{ + Name: "bucket", + Usage: "name of bucket", + EnvVar: "PLUGIN_BUCKET", + }, + cli.StringFlag{ + Name: "region", + Usage: "aws region", + Value: "us-east-1", + EnvVar: "PLUGIN_REGION", + }, + cli.StringFlag{ + Name: "source", 
+ Usage: "upload source path", + Value: ".", + EnvVar: "PLUGIN_SOURCE", + }, + cli.StringFlag{ + Name: "target", + Usage: "target path", + Value: "/", + EnvVar: "PLUGIN_TARGET", + }, + cli.BoolFlag{ + Name: "delete", + Usage: "delete locally removed files from the target", + EnvVar: "PLUGIN_DELETE", + }, + cli.GenericFlag{ + Name: "access", + Usage: "access control settings", + EnvVar: "PLUGIN_ACCESS", + Value: &StringMapFlag{}, + }, + cli.GenericFlag{ + Name: "content-type", + Usage: "content-type settings for uploads", + EnvVar: "PLUGIN_CONTENT_TYPE", + Value: &StringMapFlag{}, + }, + cli.GenericFlag{ + Name: "content-encoding", + Usage: "content-encoding settings for uploads", + EnvVar: "PLUGIN_CONTENT_ENCODING", + Value: &StringMapFlag{}, + }, + cli.GenericFlag{ + Name: "metadata", + Usage: "additional metadata for uploads", + EnvVar: "PLUGIN_METADATA", + Value: &DeepStringMapFlag{}, + }, + cli.GenericFlag{ + Name: "redirects", + Usage: "redirects to create", + EnvVar: "PLUGIN_REDIRECTS", + Value: &MapFlag{}, + }, + cli.StringFlag{ + Name: "cloudfront-distribution", + Usage: "id of cloudfront distribution to invalidate", + EnvVar: "PLUGIN_CLOUDFRONT_DISTRIBUTION", + }, } - err = a.sanitizeInputs() - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - a.createClient() - - a.createSyncJobs() - a.createInvalidateJob() - a.runJobs() - - fmt.Println("done!") -} - -func newApp() *app { - return &app{ - vargs: &PluginArgs{}, - workspace: &drone.Workspace{}, - jobs: make([]job, 1, 1), + if err := app.Run(os.Args); err != nil { + log.Fatal(err) } } -func (a *app) loadVargs() error { - plugin.Param("vargs", a.vargs) - plugin.Param("workspace", a.workspace) - - err := plugin.Parse() - return err +func run(c *cli.Context) error { + plugin := Plugin{ + Key: c.String("access-key"), + Secret: c.String("secret-key"), + Bucket: c.String("bucket"), + Region: c.String("region"), + Source: c.String("source"), + Target: c.String("target"), + Delete: c.Bool("delete"), + Access: c.Generic("access").(*StringMapFlag).Get(), + ContentType: c.Generic("content-type").(*StringMapFlag).Get(), + ContentEncoding: c.Generic("content-encoding").(*StringMapFlag).Get(), + Metadata: c.Generic("metadata").(*DeepStringMapFlag).Get(), + Redirects: c.Generic("redirects").(*MapFlag).Get(), + CloudFrontDistribution: c.String("cloudfront-distribution"), + } -} - -func (a *app) createClient() { - a.client = NewAWS(*a.vargs) -} - -func (a *app) sanitizeInputs() error { - vargs := a.vargs - workspace := a.workspace - - if len(a.vargs.Key) == 0 || len(a.vargs.Secret) == 0 || len(a.vargs.Bucket) == 0 { - return errors.New(MissingAwsValuesMessage) - } - - if len(vargs.Region) == 0 { - vargs.Region = "us-east-1" - } - - if len(vargs.Source) == 0 { - vargs.Source = "." - } - vargs.Source = filepath.Join(workspace.Path, vargs.Source) - - if strings.HasPrefix(vargs.Target, "/") { - vargs.Target = vargs.Target[1:] - } - - return nil -} - -func (a *app) createSyncJobs() { - vargs := a.vargs - remote, err := a.client.List(vargs.Target) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - local := make([]string, 1, 1) - - err = filepath.Walk(vargs.Source, func(path string, info os.FileInfo, err error) error { - if err != nil || info.IsDir() { - return err - } - - localPath := path - if vargs.Source != "." 
{ - localPath = strings.TrimPrefix(path, vargs.Source) - if strings.HasPrefix(localPath, "/") { - localPath = localPath[1:] - } - } - local = append(local, localPath) - a.jobs = append(a.jobs, job{ - local: filepath.Join(vargs.Source, localPath), - remote: filepath.Join(vargs.Target, localPath), - action: "upload", - }) - - return nil - }) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - for path, location := range vargs.Redirects { - path = strings.TrimPrefix(path, "/") - local = append(local, path) - a.jobs = append(a.jobs, job{ - local: path, - remote: location, - action: "redirect", - }) - } - if a.vargs.Delete { - for _, r := range remote { - found := false - for _, l := range local { - if l == r { - found = true - break - } - } - - if !found { - a.jobs = append(a.jobs, job{ - local: "", - remote: r, - action: "delete", - }) - } - } - } -} - -func (a *app) createInvalidateJob() { - if len(a.vargs.CloudFrontDistribution) > 0 { - a.jobs = append(a.jobs, job{ - local: "", - remote: filepath.Join("/", a.vargs.Target, "*"), - action: "invalidateCloudFront", - }) - } -} - -func (a *app) runJobs() { - vargs := a.vargs - client := a.client - jobChan := make(chan struct{}, maxConcurrent) - results := make(chan *result, len(a.jobs)) - var invalidateJob *job - - fmt.Printf("Synchronizing with bucket \"%s\"\n", vargs.Bucket) - for _, j := range a.jobs { - jobChan <- struct{}{} - go func(j job) { - var err error - if j.action == "upload" { - err = client.Upload(j.local, j.remote) - } else if j.action == "redirect" { - err = client.Redirect(j.local, j.remote) - } else if j.action == "delete" { - err = client.Delete(j.remote) - } else if j.action == "invalidateCloudFront" { - invalidateJob = &j - // err = client.Invalidate(j.remote) - } else { - err = nil - } - results <- &result{j, err} - <-jobChan - }(j) - } - - for _ = range a.jobs { - r := <-results - if r.err != nil { - fmt.Printf("ERROR: failed to %s %s to %s: %+v\n", r.j.action, r.j.local, r.j.remote, r.err) - os.Exit(1) - } - } - - if invalidateJob != nil { - err := client.Invalidate(invalidateJob.remote) - if err != nil { - fmt.Printf("ERROR: failed to %s %s to %s: %+v\n", invalidateJob.action, invalidateJob.local, invalidateJob.remote, err) - os.Exit(1) - } - } -} - -func debug(format string, args ...interface{}) { - if os.Getenv("DEBUG") != "" { - fmt.Printf(format+"\n", args...) 
- } else { - fmt.Printf(".") - } + return plugin.Exec() } diff --git a/main_test.go b/main_test.go deleted file mode 100644 index e53a22b..0000000 --- a/main_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "testing" - "github.com/stretchr/testify/assert" -) - -func newAppWithAwsVars() *app { - a := newApp() - a.vargs.Key = "AAAA" - a.vargs.Secret = "AAAA" - a.vargs.Bucket = "AAAA" - - return a -} - -func TestSanitizeInputs(t *testing.T) { - a := newApp() - err := a.sanitizeInputs() - assert.EqualError(t, err, MissingAwsValuesMessage) - - a = newAppWithAwsVars() - a.workspace.Path = "foo/bar" - a.vargs.Target = "/remote/foo" - a.sanitizeInputs() - - assert.EqualValues(t, "foo/bar", a.vargs.Source, "Source should default to workspace.Path") - assert.EqualValues(t, "us-east-1", a.vargs.Region, "Region should default to 'us-east'") - assert.EqualValues(t, "remote/foo", a.vargs.Target, "Target should have first slash stripped") - - a = newAppWithAwsVars() - a.workspace.Path = "foo/bar" - a.vargs.Source = "some/folder" - a.vargs.Target = "remote/foo" - - a.sanitizeInputs() - - assert.EqualValues(t, "foo/bar/some/folder", a.vargs.Source, "Source should combine workspace.Path and specified Source") - assert.EqualValues(t, "remote/foo", a.vargs.Target, "Target should have first slash stripped") - -} \ No newline at end of file diff --git a/plugin.go b/plugin.go new file mode 100644 index 0000000..f2023f4 --- /dev/null +++ b/plugin.go @@ -0,0 +1,203 @@ +package main + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" +) + +type Plugin struct { + Key string + Secret string + Bucket string + Region string + Source string + Target string + Delete bool + Access map[string]string + ContentType map[string]string + ContentEncoding map[string]string + Metadata map[string]map[string]string + Redirects map[string]string + CloudFrontDistribution string + client AWS + jobs []job +} + +const maxConcurrent = 100 + +type job struct { + local string + remote string + action string +} + +type result struct { + j job + err error +} + +var MissingAwsValuesMessage = "Must set access_key, secret_key, and bucket" + +func (p *Plugin) Exec() error { + err := p.sanitizeInputs() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + p.jobs = make([]job, 1, 1) + p.client = NewAWS(p) + + p.createSyncJobs() + p.createInvalidateJob() + p.runJobs() + return nil +} + +func (p *Plugin) sanitizeInputs() error { + if len(p.Key) == 0 || len(p.Secret) == 0 || len(p.Bucket) == 0 { + return errors.New(MissingAwsValuesMessage) + } + + wd, err := os.Getwd() + if err != nil { + return err + } + p.Source = filepath.Join(wd, p.Source) + + if strings.HasPrefix(p.Target, "/") { + p.Target = p.Target[1:] + } + + return nil +} + +func (p *Plugin) createSyncJobs() { + remote, err := p.client.List(p.Target) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + local := make([]string, 1, 1) + + err = filepath.Walk(p.Source, func(path string, info os.FileInfo, err error) error { + if err != nil || info.IsDir() { + return err + } + + localPath := path + if p.Source != "." 
{ + localPath = strings.TrimPrefix(path, p.Source) + if strings.HasPrefix(localPath, "/") { + localPath = localPath[1:] + } + } + local = append(local, localPath) + p.jobs = append(p.jobs, job{ + local: filepath.Join(p.Source, localPath), + remote: filepath.Join(p.Target, localPath), + action: "upload", + }) + + return nil + }) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + for path, location := range p.Redirects { + path = strings.TrimPrefix(path, "/") + local = append(local, path) + p.jobs = append(p.jobs, job{ + local: path, + remote: location, + action: "redirect", + }) + } + if p.Delete { + for _, r := range remote { + found := false + for _, l := range local { + if l == r { + found = true + break + } + } + + if !found { + p.jobs = append(p.jobs, job{ + local: "", + remote: r, + action: "delete", + }) + } + } + } +} + +func (p *Plugin) createInvalidateJob() { + if len(p.CloudFrontDistribution) > 0 { + p.jobs = append(p.jobs, job{ + local: "", + remote: filepath.Join("/", p.Target, "*"), + action: "invalidateCloudFront", + }) + } +} + +func (p *Plugin) runJobs() { + client := p.client + jobChan := make(chan struct{}, maxConcurrent) + results := make(chan *result, len(p.jobs)) + var invalidateJob *job + + fmt.Printf("Synchronizing with bucket \"%s\"\n", p.Bucket) + for _, j := range p.jobs { + jobChan <- struct{}{} + go func(j job) { + var err error + if j.action == "upload" { + err = client.Upload(j.local, j.remote) + } else if j.action == "redirect" { + err = client.Redirect(j.local, j.remote) + } else if j.action == "delete" { + err = client.Delete(j.remote) + } else if j.action == "invalidateCloudFront" { + invalidateJob = &j + } else { + err = nil + } + results <- &result{j, err} + <-jobChan + }(j) + } + + for _ = range p.jobs { + r := <-results + if r.err != nil { + fmt.Printf("ERROR: failed to %s %s to %s: %+v\n", r.j.action, r.j.local, r.j.remote, r.err) + os.Exit(1) + } + } + + if invalidateJob != nil { + err := client.Invalidate(invalidateJob.remote) + if err != nil { + fmt.Printf("ERROR: failed to %s %s to %s: %+v\n", invalidateJob.action, invalidateJob.local, invalidateJob.remote, err) + os.Exit(1) + } + } +} + +func debug(format string, args ...interface{}) { + if os.Getenv("DEBUG") != "" { + fmt.Printf(format+"\n", args...) 
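`runJobs` above bounds concurrency with a buffered channel used as a semaphore (`maxConcurrent = 100`) and collects one result per job on a second channel. A stripped-down sketch of the same pattern, with a hypothetical `process` function standing in for the upload/redirect/delete calls:

```go
package main

import "fmt"

func main() {
	jobs := []string{"a", "b", "c", "d"}
	const maxConcurrent = 2

	sem := make(chan struct{}, maxConcurrent) // sends block once the buffer is full
	results := make(chan error, len(jobs))

	process := func(name string) error { // stand-in for Upload/Redirect/Delete
		fmt.Println("processing", name)
		return nil
	}

	for _, j := range jobs {
		sem <- struct{}{} // acquire a slot before spawning
		go func(name string) {
			results <- process(name)
			<-sem // release the slot
		}(j)
	}

	// Drain one result per job; the plugin exits on the first error it sees.
	for range jobs {
		if err := <-results; err != nil {
			fmt.Println("job failed:", err)
		}
	}
}
```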
+ } else { + fmt.Printf(".") + } +} diff --git a/types.go b/types.go index 7f35f34..4bad815 100644 --- a/types.go +++ b/types.go @@ -2,99 +2,68 @@ package main import "encoding/json" -type PluginArgs struct { - Key string `json:"access_key"` - Secret string `json:"secret_key"` - Bucket string `json:"bucket"` - Region string `json:"region"` - Source string `json:"source"` - Target string `json:"target"` - Delete bool `json:"delete"` - Access StringMap `json:"acl"` - ContentType StringMap `json:"content_type"` - ContentEncoding StringMap `json:"content_encoding"` - Metadata DeepStringMap `json:"metadata"` - Redirects map[string]string `json:"redirects"` - CloudFrontDistribution string `json:"cloudfront_distribution_id"` -} - -type DeepStringMap struct { +type DeepStringMapFlag struct { parts map[string]map[string]string } -func (e *DeepStringMap) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } +func (d *DeepStringMapFlag) String() string { + return "" +} - p := map[string]map[string]string{} - if err := json.Unmarshal(b, &p); err != nil { - s := map[string]string{} - if err := json.Unmarshal(b, &s); err != nil { +func (d *DeepStringMapFlag) Get() map[string]map[string]string { + return d.parts +} + +func (d *DeepStringMapFlag) Set(value string) error { + d.parts = map[string]map[string]string{} + err := json.Unmarshal([]byte(value), &d.parts) + if err != nil { + single := map[string]string{} + err := json.Unmarshal([]byte(value), &single) + if err != nil { return err } - p["*"] = s + + d.parts["*"] = single } - e.parts = p return nil } -func (e *DeepStringMap) Map() map[string]map[string]string { - return e.parts -} - -type StringMap struct { +type StringMapFlag struct { parts map[string]string } -func (e *StringMap) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } +func (s *StringMapFlag) String() string { + return "" +} - p := map[string]string{} - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p["_string_"] = s - } +func (s *StringMapFlag) Get() map[string]string { + return s.parts +} - e.parts = p +func (s *StringMapFlag) Set(value string) error { + s.parts = map[string]string{} + err := json.Unmarshal([]byte(value), &s.parts) + if err != nil { + s.parts["*"] = value + } return nil } -func (e *StringMap) IsEmpty() bool { - if e == nil || len(e.parts) == 0 { - return true - } - - return false +type MapFlag struct { + parts map[string]string } -func (e *StringMap) IsString() bool { - if e.IsEmpty() || len(e.parts) != 1 { - return false - } - - _, ok := e.parts["_string_"] - return ok +func (m *MapFlag) String() string { + return "" } -func (e *StringMap) String() string { - if e.IsEmpty() || !e.IsString() { - return "" - } - - return e.parts["_string_"] +func (m *MapFlag) Get() map[string]string { + return m.parts } -func (e *StringMap) Map() map[string]string { - if e.IsEmpty() || e.IsString() { - return map[string]string{} - } - - return e.parts +func (m *MapFlag) Set(value string) error { + m.parts = map[string]string{} + return json.Unmarshal([]byte(value), &m.parts) } diff --git a/vendor/github.com/aws/aws-sdk-go/Gemfile b/vendor/github.com/aws/aws-sdk-go/Gemfile new file mode 100644 index 0000000..2fb295a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gemfile @@ -0,0 +1,6 @@ +source 'https://rubygems.org' + +gem 'yard', git: 'git://github.com/lsegal/yard', ref: '5025564a491e1b7c6192632cba2802202ca08449' +gem 'yard-go', git: 'git://github.com/jasdel/yard-go', 
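The flag types in `types.go` above accept either a JSON object or a bare string from the environment, so `PLUGIN_ACCESS='{"public/*":"public-read"}'` and `PLUGIN_CONTENT_TYPE='text/html'` are both valid. A small sketch of how `Set` behaves on those two inputs; the type mirrors the one defined above and the values are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// StringMapFlag mirrors the type in types.go: JSON objects become a map,
// and any non-JSON string is stored under the wildcard key "*".
type StringMapFlag struct{ parts map[string]string }

func (s *StringMapFlag) Set(value string) error {
	s.parts = map[string]string{}
	if err := json.Unmarshal([]byte(value), &s.parts); err != nil {
		s.parts["*"] = value
	}
	return nil
}

func main() {
	var acl, ctype StringMapFlag

	acl.Set(`{"public/*": "public-read", "*.tmp": "private"}`)
	ctype.Set("text/html") // not JSON, falls back to {"*": "text/html"}

	fmt.Println(acl.parts)   // map[*.tmp:private public/*:public-read]
	fmt.Println(ctype.parts) // map[*:text/html]
}
```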
ref: 'e78e1ef7cdf5e0f3266845b26bb4fd64f1dd6f85' +gem 'rdiscount' + diff --git a/vendor/github.com/aws/aws-sdk-go/Makefile b/vendor/github.com/aws/aws-sdk-go/Makefile new file mode 100644 index 0000000..b8b4e94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Makefile @@ -0,0 +1,152 @@ +LINTIGNOREDOT='awstesting/integration.+should not use dot imports' +LINTIGNOREDOC='service/[^/]+/(api|service|waiters)\.go:.+(comment on exported|should have comment or be unexported)' +LINTIGNORECONST='service/[^/]+/(api|service|waiters)\.go:.+(type|struct field|const|func) ([^ ]+) should be ([^ ]+)' +LINTIGNORESTUTTER='service/[^/]+/(api|service)\.go:.+(and that stutters)' +LINTIGNOREINFLECT='service/[^/]+/(api|service)\.go:.+method .+ should be ' +LINTIGNOREINFLECTS3UPLOAD='service/s3/s3manager/upload\.go:.+struct field SSEKMSKeyId should be ' +LINTIGNOREDEPS='vendor/.+\.go' + +SDK_WITH_VENDOR_PKGS=$(shell go list ./... | grep -v "/vendor/src") +SDK_ONLY_PKGS=$(shell go list ./... | grep -v "/vendor/") +SDK_GO_1_4=$(shell go version | grep "go1.4") +SDK_GO_VERSION=$(shell go version | awk '''{print $$3}''' | tr -d '''\n''') + +all: get-deps generate unit + +help: + @echo "Please use \`make ' where is one of" + @echo " api_info to print a list of services and versions" + @echo " docs to build SDK documentation" + @echo " build to go build the SDK" + @echo " unit to run unit tests" + @echo " integration to run integration tests" + @echo " performance to run performance tests" + @echo " verify to verify tests" + @echo " lint to lint the SDK" + @echo " vet to vet the SDK" + @echo " generate to go generate and make services" + @echo " gen-test to generate protocol tests" + @echo " gen-services to generate services" + @echo " get-deps to go get the SDK dependencies" + @echo " get-deps-tests to get the SDK's test dependencies" + @echo " get-deps-verify to get the SDK's verification dependencies" + +generate: gen-test gen-endpoints gen-services + +gen-test: gen-protocol-test + +gen-services: + go generate ./service + +gen-protocol-test: + go generate ./private/protocol/... + +gen-endpoints: + go generate ./private/endpoints + +build: + @echo "go build SDK and vendor packages" + @go build ${SDK_ONLY_PKGS} + +unit: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test -tags $(SDK_ONLY_PKGS) + +unit-with-race-cover: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test -tags -race -cpu=1,2,4 $(SDK_ONLY_PKGS) + +integration: get-deps-tests integ-custom smoke-tests performance + +integ-custom: + go test -tags "integration" ./awstesting/integration/customizations/... + +smoke-tests: get-deps-tests + gucumber -go-tags "integration" ./awstesting/integration/smoke + +performance: get-deps-tests + AWS_TESTING_LOG_RESULTS=${log-detailed} AWS_TESTING_REGION=$(region) AWS_TESTING_DB_TABLE=$(table) gucumber -go-tags "integration" ./awstesting/performance + +sandbox-tests: sandbox-test-go14 sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-gotip + +sandbox-test-go14: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.4 -t "aws-sdk-go-1.4" . + docker run -t aws-sdk-go-1.4 + +sandbox-test-go15: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5 -t "aws-sdk-go-1.5" . + docker run -t aws-sdk-go-1.5 + +sandbox-test-go15-novendorexp: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5-novendorexp -t "aws-sdk-go-1.5-novendorexp" . 
+ docker run -t aws-sdk-go-1.5-novendorexp + +sandbox-test-go16: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.6 -t "aws-sdk-go-1.6" . + docker run -t aws-sdk-go-1.6 + +sandbox-test-go17: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.7 -t "aws-sdk-go-1.7" . + docker run -t aws-sdk-go-1.7 + +sandbox-test-gotip: + @echo "Run make update-aws-golang-tip, if this test fails because missing aws-golang:tip container" + docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" . + docker run -t aws-sdk-go-tip + +update-aws-golang-tip: + docker build -f ./awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" . + +verify: get-deps-verify lint vet + +lint: + @echo "go lint SDK and vendor packages" + @lint=`if [ -z "${SDK_GO_1_4}" ]; then golint ./...; else echo "skipping golint"; fi`; \ + lint=`echo "$$lint" | grep -E -v -e ${LINTIGNOREDOT} -e ${LINTIGNOREDOC} -e ${LINTIGNORECONST} -e ${LINTIGNORESTUTTER} -e ${LINTIGNOREINFLECT} -e ${LINTIGNOREDEPS} -e ${LINTIGNOREINFLECTS3UPLOAD}`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ] && [ "$$lint" != "skipping golint" ]; then exit 1; fi + +SDK_BASE_FOLDERS=$(shell ls -d */ | grep -v vendor | grep -v awsmigrate) +ifneq (,$(findstring go1.5, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow +else ifneq (,$(findstring go1.6, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow -example=false +else ifneq (,$(findstring devel, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow -tests=false +else + GO_VET_CMD=echo skipping go vet, ${SDK_GO_VERSION} +endif + +vet: + ${GO_VET_CMD} ${SDK_BASE_FOLDERS} + +get-deps: get-deps-tests get-deps-verify + @echo "go get SDK dependencies" + @go get -v $(SDK_ONLY_PKGS) + +get-deps-tests: + @echo "go get SDK testing dependencies" + go get github.com/lsegal/gucumber/cmd/gucumber + go get github.com/stretchr/testify + go get github.com/smartystreets/goconvey + +get-deps-verify: + @echo "go get SDK verification utilities" + @if [ -z "${SDK_GO_1_4}" ]; then go get github.com/golang/lint/golint; else echo "skipped getting golint"; fi + +bench: + @echo "go bench SDK packages" + @go test -run NONE -bench . -benchmem -tags 'bench' $(SDK_ONLY_PKGS) + +bench-protocol: + @echo "go bench SDK protocol marshallers" + @go test -run NONE -bench . -benchmem -tags 'bench' ./private/protocol/... + +docs: + @echo "generate SDK docs" + rm -rf doc && bundle install && bundle exec yard + @# This env variable, DOCS, is for internal use + @if [ -n "$(AWS_DOC_GEN_TOOL)" ]; then echo "For internal use. 
Subject to change."; $(AWS_DOC_GEN_TOOL) `pwd`; fi + +api_info: + @go run private/model/cli/api-info/api-info.go diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md new file mode 100644 index 0000000..812ce71 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -0,0 +1,116 @@ +# AWS SDK for Go + + +[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) +[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) +[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) + + +aws-sdk-go is the official AWS SDK for the Go programming language. + +Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK. + +## Installing + +If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder. + + go get -u github.com/aws/aws-sdk-go + +Otherwise if your Go environment does not have vendoring support enabled, or you do not want to include the vendored SDK's dependencies you can use the following command to retrieve the SDK and its non-testing dependencies using `go get`. + + go get -u github.com/aws/aws-sdk-go/aws/... + go get -u github.com/aws/aws-sdk-go/service/... + +If you're looking to retrieve just the SDK without any dependencies use the following command. + + go get -d github.com/aws/aws-sdk-go/ + +These two processes will still include the `vendor` folder and it should be deleted if its not going to be used by your environment. + + rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor + +## Reference Documentation +[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction how to configure and make requests with the SDK. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services. + +[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK, and examples how to using the SDK, service client API operations, and API operation require parameters. + +[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These are great guides both, if you're getting started with a service, or looking for more information on a service. You should not need this document for coding, though in some cases, services may supply helpful samples that you might want to look out for. 
+ +[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are a several hand crafted examples using the SDK features and AWS services. + +## Configuring Credentials + +Before using the SDK, ensure that you've configured credentials. The best +way to configure credentials on a development machine is to use the +`~/.aws/credentials` file, which might look like: + +``` +[default] +aws_access_key_id = AKID1234567890 +aws_secret_access_key = MY-SECRET-KEY +``` + +You can learn more about the credentials file from this +[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs). + +Alternatively, you can set the following environment variables: + +``` +AWS_ACCESS_KEY_ID=AKID1234567890 +AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY +``` + +### AWS CLI config file (`~/.aws/config`) +The AWS SDK for Go does not support the AWS CLI's config file. The SDK will not use any contents from this file. The SDK only supports the shared credentials file (`~/.aws/credentials`). #384 tracks this feature request discussion. + +## Using the Go SDK + +To use a service in the SDK, create a service variable by calling the `New()` +function. Once you have a service client, you can call API operations which each +return response data and a possible error. + +To list a set of instance IDs from EC2, you could run: + +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func main() { + // Create an EC2 service object in the "us-west-2" region + // Note that you can also configure your region globally by + // exporting the AWS_REGION environment variable + svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")}) + + // Call the DescribeInstances Operation + resp, err := svc.DescribeInstances(nil) + if err != nil { + panic(err) + } + + // resp has all of the response data, pull out instance IDs: + fmt.Println("> Number of reservation sets: ", len(resp.Reservations)) + for idx, res := range resp.Reservations { + fmt.Println(" > Number of instances: ", len(res.Instances)) + for _, inst := range resp.Reservations[idx].Instances { + fmt.Println(" - Instance ID: ", *inst.InstanceId) + } + } +} +``` + +You can find more information and operations in our +[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE.txt and NOTICE.txt for more information. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index be8b517..e50771f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -14,13 +14,13 @@ package awserr // if err != nil { // if awsErr, ok := err.(awserr.Error); ok { // // Get error details -// log.Println("Error:", err.Code(), err.Message()) +// log.Println("Error:", awsErr.Code(), awsErr.Message()) // // // Prints out full error message, including original error if there was one. -// log.Println("Error:", err.Error()) +// log.Println("Error:", awsErr.Error()) // // // Get original error -// if origErr := err.Err(); origErr != nil { +// if origErr := awsErr.OrigErr(); origErr != nil { // // operate on original error. 
// } // } else { @@ -42,9 +42,12 @@ type Error interface { OrigErr() error } -// BatchError is a batch of errors which also wraps lower level errors with code, message, -// and original errors. Calling Error() will only return the error that is at the end -// of the list. +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. type BatchError interface { // Satisfy the generic error interface. error @@ -59,20 +62,35 @@ type BatchError interface { OrigErrs() []error } +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occured in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + // New returns an Error object described by the code, message, and origErr. // // If origErr satisfies the Error interface it will not be wrapped within a new // Error object and will instead be returned. func New(code, message string, origErr error) Error { - if e, ok := origErr.(Error); ok && e != nil { - return e + var errs []error + if origErr != nil { + errs = append(errs, origErr) } - return newBaseError(code, message, origErr) + return newBaseError(code, message, errs) } -// NewBatchError returns an baseError with an expectation of an array of errors -func NewBatchError(code, message string, errs []error) BatchError { - return newBaseErrors(code, message, errs) +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) } // A RequestFailure is an interface to extract request failure information from @@ -85,9 +103,9 @@ func NewBatchError(code, message string, errs []error) BatchError { // output, err := s3manage.Upload(svc, input, opts) // if err != nil { // if reqerr, ok := err.(RequestFailure); ok { -// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) // } else { -// log.Printf("Error:", err.Error() +// log.Println("Error:", err.Error()) // } // } // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 605f73c..e2d333b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -34,36 +34,17 @@ type baseError struct { errs []error } -// newBaseError returns an error object for the code, message, and err. +// newBaseError returns an error object for the code, message, and errors. // // code is a short no whitespace phrase depicting the classification of // the error that is being created. // -// message is the free flow string containing detailed information about the error. +// message is the free flow string containing detailed information about the +// error. // -// origErr is the error object which will be nested under the new error to be returned. 
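The `awserr` changes above replace `BatchError` with `BatchedErrors` and make `New`/`NewBatchError` share one constructor path. A small usage sketch against the exported `awserr` API; the error codes and messages are made up for illustration:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap two lower-level failures under one classified error code.
	batch := awserr.NewBatchError("SyncFailure", "multiple uploads failed", []error{
		errors.New("upload a.txt: timeout"),
		errors.New("upload b.txt: access denied"),
	})

	fmt.Println(batch.Code(), "-", batch.Message())
	for _, e := range batch.OrigErrs() {
		fmt.Println("  caused by:", e)
	}

	// New with a single original error behaves as before: OrigErr returns it directly.
	single := awserr.New("NotFound", "object missing", errors.New("404"))
	fmt.Println(single.OrigErr())
}
```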
-func newBaseError(code, message string, origErr error) *baseError { - b := &baseError{ - code: code, - message: message, - } - - if origErr != nil { - b.errs = append(b.errs, origErr) - } - - return b -} - -// newBaseErrors returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the error. -// -// origErrs is the error objects which will be nested under the new errors to be returned. -func newBaseErrors(code, message string, origErrs []error) *baseError { +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { b := &baseError{ code: code, message: message, @@ -103,19 +84,26 @@ func (b baseError) Message() string { return b.message } -// OrigErr returns the original error if one was set. Nil is returned if no error -// was set. This only returns the first element in the list. If the full list is -// needed, use BatchError +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. func (b baseError) OrigErr() error { - if size := len(b.errs); size > 0 { + switch len(b.errs) { + case 0: + return nil + case 1: return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occured", b.errs) } - - return nil } -// OrigErrs returns the original errors if one was set. An empty slice is returned if -// no error was set:w +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. func (b baseError) OrigErrs() []error { return b.errs } @@ -133,8 +121,8 @@ type requestError struct { requestID string } -// newRequestError returns a wrapped error with additional information for request -// status code, and service requestID. +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. // // Should be used to wrap all request which involve service requests. Even if // the request failed without a service response, but had an HTTP status code @@ -173,6 +161,15 @@ func (r requestError) RequestID() string { return r.requestID } +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. 
+func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + // An error list that satisfies the golang interface type errorList []error diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go index 0de3eaa..fc38172 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -91,6 +91,10 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } format := "%v" switch v.Interface().(type) { case string: diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 24d39ce..43a3676 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,8 +1,8 @@ package client import ( - "math" "math/rand" + "sync" "time" "github.com/aws/aws-sdk-go/aws/request" @@ -30,16 +30,61 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } +var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) + // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30) + // Set the upper limit of delay in retrying at ~five minutes + minTime := 30 + throttle := d.shouldThrottle(r) + if throttle { + minTime = 500 + } + + retryCount := r.RetryCount + if retryCount > 13 { + retryCount = 13 + } else if throttle && retryCount > 8 { + retryCount = 8 + } + + delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } -// ShouldRetry returns if the request should be retried. +// ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { if r.HTTPResponse.StatusCode >= 500 { return true } - return r.IsErrorRetryable() + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 9e83e92..da72935 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -100,6 +100,31 @@ type Config struct { // Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 
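The delay logic above is a capped exponential backoff with jitter, scaled up when the response looks like throttling. Below is a standalone sketch of the same arithmetic, written as plain Go for illustration rather than as an SDK API.

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // retryDelay mirrors the vendored DefaultRetryer.RetryRules arithmetic:
    // delay = 2^retryCount * (rand(minTime) + minTime) milliseconds, with the
    // exponent capped so delays stop growing after a point.
    func retryDelay(retryCount int, throttled bool) time.Duration {
        minTime := 30
        if throttled {
            minTime = 500
        }

        if retryCount > 13 {
            retryCount = 13
        } else if throttled && retryCount > 8 {
            retryCount = 8
        }

        delay := (1 << uint(retryCount)) * (rand.Intn(minTime) + minTime)
        return time.Duration(delay) * time.Millisecond
    }

    func main() {
        for attempt := 0; attempt < 5; attempt++ {
            fmt.Printf("attempt %d: wait %v (throttled: %v)\n",
                attempt, retryDelay(attempt, false), retryDelay(attempt, true))
        }
    }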
100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait timeout. + // https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experiance issues + // with proxies or thrid party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations compatible + // with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible + // will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with accelerate + // enabled. If the bucket is not enabled for accelerate an error will be returned. + // The bucket name must be DNS compatible to also work with accelerate. + S3UseAccelerate *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata // client to create a new http.Client. This options is only meaningful if you're not @@ -114,13 +139,18 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. SleepDelay func(time.Duration) } // NewConfig returns a new Config pointer that can be chained with builder methods to // set multiple configuration values inline without using pointers. // -// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) // func NewConfig() *Config { return &Config{} @@ -210,6 +240,20 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config { return c } +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. 
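For reference, the two new S3 flags chain like the existing builder-style setters; a small sketch, with the region chosen arbitrarily:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Chain the new setters the same way as the existing ones. Accelerate
        // requires the bucket to have transfer acceleration enabled; disabling
        // 100-Continue can help behind proxies that mishandle Expect headers.
        cfg := aws.NewConfig().
            WithRegion("us-west-2").
            WithS3UseAccelerate(true).
            WithS3Disable100Continue(true)

        sess := session.New(cfg)
        fmt.Println(aws.StringValue(sess.Config.Region))
    }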
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { @@ -288,6 +332,14 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3ForcePathStyle = other.S3ForcePathStyle } + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + if other.EC2MetadataDisableTimeoutOverride != nil { dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index d6a7b08..3b73a7d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -2,7 +2,7 @@ package aws import "time" -// String returns a pointer to of the string value passed in. +// String returns a pointer to the string value passed in. func String(v string) *string { return &v } @@ -61,7 +61,7 @@ func StringValueMap(src map[string]*string) map[string]string { return dst } -// Bool returns a pointer to of the bool value passed in. +// Bool returns a pointer to the bool value passed in. func Bool(v bool) *bool { return &v } @@ -120,7 +120,7 @@ func BoolValueMap(src map[string]*bool) map[string]bool { return dst } -// Int returns a pointer to of the int value passed in. +// Int returns a pointer to the int value passed in. func Int(v int) *int { return &v } @@ -179,7 +179,7 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } -// Int64 returns a pointer to of the int64 value passed in. +// Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v } @@ -238,7 +238,7 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } -// Float64 returns a pointer to of the float64 value passed in. +// Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v } @@ -297,7 +297,7 @@ func Float64ValueMap(src map[string]*float64) map[string]float64 { return dst } -// Time returns a pointer to of the time.Time value passed in. +// Time returns a pointer to the time.Time value passed in. func Time(v time.Time) *time.Time { return &v } @@ -311,6 +311,18 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
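A tiny illustration of the new millisecond helper next to the pointer helpers touched in this hunk; the log-group name is made up:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Pointer helpers now read "a pointer to the value passed in".
        name := aws.String("example-log-group")
        fmt.Println(aws.StringValue(name))

        // CloudWatch Logs style timestamps are expressed in milliseconds.
        ts := aws.TimeUnixMilli(time.Date(2016, time.July, 21, 0, 0, 0, 0, time.UTC))
        fmt.Println(ts) // 1469059200000
    }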
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + // TimeSlice converts a slice of time.Time values into a slice of // time.Time pointers func TimeSlice(src []time.Time) []*time.Time { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 1d3e656..8456e29 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -24,30 +24,38 @@ type lener interface { // BuildContentLengthHandler builds the content length of a request based on the body, // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be aded to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ := strconv.ParseInt(slength, 10, 64) - r.HTTPRequest.ContentLength = length - return - } - var length int64 - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } } - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } }} // SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. @@ -64,6 +72,11 @@ var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *req var err error r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) if err != nil { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } // Capture the case where url.Error is returned for error processing // response. e.g. 301 without location header comes back as string // error and r.HTTPResponse is nil. 
Other url redirect errors will diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go index 3b53f5e..7d50b15 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -1,144 +1,17 @@ package corehandlers -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) +import "github.com/aws/aws-sdk-go/aws/request" // ValidateParametersHandler is a request handler to validate the input parameters. // Validating parameters only has meaning if done prior to the request being sent. var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if r.ParamsFilled() { - v := validator{errors: []string{}} - v.validateAny(reflect.ValueOf(r.Params), "") - - if count := len(v.errors); count > 0 { - format := "%d validation errors:\n- %s" - msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) - r.Error = awserr.New("InvalidParameter", msg, nil) - } - } -}} - -// A validator validates values. Collects validations errors which occurs. -type validator struct { - errors []string -} - -// validateAny will validate any struct, slice or map type. All validations -// are also performed recursively for nested types. -func (v *validator) validateAny(value reflect.Value, path string) { - value = reflect.Indirect(value) - if !value.IsValid() { + if !r.ParamsFilled() { return } - switch value.Kind() { - case reflect.Struct: - v.validateStruct(value, path) - case reflect.Slice: - for i := 0; i < value.Len(); i++ { - v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) - } - case reflect.Map: - for _, n := range value.MapKeys() { - v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err } } -} - -// validateStruct will validate the struct value's fields. If the structure has -// nested types those types will be validated also. -func (v *validator) validateStruct(value reflect.Value, path string) { - prefix := "." - if path == "" { - prefix = "" - } - - for i := 0; i < value.Type().NumField(); i++ { - f := value.Type().Field(i) - if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { - continue - } - fvalue := value.FieldByName(f.Name) - - err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) - if err != nil { - v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) - continue - } - - v.validateAny(fvalue, path+prefix+f.Name) - } -} - -type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error - -func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { - for _, fn := range funcs { - if err := fn(f, fvalue); err != nil { - return err - } - } - return nil -} - -// Validates that a field has a valid value provided for required fields. 
-func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { - if f.Tag.Get("required") == "" { - return nil - } - - switch fvalue.Kind() { - case reflect.Ptr, reflect.Slice, reflect.Map: - if fvalue.IsNil() { - return fmt.Errorf("missing required parameter") - } - default: - if !fvalue.IsValid() { - return fmt.Errorf("missing required parameter") - } - } - return nil -} - -// Validates that if a value is provided for a field, that value must be at -// least a minimum length. -func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { - minStr := f.Tag.Get("min") - if minStr == "" { - return nil - } - min, _ := strconv.ParseInt(minStr, 10, 64) - - kind := fvalue.Kind() - if kind == reflect.Ptr { - if fvalue.IsNil() { - return nil - } - fvalue = fvalue.Elem() - } - - switch fvalue.Kind() { - case reflect.String: - if int64(fvalue.Len()) < min { - return fmt.Errorf("field too short, minimum length %d", min) - } - case reflect.Slice, reflect.Map: - if fvalue.IsNil() { - return nil - } - if int64(fvalue.Len()) < min { - return fmt.Errorf("field too short, minimum length %d", min) - } - - // TODO min can also apply to number minimum value. - - } - return nil -} +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 5dd71f0..7b8ebf5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -76,6 +76,9 @@ type Value struct { // AWS Session Token SessionToken string + + // Provider used to get credentials + ProviderName string } // A Provider is the interface for any component which will provide credentials diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 04c8921..aa9d689 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -14,6 +14,9 @@ import ( "github.com/aws/aws-sdk-go/aws/ec2metadata" ) +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + // A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if // those credentials are expired. 
// @@ -85,17 +88,17 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(* func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { credsList, err := requestCredList(m.Client) if err != nil { - return credentials.Value{}, err + return credentials.Value{ProviderName: ProviderName}, err } if len(credsList) == 0 { - return credentials.Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) } credsName := credsList[0] roleCreds, err := requestCred(m.Client, credsName) if err != nil { - return credentials.Value{}, err + return credentials.Value{ProviderName: ProviderName}, err } m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) @@ -104,6 +107,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { AccessKeyID: roleCreds.AccessKeyID, SecretAccessKey: roleCreds.SecretAccessKey, SessionToken: roleCreds.Token, + ProviderName: ProviderName, }, nil } @@ -128,7 +132,7 @@ const iamSecurityCredsPath = "/iam/security-credentials" func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { resp, err := client.GetMetadata(iamSecurityCredsPath) if err != nil { - return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err) + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) } credsList := []string{} @@ -138,7 +142,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err) + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -153,7 +157,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), err) } @@ -161,7 +165,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, awserr.New("SerializationError", - fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 0000000..a4cec5c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,191 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. 
The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a Credentials wrapper for retrieving credentials +// from an arbitrary endpoint concurrently. The client will request the +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
+func (p *Provider) Retrieve() (credentials.Value, error) { + resp, err := p.getCredentials() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials() (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.HTTPRequest.Header.Set("Accept", "application/json") + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index 043e861..96655bc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -6,6 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" ) +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. 
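To show how the endpointcreds provider added above might be wired up, here is a hedged sketch; the URL is hypothetical, and it assumes the defaults.Config and defaults.Handlers helpers present in this vendored revision.

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        // Hypothetical local endpoint serving the JSON credential document
        // described in the package comment above.
        const credsURL = "http://127.0.0.1:8080/creds"

        creds := endpointcreds.NewCredentialsClient(
            *defaults.Config(), defaults.Handlers(), credsURL,
            func(p *endpointcreds.Provider) {
                p.ExpiryWindow = 5 * time.Minute
            },
        )

        v, err := creds.Get()
        if err != nil {
            fmt.Println("retrieve failed:", err)
            return
        }
        fmt.Println("provider:", v.ProviderName)
    }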
@@ -52,11 +55,11 @@ func (e *EnvProvider) Retrieve() (Value, error) { } if id == "" { - return Value{}, ErrAccessKeyIDNotFound + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound } if secret == "" { - return Value{}, ErrSecretAccessKeyNotFound + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound } e.retrieved = true @@ -64,6 +67,7 @@ func (e *EnvProvider) Retrieve() (Value, error) { AccessKeyID: id, SecretAccessKey: secret, SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index 09bd00a..7fb7cbf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -10,6 +10,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" ) +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + var ( // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. // @@ -55,12 +58,12 @@ func (p *SharedCredentialsProvider) Retrieve() (Value, error) { filename, err := p.filename() if err != nil { - return Value{}, err + return Value{ProviderName: SharedCredsProviderName}, err } creds, err := loadProfile(filename, p.profile()) if err != nil { - return Value{}, err + return Value{ProviderName: SharedCredsProviderName}, err } p.retrieved = true @@ -78,23 +81,23 @@ func (p *SharedCredentialsProvider) IsExpired() bool { func loadProfile(filename, profile string) (Value, error) { config, err := ini.Load(filename) if err != nil { - return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) } iniProfile, err := config.GetSection(profile) if err != nil { - return Value{}, awserr.New("SharedCredsLoad", "failed to get profile", err) + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) } id, err := iniProfile.GetKey("aws_access_key_id") if err != nil { - return Value{}, awserr.New("SharedCredsAccessKey", + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), err) } secret, err := iniProfile.GetKey("aws_secret_access_key") if err != nil { - return Value{}, awserr.New("SharedCredsSecret", + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), nil) } @@ -106,6 +109,7 @@ func loadProfile(filename, profile string) (Value, error) { AccessKeyID: id.String(), SecretAccessKey: secret.String(), SessionToken: token.String(), + ProviderName: SharedCredsProviderName, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 530a9ac..6f07560 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -4,6 +4,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" ) +// StaticProviderName 
provides a name of Static provider +const StaticProviderName = "StaticProvider" + var ( // ErrStaticCredentialsEmpty is emitted when static credentials are empty. // @@ -11,7 +14,7 @@ var ( ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) ) -// A StaticProvider is a set of credentials which are set pragmatically, +// A StaticProvider is a set of credentials which are set programmatically, // and will never expire. type StaticProvider struct { Value @@ -30,9 +33,10 @@ func NewStaticCredentials(id, secret, token string) *Credentials { // Retrieve returns the credentials or error if the credentials are invalid. func (s *StaticProvider) Retrieve() (Value, error) { if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{}, ErrStaticCredentialsEmpty + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty } + s.Value.ProviderName = StaticProviderName return s.Value, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 42c883a..570417f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -8,6 +8,7 @@ package defaults import ( + "fmt" "net/http" "os" "time" @@ -16,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/endpoints" @@ -66,7 +68,9 @@ func Handlers() request.Handlers { var handlers request.Handlers handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) handlers.Send.PushBackNamed(corehandlers.SendHandler) handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) @@ -81,16 +85,43 @@ func Handlers() request.Handlers { // is available if you need to reset the credentials of an // existing service client or session's Config. 
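Because every provider now records Value.ProviderName, callers can check which link of the chain actually supplied credentials; a minimal sketch, assuming credentials are resolvable in the environment:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.New()

        // Get resolves the chain (env vars, shared file, ECS endpoint or EC2
        // role) and the returned Value records which provider supplied the keys.
        v, err := sess.Config.Credentials.Get()
        if err != nil {
            fmt.Println("no credentials resolved:", err)
            return
        }
        fmt.Println("credentials came from:", v.ProviderName)
    }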
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) - return credentials.NewCredentials(&credentials.ChainProvider{ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), Providers: []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), - ExpiryWindow: 5 * time.Minute, - }, - }}) + remoteCredProvider(*cfg, handlers), + }, + }) +} + +func remoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + + if len(ecsCredURI) > 0 { + return ecsCredProvider(cfg, handlers, ecsCredURI) + } + + return ec2RoleProvider(cfg, handlers) +} + +func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { + const host = `169.254.170.2` + + return endpointcreds.NewProviderClient(cfg, handlers, + fmt.Sprintf("http://%s%s", host, uri), + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, + aws.StringValue(cfg.Region), true) + + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index e5137ca..669c813 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -1,12 +1,19 @@ package ec2metadata import ( + "encoding/json" + "fmt" "path" + "strings" + "time" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) -// GetMetadata uses the path provided to request +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. func (c *EC2Metadata) GetMetadata(p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", @@ -20,6 +27,68 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { return output.Content, req.Send() } +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + + return output.Content, req.Send() +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
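The new dynamic-data calls can be exercised like this; a sketch only, assuming the ec2metadata.New(session) constructor from this SDK revision and an instance with metadata access:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        meta := ec2metadata.New(session.New())

        // Available probes the metadata endpoint; off EC2 this returns false.
        if !meta.Available() {
            fmt.Println("not running on EC2")
            return
        }

        doc, err := meta.GetInstanceIdentityDocument()
        if err != nil {
            fmt.Println("identity document:", err)
            return
        }
        fmt.Println(doc.Region, doc.InstanceID, doc.InstanceType)
    }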
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { resp, err := c.GetMetadata("placement/availability-zone") @@ -41,3 +110,31 @@ func (c *EC2Metadata) Available() bool { return true } + +// An EC2IAMInfo provides the shape for unmarshalling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 9e16c1c..5b4379d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -3,7 +3,9 @@ package ec2metadata import ( - "io/ioutil" + "bytes" + "errors" + "io" "net/http" "time" @@ -91,23 +93,28 @@ type metadataOutput struct { func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + return } - data := r.Data.(*metadataOutput) - data.Content = string(b) + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } } func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - _, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { + b := 
&bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + return } - // TODO extract the error... + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) } func validateEndpointHandler(r *request.Request) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index f536948..db87188 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -79,6 +79,20 @@ type Logger interface { Log(...interface{}) } +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + // NewDefaultLogger returns a Logger which will write log messages to stdout, and // use same formatting runes as the stdlib log.Logger func NewDefaultLogger() Logger { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 3e90a79..5279c19 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -50,9 +50,28 @@ func (h *Handlers) Clear() { h.AfterRetry.Clear() } +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + // A HandlerList manages zero or more handlers in a list. type HandlerList struct { list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool } // A NamedHandler is a struct that contains a name and function callback. @@ -63,7 +82,9 @@ type NamedHandler struct { // copy creates a copy of the handler list. func (l *HandlerList) copy() HandlerList { - var n HandlerList + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } n.list = append([]NamedHandler{}, l.list...) return n } @@ -111,11 +132,37 @@ func (l *HandlerList) Remove(n NamedHandler) { // Run executes all handlers in the list with a given request object. func (l *HandlerList) Run(r *Request) { - for _, f := range l.list { - f.Fn(r) + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } } } +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. 
Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + // MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request // header. If the extra parameters are provided they will be added as metadata to the // name/version pair resulting in the following format. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 0000000..a4087f2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,33 @@ +// +build go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + // Cancel will be deprecated in 1.7 and will be replaced with Context + Cancel: r.Cancel, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go new file mode 100644 index 0000000..75da021 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go @@ -0,0 +1,31 @@ +// +build !go1.5 + +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + Close: r.Close, + Body: body, + Host: r.Host, + Method: r.Method, + Proto: r.Proto, + ContentLength: r.ContentLength, + } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 0000000..da6396d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,49 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.RWMutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close is a thread-safe close. Uses the write lock. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read using a read lock. 
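The AfterEachFn hook added to HandlerList above can be attached to a session's handler lists; a sketch, assuming the exported Session.Handlers field of this revision:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.New(aws.NewConfig().WithLogger(aws.NewDefaultLogger()))

        // Log every Send handler as it runs, and stop the Build list early on
        // the first handler error, using the hooks introduced above.
        sess.Handlers.Send.AfterEachFn = request.HandlerListLogItem
        sess.Handlers.Build.AfterEachFn = request.HandlerListStopOnError

        // Service clients created from sess copy these handler lists.
    }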
+func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.RLock() + defer o.lock.RUnlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 3735d7f..2832aaa 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -12,6 +12,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" ) @@ -22,20 +23,23 @@ type Request struct { Handlers Handlers Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time built bool } @@ -69,13 +73,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, if method == "" { method = "POST" } - p := operation.HTTPPath - if p == "" { - p = "/" - } httpReq, _ := http.NewRequest(method, "", nil) - httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } r := &Request{ Config: cfg, @@ -89,7 +95,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, HTTPRequest: httpReq, Body: nil, Params: params, - Error: nil, + Error: err, Data: data, } r.SetBufferBody([]byte{}) @@ -129,7 +135,7 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = ioutil.NopCloser(reader) + r.HTTPRequest.Body = newOffsetReader(reader, 0) r.Body = reader } @@ -137,6 +143,7 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) { // if the signing fails. func (r *Request) Presign(expireTime time.Duration) (string, error) { r.ExpireTime = expireTime + r.NotHoist = false r.Sign() if r.Error != nil { return "", r.Error @@ -144,6 +151,18 @@ func (r *Request) Presign(expireTime time.Duration) (string, error) { return r.HTTPRequest.URL.String(), nil } +// PresignRequest behaves just like presign, but hoists all headers and signs them. 
+// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + func debugLogReqError(r *Request, stage string, retrying bool, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return @@ -170,20 +189,23 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // which occurred will be returned. func (r *Request) Build() error { if !r.built { - r.Error = nil r.Handlers.Validate.Run(r) if r.Error != nil { debugLogReqError(r, "Validate Request", false, r.Error) return r.Error } r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } r.built = true } return r.Error } -// Sign will sign the request retuning error if errors are encountered. +// Sign will sign the request returning error if errors are encountered. // // Send will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. @@ -202,28 +224,53 @@ func (r *Request) Sign() error { // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go func (r *Request) Send() error { for { - r.Sign() - if r.Error != nil { - return r.Error - } - if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) } - // Re-seek the body back to the original point in for a retry so that - // send will send the body's contents again in the upcoming request. - r.Body.Seek(r.BodyStart, 0) - r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + var body io.ReadCloser + if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { + body = reader.CloseAndCopy(r.BodyStart) + } else { + if r.Config.Logger != nil { + r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") + } + r.Body.Seek(r.BodyStart, 0) + body = ioutil.NopCloser(r.Body) + } + + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + // Closing response body. Since we are setting a new request to send off, this + // response will get squashed and leaked. 
+ r.HTTPResponse.Body.Close() + } } + + r.Sign() + if r.Error != nil { + return r.Error + } + r.Retryable = nil r.Handlers.Send.Run(r) if r.Error != nil { + if strings.Contains(r.Error.Error(), "net/http: request canceled") { + return r.Error + } + err := r.Error r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index ab6fff5..8cc8b01 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -26,8 +26,11 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { // retryableCodes is a collection of service response codes which are retry-able // without any further action. var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, + "RequestError": {}, + "RequestTimeout": {}, +} + +var throttleCodes = map[string]struct{}{ "ProvisionedThroughputExceededException": {}, "Throttling": {}, "ThrottlingException": {}, @@ -46,6 +49,11 @@ var credsExpiredCodes = map[string]struct{}{ "RequestExpired": {}, // EC2 Only } +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + func isCodeRetryable(code string) bool { if _, ok := retryableCodes[code]; ok { return true @@ -70,6 +78,17 @@ func (r *Request) IsErrorRetryable() bool { return false } +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +func (r *Request) IsErrorThrottle() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeThrottle(err.Code()) + } + } + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. func (r *Request) IsErrorExpired() bool { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 0000000..2520286 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. 
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. 
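The slimmed-down ValidateParametersHandler earlier in this patch only calls Validate on the operation input, so these helpers are what generated inputs build on. A sketch with a hypothetical input type:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    // PutWidgetInput is a hypothetical operation input; generated SDK inputs
    // implement request.Validator in the same fashion.
    type PutWidgetInput struct {
        Name *string
        Tags []*string
    }

    // Validate collects problems into an ErrInvalidParams, which the
    // core.ValidateParametersHandler then surfaces as the request error.
    func (i *PutWidgetInput) Validate() error {
        invalid := request.ErrInvalidParams{Context: "PutWidgetInput"}
        if i.Name == nil {
            invalid.Add(request.NewErrParamRequired("Name"))
        }
        if i.Tags != nil && len(i.Tags) < 1 {
            invalid.Add(request.NewErrParamMinLen("Tags", 1))
        }
        if invalid.Len() > 0 {
            return invalid
        }
        return nil
    }

    var _ request.Validator = (*PutWidgetInput)(nil)

    func main() {
        err := (&PutWidgetInput{Tags: []*string{}}).Validate()
        fmt.Println(err)
    }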
+type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 47e4536..6bc8f1b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -86,7 +86,7 @@ func initHandlers(s *Session) { // // Example: // // Create a copy of the current session, configured for the us-west-2 region. 
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2"}) +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 0000000..244c86d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 0000000..f040f9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,644 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. 
+package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. 
+ Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. 
This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. +func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + } + + if ctx.isRequestSigned() { + if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) { + // If the request is already signed, and the credentials have not + // expired, and the request is not too old ignore the signing request. + return ctx.SignedHeaderVals, nil + } + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. 
This +// request handler is bested used only with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + }) + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + 
duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + uri := ctx.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = ctx.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if ctx.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = 
hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.ServiceName == "s3" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 0f067c5..fa014b4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -61,23 +61,41 @@ func (r ReaderSeekerCloser) Close() error { type WriteAtBuffer struct { buf []byte m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} } // WriteAt writes a slice of bytes to a buffer starting at the position provided // The number of bytes written will be returned, or error. Can overwrite previous // written slices if the write ats overlap. 
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) b.m.Lock() defer b.m.Unlock() - - expLen := pos + int64(len(p)) if int64(len(b.buf)) < expLen { - newBuf := make([]byte, expLen) - copy(newBuf, b.buf) - b.buf = newBuf + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] } copy(b.buf[pos:], p) - return len(p), nil + return pLen, nil } // Bytes returns a slice of bytes written to the buffer. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index afe2bb1..4382188 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.0.11" +const SDKVersion = "1.2.7" diff --git a/vendor/github.com/aws/aws-sdk-go/private/README.md b/vendor/github.com/aws/aws-sdk-go/private/README.md new file mode 100644 index 0000000..5bdb4c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/README.md @@ -0,0 +1,4 @@ +## AWS SDK for Go Private packages ## +`private` is a collection of packages used internally by the SDK, and is subject to breaking changes. This package is not `internal` so that if you really need to use its functionality, and understand that breaking changes will be made, you are able to. + +These packages will be refactored in the future so that the API generator and model parsers are exposed cleanly on their own, making it easier for you to generate your own code based on the API models.
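The aws/types.go hunk above adds a public GrowthCoeff field and a NewWriteAtBuffer constructor to aws.WriteAtBuffer, so the internal buffer can over-allocate when it grows instead of reallocating to the exact required length on every expansion. A minimal usage sketch of the new field (illustrative only, not part of the patch; the buffer sizes and data below are made up):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Start with 1 KiB of spare capacity and, whenever the buffer must
        // grow past its capacity, allocate 50% more than the required length.
        buf := aws.NewWriteAtBuffer(make([]byte, 0, 1024))
        buf.GrowthCoeff = 1.5

        // WriteAt accepts out-of-order writes and grows the buffer as needed.
        if _, err := buf.WriteAt([]byte("world"), 6); err != nil {
            fmt.Println("write failed:", err)
            return
        }
        if _, err := buf.WriteAt([]byte("hello,"), 0); err != nil {
            fmt.Println("write failed:", err)
            return
        }

        fmt.Printf("%s\n", buf.Bytes()) // hello,world
    }

With GrowthCoeff left at its zero value, WriteAt clamps it to 1, which reproduces the old reallocate-to-exact-length behavior.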
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json index 7819eed..5f4991c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -8,6 +8,9 @@ "endpoint": "{service}.{region}.amazonaws.com.cn", "signatureVersion": "v4" }, + "cn-north-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, "us-gov-west-1/iam": { "endpoint": "iam.us-gov.amazonaws.com" }, @@ -17,6 +20,9 @@ "us-gov-west-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, + "us-gov-west-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, "*/cloudfront": { "endpoint": "cloudfront.amazonaws.com", "signingRegion": "us-east-1" @@ -30,8 +36,7 @@ "signingRegion": "us-east-1" }, "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest", - "signingRegion": "us-east-1" + "endpoint": "http://169.254.169.254/latest" }, "*/iam": { "endpoint": "iam.amazonaws.com", @@ -57,36 +62,14 @@ "endpoint": "sdb.amazonaws.com", "signingRegion": "us-east-1" }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, "us-east-1/s3": { "endpoint": "s3.amazonaws.com" }, - "us-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-west-2/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "eu-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-southeast-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-southeast-2/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-northeast-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-northeast-2/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "sa-east-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com", - "signatureVersion": "v4" + "endpoint": "{service}.{region}.amazonaws.com" } } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go index 9b2e1b6..e995315 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -31,8 +31,7 @@ var endpointsMap = endpointStruct{ SigningRegion: "us-east-1", }, "*/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - SigningRegion: "us-east-1", + Endpoint: "http://169.254.169.254/latest", }, "*/iam": { Endpoint: "iam.amazonaws.com", @@ -46,6 +45,9 @@ var endpointsMap = endpointStruct{ Endpoint: "route53.amazonaws.com", SigningRegion: "us-east-1", }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, "*/sts": { Endpoint: "sts.amazonaws.com", SigningRegion: "us-east-1", @@ -54,30 +56,15 @@ var endpointsMap = endpointStruct{ Endpoint: "waf.amazonaws.com", SigningRegion: "us-east-1", }, - "ap-northeast-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "ap-northeast-2/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "ap-southeast-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "ap-southeast-2/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, "cn-north-1/*": { Endpoint: "{service}.{region}.amazonaws.com.cn", }, + "cn-north-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, "eu-central-1/s3": { Endpoint: "{service}.{region}.amazonaws.com", }, - "eu-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "sa-east-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - 
}, "us-east-1/s3": { Endpoint: "s3.amazonaws.com", }, @@ -85,6 +72,9 @@ var endpointsMap = endpointStruct{ Endpoint: "sdb.amazonaws.com", SigningRegion: "us-east-1", }, + "us-gov-west-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, "us-gov-west-1/iam": { Endpoint: "iam.us-gov.amazonaws.com", }, @@ -94,11 +84,5 @@ var endpointsMap = endpointStruct{ "us-gov-west-1/sts": { Endpoint: "sts.us-gov-west-1.amazonaws.com", }, - "us-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-west-2/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 2d78c35..56d69db 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -11,6 +11,9 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" ) +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + // Build builds a request for an AWS Query service. func Build(r *request.Request) { body := url.Values{ diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 4afa4cf..60ea0bd 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // Parse parses an object i and fills a url.Values object. The isEC2 flag @@ -68,14 +70,19 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri t := value.Type() for i := 0; i < value.NumField(); i++ { - if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { continue // ignore unexported fields } - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - var name string + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + var name string if q.isEC2 { name = field.Tag.Get("queryName") } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index f961029..a3ea409 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -10,6 +10,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + // Unmarshal unmarshals a response for an AWS Query service. 
func Unmarshal(r *request.Request) { defer r.HTTPResponse.Body.Close() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index cb87b79..f214296 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,7 +2,7 @@ package query import ( "encoding/xml" - "io" + "io/ioutil" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -15,15 +15,27 @@ type xmlErrorResponse struct { RequestID string `xml:"RequestId"` } +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + // UnmarshalError unmarshals an error response for an AWS Query service. func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) - } else { + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { reqID := resp.RequestID if reqID == "" { reqID = r.RequestID @@ -33,5 +45,22 @@ func UnmarshalError(r *request.Request) { r.HTTPResponse.StatusCode, reqID, ) + return } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 5469edb..5f41251 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -222,8 +222,7 @@ func EscapePath(path string, encodeSep bool) string { if noEscape[c] || (c == '/' && !encodeSep) { buf.WriteByte(c) } else { - buf.WriteByte('%') - buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + fmt.Fprintf(&buf, "%%%02X", c) } } return buf.String() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 0d76dff..ceb4132 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -8,8 +8,9 @@ import ( "reflect" "sort" "strconv" - "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // BuildXML will serialize params into an xml.Encoder. 
@@ -120,18 +121,23 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl t := value.Type() for i := 0; i < value.NumField(); i++ { - if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { continue // ignore unexported fields } - member := elemOf(value.Field(i)) - field := t.Field(i) mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members continue } + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + memberName := mTag.Get("locationName") if memberName == "" { memberName = field.Name diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go deleted file mode 100644 index dc176f3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go +++ /dev/null @@ -1,365 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" -) - -var ignoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -type signer struct { - Request *http.Request - Time time.Time - ExpireTime time.Duration - ServiceName string - Region string - CredValues credentials.Value - Credentials *credentials.Credentials - Query url.Values - Body io.ReadSeeker - Debug aws.LogLevelType - Logger aws.Logger - - isPresign bool - formattedTime string - formattedShortTime string - - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign requests with signature version 4. -// -// Will sign the requests with the service config's Credentials object -// Signing is skipped if the credentials is the credentials.AnonymousCredentials -// object. -func Sign(req *request.Request) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - s := signer{ - Request: req.HTTPRequest, - Time: req.Time, - ExpireTime: req.ExpireTime, - Query: req.HTTPRequest.URL.Query(), - Body: req.Body, - ServiceName: name, - Region: region, - Credentials: req.Config.Credentials, - Debug: req.Config.LogLevel.Value(), - Logger: req.Config.Logger, - } - - req.Error = s.sign() -} - -func (v4 *signer) sign() error { - if v4.ExpireTime != 0 { - v4.isPresign = true - } - - if v4.isRequestSigned() { - if !v4.Credentials.IsExpired() { - // If the request is already signed, and the credentials have not - // expired yet ignore the signing request. - return nil - } - - // The credentials have expired for this request. 
The current signing - // is invalid, and needs to be request because the request will fail. - if v4.isPresign { - v4.removePresign() - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - v4.Request.URL.RawQuery = v4.Query.Encode() - } - } - - var err error - v4.CredValues, err = v4.Credentials.Get() - if err != nil { - return err - } - - if v4.isPresign { - v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if v4.CredValues.SessionToken != "" { - v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } else { - v4.Query.Del("X-Amz-Security-Token") - } - } else if v4.CredValues.SessionToken != "" { - v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } - - v4.build() - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo() - } - - return nil -} - -const logSignInfoMsg = `DEBUG: Request Signiture: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *signer) logSigningInfo() { - signedURLMsg := "" - if v4.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (v4 *signer) build() { - v4.buildTime() // no depends - v4.buildCredentialString() // no depends - if v4.isPresign { - v4.buildQuery() // no depends - } - v4.buildCanonicalHeaders() // depends on cred string - v4.buildCanonicalString() // depends on canon headers / signed headers - v4.buildStringToSign() // depends on canon string - v4.buildSignature() // depends on string to sign - - if v4.isPresign { - v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, - "SignedHeaders=" + v4.signedHeaders, - "Signature=" + v4.signature, - } - v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (v4 *signer) buildTime() { - v4.formattedTime = v4.Time.UTC().Format(timeFormat) - v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) - - if v4.isPresign { - duration := int64(v4.ExpireTime / time.Second) - v4.Query.Set("X-Amz-Date", v4.formattedTime) - v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) - } -} - -func (v4 *signer) buildCredentialString() { - v4.credentialString = strings.Join([]string{ - v4.formattedShortTime, - v4.Region, - v4.ServiceName, - "aws4_request", - }, "/") - - if v4.isPresign { - v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) - } -} - -func (v4 *signer) buildQuery() { - for k, h := range v4.Request.Header { - if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") { - continue // never hoist x-amz-* headers, they must be signed - } - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // never hoist ignored headers - } - - v4.Request.Header.Del(k) - v4.Query.Del(k) - for _, v := range h { - v4.Query.Add(k, v) - } - } -} - -func (v4 *signer) buildCanonicalHeaders() { - var headers []string - headers = append(headers, "host") - for k := range v4.Request.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - 
continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - } - sort.Strings(headers) - - v4.signedHeaders = strings.Join(headers, ";") - - if v4.isPresign { - v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + v4.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") - } - } - - v4.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func (v4 *signer) buildCanonicalString() { - v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) - uri := v4.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = v4.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if v4.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - v4.canonicalString = strings.Join([]string{ - v4.Request.Method, - uri, - v4.Request.URL.RawQuery, - v4.canonicalHeaders + "\n", - v4.signedHeaders, - v4.bodyDigest(), - }, "\n") -} - -func (v4 *signer) buildStringToSign() { - v4.stringToSign = strings.Join([]string{ - authHeaderPrefix, - v4.formattedTime, - v4.credentialString, - hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), - }, "\n") -} - -func (v4 *signer) buildSignature() { - secret := v4.CredValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) - region := makeHmac(date, []byte(v4.Region)) - service := makeHmac(region, []byte(v4.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(v4.stringToSign)) - v4.signature = hex.EncodeToString(signature) -} - -func (v4 *signer) bodyDigest() string { - hash := v4.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if v4.isPresign && v4.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if v4.Body == nil { - hash = hex.EncodeToString(makeSha256([]byte{})) - } else { - hash = hex.EncodeToString(makeSha256Reader(v4.Body)) - } - v4.Request.Header.Add("X-Amz-Content-Sha256", hash) - } - return hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (v4 *signer) isRequestSigned() bool { - if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { - return true - } - if v4.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
-func (v4 *signer) removePresign() { - v4.Query.Del("X-Amz-Algorithm") - v4.Query.Del("X-Amz-Signature") - v4.Query.Del("X-Amz-Security-Token") - v4.Query.Del("X-Amz-Date") - v4.Query.Del("X-Amz-Expires") - v4.Query.Del("X-Amz-Credential") - v4.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go/sdk.go b/vendor/github.com/aws/aws-sdk-go/sdk.go new file mode 100644 index 0000000..afa465a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/sdk.go @@ -0,0 +1,7 @@ +// Package sdk is the official AWS SDK for the Go programming language. +// +// See our Developer Guide for information for on getting started and using +// the SDK. +// +// https://github.com/aws/aws-sdk-go/wiki +package sdk diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudFront/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go similarity index 70% rename from vendor/github.com/aws/aws-sdk-go/service/cloudFront/api.go rename to vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go index cfdb355..f9bd33f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudFront/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -4,6 +4,7 @@ package cloudfront import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws/awsutil" @@ -14,7 +15,28 @@ import ( const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2016_01_28" -// CreateCloudFrontOriginAccessIdentityRequest generates a request for the CreateCloudFrontOriginAccessIdentity operation. +// CreateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the CreateCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.CreateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *CreateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opCreateCloudFrontOriginAccessIdentity, @@ -41,7 +63,28 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFron const opCreateDistribution = "CreateDistribution2016_01_28" -// CreateDistributionRequest generates a request for the CreateDistribution operation. 
+// CreateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDistributionRequest method. +// req, resp := client.CreateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { op := &request.Operation{ Name: opCreateDistribution, @@ -68,7 +111,28 @@ func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*Create const opCreateInvalidation = "CreateInvalidation2016_01_28" -// CreateInvalidationRequest generates a request for the CreateInvalidation operation. +// CreateInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the CreateInvalidation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInvalidation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInvalidationRequest method. +// req, resp := client.CreateInvalidationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) (req *request.Request, output *CreateInvalidationOutput) { op := &request.Operation{ Name: opCreateInvalidation, @@ -95,7 +159,28 @@ func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*Create const opCreateStreamingDistribution = "CreateStreamingDistribution2016_01_28" -// CreateStreamingDistributionRequest generates a request for the CreateStreamingDistribution operation. +// CreateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStreamingDistribution method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStreamingDistributionRequest method. +// req, resp := client.CreateStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { op := &request.Operation{ Name: opCreateStreamingDistribution, @@ -122,7 +207,28 @@ func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistribut const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2016_01_28" -// DeleteCloudFrontOriginAccessIdentityRequest generates a request for the DeleteCloudFrontOriginAccessIdentity operation. +// DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.DeleteCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opDeleteCloudFrontOriginAccessIdentity, @@ -151,7 +257,28 @@ func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFron const opDeleteDistribution = "DeleteDistribution2016_01_28" -// DeleteDistributionRequest generates a request for the DeleteDistribution operation. +// DeleteDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDistributionRequest method. 
+// req, resp := client.DeleteDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { op := &request.Operation{ Name: opDeleteDistribution, @@ -180,7 +307,28 @@ func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*Delete const opDeleteStreamingDistribution = "DeleteStreamingDistribution2016_01_28" -// DeleteStreamingDistributionRequest generates a request for the DeleteStreamingDistribution operation. +// DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStreamingDistributionRequest method. +// req, resp := client.DeleteStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { op := &request.Operation{ Name: opDeleteStreamingDistribution, @@ -209,7 +357,28 @@ func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistribut const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2016_01_28" -// GetCloudFrontOriginAccessIdentityRequest generates a request for the GetCloudFrontOriginAccessIdentity operation. +// GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCloudFrontOriginAccessIdentityRequest method. 
+// req, resp := client.GetCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentity, @@ -236,7 +405,28 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOrigi const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2016_01_28" -// GetCloudFrontOriginAccessIdentityConfigRequest generates a request for the GetCloudFrontOriginAccessIdentityConfig operation. +// GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentityConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCloudFrontOriginAccessIdentityConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCloudFrontOriginAccessIdentityConfigRequest method. +// req, resp := client.GetCloudFrontOriginAccessIdentityConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentityConfig, @@ -263,7 +453,28 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFron const opGetDistribution = "GetDistribution2016_01_28" -// GetDistributionRequest generates a request for the GetDistribution operation. +// GetDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDistributionRequest method. 
+// req, resp := client.GetDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { op := &request.Operation{ Name: opGetDistribution, @@ -290,7 +501,28 @@ func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistribut const opGetDistributionConfig = "GetDistributionConfig2016_01_28" -// GetDistributionConfigRequest generates a request for the GetDistributionConfig operation. +// GetDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributionConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDistributionConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDistributionConfigRequest method. +// req, resp := client.GetDistributionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { op := &request.Operation{ Name: opGetDistributionConfig, @@ -317,7 +549,28 @@ func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (* const opGetInvalidation = "GetInvalidation2016_01_28" -// GetInvalidationRequest generates a request for the GetInvalidation operation. +// GetInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the GetInvalidation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetInvalidation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetInvalidationRequest method. +// req, resp := client.GetInvalidationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { op := &request.Operation{ Name: opGetInvalidation, @@ -344,7 +597,28 @@ func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidat const opGetStreamingDistribution = "GetStreamingDistribution2016_01_28" -// GetStreamingDistributionRequest generates a request for the GetStreamingDistribution operation. 
+// GetStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStreamingDistributionRequest method. +// req, resp := client.GetStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { op := &request.Operation{ Name: opGetStreamingDistribution, @@ -371,7 +645,28 @@ func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInp const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2016_01_28" -// GetStreamingDistributionConfigRequest generates a request for the GetStreamingDistributionConfig operation. +// GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistributionConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStreamingDistributionConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStreamingDistributionConfigRequest method. +// req, resp := client.GetStreamingDistributionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { op := &request.Operation{ Name: opGetStreamingDistributionConfig, @@ -398,7 +693,28 @@ func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistribut const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2016_01_28" -// ListCloudFrontOriginAccessIdentitiesRequest generates a request for the ListCloudFrontOriginAccessIdentities operation. +// ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListCloudFrontOriginAccessIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCloudFrontOriginAccessIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCloudFrontOriginAccessIdentitiesRequest method. +// req, resp := client.ListCloudFrontOriginAccessIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { op := &request.Operation{ Name: opListCloudFrontOriginAccessIdentities, @@ -429,6 +745,23 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontO return out, err } +// ListCloudFrontOriginAccessIdentitiesPages iterates over the pages of a ListCloudFrontOriginAccessIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCloudFrontOriginAccessIdentities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCloudFrontOriginAccessIdentities operation. +// pageNum := 0 +// err := client.ListCloudFrontOriginAccessIdentitiesPages(params, +// func(page *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudFrontOriginAccessIdentitiesInput, fn func(p *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListCloudFrontOriginAccessIdentitiesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -439,7 +772,28 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudF const opListDistributions = "ListDistributions2016_01_28" -// ListDistributionsRequest generates a request for the ListDistributions operation. +// ListDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDistributions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDistributionsRequest method. 
+// req, resp := client.ListDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { op := &request.Operation{ Name: opListDistributions, @@ -470,6 +824,23 @@ func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDist return out, err } +// ListDistributionsPages iterates over the pages of a ListDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDistributions operation. +// pageNum := 0 +// err := client.ListDistributionsPages(params, +// func(page *ListDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn func(p *ListDistributionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListDistributionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -480,7 +851,28 @@ func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn fu const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2016_01_28" -// ListDistributionsByWebACLIdRequest generates a request for the ListDistributionsByWebACLId operation. +// ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByWebACLId operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDistributionsByWebACLId method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDistributionsByWebACLIdRequest method. +// req, resp := client.ListDistributionsByWebACLIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { op := &request.Operation{ Name: opListDistributionsByWebACLId, @@ -507,7 +899,28 @@ func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebAC const opListInvalidations = "ListInvalidations2016_01_28" -// ListInvalidationsRequest generates a request for the ListInvalidations operation. +// ListInvalidationsRequest generates a "aws/request.Request" representing the +// client's request for the ListInvalidations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
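A runnable sketch of the Pages helper described above, collecting distribution IDs across every page instead of stopping after three; the callback signature comes from this hunk, while the DistributionList, Items, and Id field names are taken from the SDK's generated types rather than from the lines shown here.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	client := cloudfront.New(session.New())

	// Returning true from the callback keeps the paginator issuing
	// requests until the service reports the last page.
	var ids []string
	err := client.ListDistributionsPages(&cloudfront.ListDistributionsInput{},
		func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool {
			if page.DistributionList != nil {
				for _, d := range page.DistributionList.Items {
					ids = append(ids, aws.StringValue(d.Id))
				}
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ids)
}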
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInvalidations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInvalidationsRequest method. +// req, resp := client.ListInvalidationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { op := &request.Operation{ Name: opListInvalidations, @@ -538,6 +951,23 @@ func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInva return out, err } +// ListInvalidationsPages iterates over the pages of a ListInvalidations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInvalidations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInvalidations operation. +// pageNum := 0 +// err := client.ListInvalidationsPages(params, +// func(page *ListInvalidationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn func(p *ListInvalidationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListInvalidationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -548,7 +978,28 @@ func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn fu const opListStreamingDistributions = "ListStreamingDistributions2016_01_28" -// ListStreamingDistributionsRequest generates a request for the ListStreamingDistributions operation. +// ListStreamingDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreamingDistributions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStreamingDistributions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStreamingDistributionsRequest method. 
+// req, resp := client.ListStreamingDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { op := &request.Operation{ Name: opListStreamingDistributions, @@ -579,6 +1030,23 @@ func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistribution return out, err } +// ListStreamingDistributionsPages iterates over the pages of a ListStreamingDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreamingDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreamingDistributions operation. +// pageNum := 0 +// err := client.ListStreamingDistributionsPages(params, +// func(page *ListStreamingDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistributionsInput, fn func(p *ListStreamingDistributionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListStreamingDistributionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -589,7 +1057,28 @@ func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistrib const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2016_01_28" -// UpdateCloudFrontOriginAccessIdentityRequest generates a request for the UpdateCloudFrontOriginAccessIdentity operation. +// UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.UpdateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opUpdateCloudFrontOriginAccessIdentity, @@ -616,7 +1105,28 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFron const opUpdateDistribution = "UpdateDistribution2016_01_28" -// UpdateDistributionRequest generates a request for the UpdateDistribution operation. 
+// UpdateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDistributionRequest method. +// req, resp := client.UpdateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { op := &request.Operation{ Name: opUpdateDistribution, @@ -643,7 +1153,28 @@ func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*Update const opUpdateStreamingDistribution = "UpdateStreamingDistribution2016_01_28" -// UpdateStreamingDistributionRequest generates a request for the UpdateStreamingDistribution operation. +// UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStreamingDistributionRequest method. +// req, resp := client.UpdateStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) { op := &request.Operation{ Name: opUpdateStreamingDistribution, @@ -722,6 +1253,19 @@ func (s Aliases) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Aliases) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Aliases"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that controls which HTTP methods CloudFront processes and // forwards to your Amazon S3 bucket or your custom origin. There are three // choices: - CloudFront forwards only GET and HEAD requests. - CloudFront forwards @@ -761,6 +1305,27 @@ func (s AllowedMethods) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
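UpdateDistribution follows CloudFront's read-modify-write convention: fetch the current configuration together with its ETag, change it, and send it back with IfMatch. A hedged sketch reusing the client and imports from the first example; the ETag and IfMatch field names come from the wider SDK rather than this hunk, and the distribution ID and comment are hypothetical.

	getOut, err := client.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
		Id: aws.String("EDFDVBD6EXAMPLE"), // hypothetical distribution ID
	})
	if err != nil {
		log.Fatal(err)
	}

	cfg := getOut.DistributionConfig
	cfg.Comment = aws.String("updated by drone plugin") // illustrative change

	_, err = client.UpdateDistribution(&cloudfront.UpdateDistributionInput{
		Id:                 aws.String("EDFDVBD6EXAMPLE"),
		IfMatch:            getOut.ETag, // assumed field carrying the ETag from the read
		DistributionConfig: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}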
+func (s *AllowedMethods) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllowedMethods"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.CachedMethods != nil { + if err := s.CachedMethods.Validate(); err != nil { + invalidParams.AddNested("CachedMethods", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that describes how CloudFront processes requests. You can // create up to 10 cache behaviors.You must create at least as many cache behaviors // (including the default cache behavior) as you have origins if you want CloudFront @@ -885,6 +1450,49 @@ func (s CacheBehavior) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CacheBehavior) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CacheBehavior"} + if s.ForwardedValues == nil { + invalidParams.Add(request.NewErrParamRequired("ForwardedValues")) + } + if s.MinTTL == nil { + invalidParams.Add(request.NewErrParamRequired("MinTTL")) + } + if s.PathPattern == nil { + invalidParams.Add(request.NewErrParamRequired("PathPattern")) + } + if s.TargetOriginId == nil { + invalidParams.Add(request.NewErrParamRequired("TargetOriginId")) + } + if s.TrustedSigners == nil { + invalidParams.Add(request.NewErrParamRequired("TrustedSigners")) + } + if s.ViewerProtocolPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("ViewerProtocolPolicy")) + } + if s.AllowedMethods != nil { + if err := s.AllowedMethods.Validate(); err != nil { + invalidParams.AddNested("AllowedMethods", err.(request.ErrInvalidParams)) + } + } + if s.ForwardedValues != nil { + if err := s.ForwardedValues.Validate(); err != nil { + invalidParams.AddNested("ForwardedValues", err.(request.ErrInvalidParams)) + } + } + if s.TrustedSigners != nil { + if err := s.TrustedSigners.Validate(); err != nil { + invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains zero or more CacheBehavior elements. type CacheBehaviors struct { _ struct{} `type:"structure"` @@ -907,6 +1515,29 @@ func (s CacheBehaviors) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CacheBehaviors) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CacheBehaviors"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that controls whether CloudFront caches the response to requests // using the specified HTTP methods. There are two choices: - CloudFront caches // responses to GET and HEAD requests. - CloudFront caches responses to GET, @@ -936,6 +1567,22 @@ func (s CachedMethods) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CachedMethods) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CachedMethods"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that specifies the whitelisted cookies, if any, that you want // CloudFront to forward to your origin that is associated with this cache behavior. type CookieNames struct { @@ -959,6 +1606,19 @@ func (s CookieNames) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CookieNames) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CookieNames"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that specifies the cookie preferences associated with this // cache behavior. type CookiePreference struct { @@ -985,6 +1645,24 @@ func (s CookiePreference) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CookiePreference) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CookiePreference"} + if s.Forward == nil { + invalidParams.Add(request.NewErrParamRequired("Forward")) + } + if s.WhitelistedNames != nil { + if err := s.WhitelistedNames.Validate(); err != nil { + invalidParams.AddNested("WhitelistedNames", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The request to create a new origin access identity. type CreateCloudFrontOriginAccessIdentityInput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` @@ -1003,6 +1681,24 @@ func (s CreateCloudFrontOriginAccessIdentityInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCloudFrontOriginAccessIdentityInput"} + if s.CloudFrontOriginAccessIdentityConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CloudFrontOriginAccessIdentityConfig")) + } + if s.CloudFrontOriginAccessIdentityConfig != nil { + if err := s.CloudFrontOriginAccessIdentityConfig.Validate(); err != nil { + invalidParams.AddNested("CloudFrontOriginAccessIdentityConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type CreateCloudFrontOriginAccessIdentityOutput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` @@ -1046,6 +1742,24 @@ func (s CreateDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDistributionInput"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) + } + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type CreateDistributionOutput struct { _ struct{} `type:"structure" payload:"Distribution"` @@ -1092,6 +1806,27 @@ func (s CreateInvalidationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInvalidationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInvalidationInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + if s.InvalidationBatch == nil { + invalidParams.Add(request.NewErrParamRequired("InvalidationBatch")) + } + if s.InvalidationBatch != nil { + if err := s.InvalidationBatch.Validate(); err != nil { + invalidParams.AddNested("InvalidationBatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type CreateInvalidationOutput struct { _ struct{} `type:"structure" payload:"Invalidation"` @@ -1132,6 +1867,24 @@ func (s CreateStreamingDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionInput"} + if s.StreamingDistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) + } + if s.StreamingDistributionConfig != nil { + if err := s.StreamingDistributionConfig.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type CreateStreamingDistributionOutput struct { _ struct{} `type:"structure" payload:"StreamingDistribution"` @@ -1204,6 +1957,19 @@ func (s CustomErrorResponse) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomErrorResponse) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponse"} + if s.ErrorCode == nil { + invalidParams.Add(request.NewErrParamRequired("ErrorCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains zero or more CustomErrorResponse elements. type CustomErrorResponses struct { _ struct{} `type:"structure"` @@ -1226,6 +1992,29 @@ func (s CustomErrorResponses) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
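The generated Validate methods are run by the SDK's core handlers when a request is sent, but they can also be called directly to surface missing required fields before any network call. A minimal sketch reusing the imports from the earlier examples; the input is deliberately incomplete and the ID is illustrative.

	// InvalidationBatch is intentionally omitted so Validate has something to report.
	input := &cloudfront.CreateInvalidationInput{
		DistributionId: aws.String("EDFDVBD6EXAMPLE"), // hypothetical ID
	}
	if err := input.Validate(); err != nil {
		// err is a request.ErrInvalidParams that aggregates every missing
		// or invalid field, including nested ones added via AddNested.
		fmt.Println(err)
	}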
+func (s *CustomErrorResponses) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponses"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains the list of Custom Headers for each origin. type CustomHeaders struct { _ struct{} `type:"structure"` @@ -1247,6 +2036,29 @@ func (s CustomHeaders) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomHeaders) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomHeaders"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A customer origin. type CustomOriginConfig struct { _ struct{} `type:"structure"` @@ -1275,6 +2087,30 @@ func (s CustomOriginConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomOriginConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomOriginConfig"} + if s.HTTPPort == nil { + invalidParams.Add(request.NewErrParamRequired("HTTPPort")) + } + if s.HTTPSPort == nil { + invalidParams.Add(request.NewErrParamRequired("HTTPSPort")) + } + if s.OriginProtocolPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("OriginProtocolPolicy")) + } + if s.OriginSslProtocols != nil { + if err := s.OriginSslProtocols.Validate(); err != nil { + invalidParams.AddNested("OriginSslProtocols", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that describes the default cache behavior if you do not specify // a CacheBehavior element or if files don't match any of the values of PathPattern // in CacheBehavior elements.You must create exactly one default cache behavior. @@ -1379,6 +2215,46 @@ func (s DefaultCacheBehavior) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DefaultCacheBehavior) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DefaultCacheBehavior"} + if s.ForwardedValues == nil { + invalidParams.Add(request.NewErrParamRequired("ForwardedValues")) + } + if s.MinTTL == nil { + invalidParams.Add(request.NewErrParamRequired("MinTTL")) + } + if s.TargetOriginId == nil { + invalidParams.Add(request.NewErrParamRequired("TargetOriginId")) + } + if s.TrustedSigners == nil { + invalidParams.Add(request.NewErrParamRequired("TrustedSigners")) + } + if s.ViewerProtocolPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("ViewerProtocolPolicy")) + } + if s.AllowedMethods != nil { + if err := s.AllowedMethods.Validate(); err != nil { + invalidParams.AddNested("AllowedMethods", err.(request.ErrInvalidParams)) + } + } + if s.ForwardedValues != nil { + if err := s.ForwardedValues.Validate(); err != nil { + invalidParams.AddNested("ForwardedValues", err.(request.ErrInvalidParams)) + } + } + if s.TrustedSigners != nil { + if err := s.TrustedSigners.Validate(); err != nil { + invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The request to delete a origin access identity. type DeleteCloudFrontOriginAccessIdentityInput struct { _ struct{} `type:"structure"` @@ -1401,6 +2277,19 @@ func (s DeleteCloudFrontOriginAccessIdentityInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCloudFrontOriginAccessIdentityInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteCloudFrontOriginAccessIdentityOutput struct { _ struct{} `type:"structure"` } @@ -1437,6 +2326,19 @@ func (s DeleteDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteDistributionOutput struct { _ struct{} `type:"structure"` } @@ -1473,6 +2375,19 @@ func (s DeleteStreamingDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteStreamingDistributionOutput struct { _ struct{} `type:"structure"` } @@ -1611,6 +2526,66 @@ func (s DistributionConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DistributionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DistributionConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.DefaultCacheBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultCacheBehavior")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Origins == nil { + invalidParams.Add(request.NewErrParamRequired("Origins")) + } + if s.Aliases != nil { + if err := s.Aliases.Validate(); err != nil { + invalidParams.AddNested("Aliases", err.(request.ErrInvalidParams)) + } + } + if s.CacheBehaviors != nil { + if err := s.CacheBehaviors.Validate(); err != nil { + invalidParams.AddNested("CacheBehaviors", err.(request.ErrInvalidParams)) + } + } + if s.CustomErrorResponses != nil { + if err := s.CustomErrorResponses.Validate(); err != nil { + invalidParams.AddNested("CustomErrorResponses", err.(request.ErrInvalidParams)) + } + } + if s.DefaultCacheBehavior != nil { + if err := s.DefaultCacheBehavior.Validate(); err != nil { + invalidParams.AddNested("DefaultCacheBehavior", err.(request.ErrInvalidParams)) + } + } + if s.Logging != nil { + if err := s.Logging.Validate(); err != nil { + invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) + } + } + if s.Origins != nil { + if err := s.Origins.Validate(); err != nil { + invalidParams.AddNested("Origins", err.(request.ErrInvalidParams)) + } + } + if s.Restrictions != nil { + if err := s.Restrictions.Validate(); err != nil { + invalidParams.AddNested("Restrictions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A distribution list. type DistributionList struct { _ struct{} `type:"structure"` @@ -1744,6 +2719,32 @@ func (s ForwardedValues) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ForwardedValues) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ForwardedValues"} + if s.Cookies == nil { + invalidParams.Add(request.NewErrParamRequired("Cookies")) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.Cookies != nil { + if err := s.Cookies.Validate(); err != nil { + invalidParams.AddNested("Cookies", err.(request.ErrInvalidParams)) + } + } + if s.Headers != nil { + if err := s.Headers.Validate(); err != nil { + invalidParams.AddNested("Headers", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that controls the countries in which your content is distributed. // For more information about geo restriction, go to Customizing Error Responses // in the Amazon CloudFront Developer Guide. CloudFront determines the location @@ -1788,6 +2789,22 @@ func (s GeoRestriction) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GeoRestriction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoRestriction"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.RestrictionType == nil { + invalidParams.Add(request.NewErrParamRequired("RestrictionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The request to get an origin access identity's configuration. type GetCloudFrontOriginAccessIdentityConfigInput struct { _ struct{} `type:"structure"` @@ -1806,6 +2823,19 @@ func (s GetCloudFrontOriginAccessIdentityConfigInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCloudFrontOriginAccessIdentityConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetCloudFrontOriginAccessIdentityConfigOutput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` @@ -1845,6 +2875,19 @@ func (s GetCloudFrontOriginAccessIdentityInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetCloudFrontOriginAccessIdentityOutput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` @@ -1885,6 +2928,19 @@ func (s GetDistributionConfigInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDistributionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDistributionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetDistributionConfigOutput struct { _ struct{} `type:"structure" payload:"DistributionConfig"` @@ -1924,6 +2980,19 @@ func (s GetDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetDistributionOutput struct { _ struct{} `type:"structure" payload:"Distribution"` @@ -1966,6 +3035,22 @@ func (s GetInvalidationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetInvalidationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInvalidationInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetInvalidationOutput struct { _ struct{} `type:"structure" payload:"Invalidation"` @@ -2002,6 +3087,19 @@ func (s GetStreamingDistributionConfigInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetStreamingDistributionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetStreamingDistributionConfigOutput struct { _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` @@ -2041,6 +3139,19 @@ func (s GetStreamingDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type GetStreamingDistributionOutput struct { _ struct{} `type:"structure" payload:"StreamingDistribution"` @@ -2099,6 +3210,19 @@ func (s Headers) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Headers) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Headers"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // An invalidation. type Invalidation struct { _ struct{} `type:"structure"` @@ -2161,6 +3285,27 @@ func (s InvalidationBatch) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *InvalidationBatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InvalidationBatch"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + if s.Paths != nil { + if err := s.Paths.Validate(); err != nil { + invalidParams.AddNested("Paths", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // An invalidation list. type InvalidationList struct { _ struct{} `type:"structure"` @@ -2321,6 +3466,19 @@ func (s ListDistributionsByWebACLIdInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListDistributionsByWebACLIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDistributionsByWebACLIdInput"} + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The response to a request to list the distributions that are associated with // a specified AWS WAF web ACL. type ListDistributionsByWebACLIdOutput struct { @@ -2414,6 +3572,19 @@ func (s ListInvalidationsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInvalidationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInvalidationsInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type ListInvalidationsOutput struct { _ struct{} `type:"structure" payload:"InvalidationList"` @@ -2515,6 +3686,28 @@ func (s LoggingConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingConfig"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.IncludeCookies == nil { + invalidParams.Add(request.NewErrParamRequired("IncludeCookies")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that describes the Amazon S3 bucket or the HTTP server (for // example, a web server) from which CloudFront gets your files.You must create // at least one origin. @@ -2562,6 +3755,37 @@ func (s Origin) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Origin) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Origin"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.CustomHeaders != nil { + if err := s.CustomHeaders.Validate(); err != nil { + invalidParams.AddNested("CustomHeaders", err.(request.ErrInvalidParams)) + } + } + if s.CustomOriginConfig != nil { + if err := s.CustomOriginConfig.Validate(); err != nil { + invalidParams.AddNested("CustomOriginConfig", err.(request.ErrInvalidParams)) + } + } + if s.S3OriginConfig != nil { + if err := s.S3OriginConfig.Validate(); err != nil { + invalidParams.AddNested("S3OriginConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // CloudFront origin access identity. type OriginAccessIdentity struct { _ struct{} `type:"structure"` @@ -2619,6 +3843,22 @@ func (s OriginAccessIdentityConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *OriginAccessIdentityConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginAccessIdentityConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The CloudFrontOriginAccessIdentityList type. type OriginAccessIdentityList struct { _ struct{} `type:"structure"` @@ -2707,6 +3947,22 @@ func (s OriginCustomHeader) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginCustomHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginCustomHeader"} + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderValue == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains the list of SSL/TLS protocols that you want // CloudFront to use when communicating with your origin over HTTPS. type OriginSslProtocols struct { @@ -2732,6 +3988,22 @@ func (s OriginSslProtocols) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginSslProtocols) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginSslProtocols"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains information about origins for this distribution. type Origins struct { _ struct{} `type:"structure"` @@ -2753,6 +4025,32 @@ func (s Origins) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Origins) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Origins"} + if s.Items != nil && len(s.Items) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Items", 1)) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains information about the objects that you want // to invalidate. type Paths struct { @@ -2775,6 +4073,19 @@ func (s Paths) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Paths) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Paths"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that identifies ways in which you want to restrict distribution // of your content. type Restrictions struct { @@ -2799,6 +4110,24 @@ func (s Restrictions) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Restrictions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Restrictions"} + if s.GeoRestriction == nil { + invalidParams.Add(request.NewErrParamRequired("GeoRestriction")) + } + if s.GeoRestriction != nil { + if err := s.GeoRestriction.Validate(); err != nil { + invalidParams.AddNested("GeoRestriction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains information about the Amazon S3 bucket from // which you want CloudFront to get your media files for distribution. type S3Origin struct { @@ -2821,6 +4150,22 @@ func (s S3Origin) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Origin) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Origin"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.OriginAccessIdentity == nil { + invalidParams.Add(request.NewErrParamRequired("OriginAccessIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that contains information about the Amazon S3 origin. If the // origin is a custom origin, use the CustomOriginConfig element instead. type S3OriginConfig struct { @@ -2850,6 +4195,19 @@ func (s S3OriginConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3OriginConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3OriginConfig"} + if s.OriginAccessIdentity == nil { + invalidParams.Add(request.NewErrParamRequired("OriginAccessIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that lists the AWS accounts that were included in the TrustedSigners // complex type, as well as their active CloudFront key pair IDs, if any. type Signer struct { @@ -2982,6 +4340,51 @@ func (s StreamingDistributionConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamingDistributionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamingDistributionConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) + } + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.S3Origin == nil { + invalidParams.Add(request.NewErrParamRequired("S3Origin")) + } + if s.TrustedSigners == nil { + invalidParams.Add(request.NewErrParamRequired("TrustedSigners")) + } + if s.Aliases != nil { + if err := s.Aliases.Validate(); err != nil { + invalidParams.AddNested("Aliases", err.(request.ErrInvalidParams)) + } + } + if s.Logging != nil { + if err := s.Logging.Validate(); err != nil { + invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) + } + } + if s.S3Origin != nil { + if err := s.S3Origin.Validate(); err != nil { + invalidParams.AddNested("S3Origin", err.(request.ErrInvalidParams)) + } + } + if s.TrustedSigners != nil { + if err := s.TrustedSigners.Validate(); err != nil { + invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A streaming distribution list. 
type StreamingDistributionList struct { _ struct{} `type:"structure"` @@ -3113,6 +4516,25 @@ func (s StreamingLoggingConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *StreamingLoggingConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamingLoggingConfig"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that specifies the AWS accounts, if any, that you want to // allow to create signed URLs for private content. If you want to require signed // URLs in requests for objects in the target origin that match the PathPattern @@ -3149,6 +4571,22 @@ func (s TrustedSigners) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *TrustedSigners) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TrustedSigners"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The request to update an origin access identity. type UpdateCloudFrontOriginAccessIdentityInput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` @@ -3174,6 +4612,27 @@ func (s UpdateCloudFrontOriginAccessIdentityInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCloudFrontOriginAccessIdentityInput"} + if s.CloudFrontOriginAccessIdentityConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CloudFrontOriginAccessIdentityConfig")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.CloudFrontOriginAccessIdentityConfig != nil { + if err := s.CloudFrontOriginAccessIdentityConfig.Validate(); err != nil { + invalidParams.AddNested("CloudFrontOriginAccessIdentityConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type UpdateCloudFrontOriginAccessIdentityOutput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` @@ -3220,6 +4679,27 @@ func (s UpdateDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDistributionInput"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type UpdateDistributionOutput struct { _ struct{} `type:"structure" payload:"Distribution"` @@ -3266,6 +4746,27 @@ func (s UpdateStreamingDistributionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StreamingDistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) + } + if s.StreamingDistributionConfig != nil { + if err := s.StreamingDistributionConfig.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The returned result of the corresponding request. type UpdateStreamingDistributionOutput struct { _ struct{} `type:"structure" payload:"StreamingDistribution"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudFront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go similarity index 96% rename from vendor/github.com/aws/aws-sdk-go/service/cloudFront/service.go rename to vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index 51b73c6..99d23c7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudFront/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // CloudFront is a client for CloudFront. 
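Note: the hunk above switches the vendored CloudFront client from the SDK-internal private/signer/v4 package to the exported aws/signer/v4 package, and the next hunk registers it through the named SignRequestHandler instead of the old v4.Sign function. As a rough illustration (not part of this patch), the exported signer can also be used on its own to sign a plain *http.Request. The credentials, endpoint URL, and region below are placeholders, and the sketch assumes the v4.NewSigner / Signer.Sign API shipped in this SDK revision:

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Placeholder static credentials, for illustration only.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	// Empty body; Sign wants an io.ReadSeeker so it can hash the payload.
	body := bytes.NewReader([]byte{})

	// Placeholder endpoint; the real client builds the URL from its endpoint metadata.
	req, err := http.NewRequest("GET", "https://cloudfront.amazonaws.com/", body)
	if err != nil {
		panic(err)
	}

	// CloudFront is a global service; its SigV4 requests are signed against us-east-1.
	if _, err := signer.Sign(req, body, "cloudfront", "us-east-1", time.Now()); err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization"))
}

Inside the generated client the same signer is attached to every request via svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler), as the following hunk shows.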
@@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudFront/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/service/cloudFront/waiters.go rename to vendor/github.com/aws/aws-sdk-go/service/cloudfront/waiters.go diff --git a/vendor/github.com/aws/aws-sdk-go/service/generate.go b/vendor/github.com/aws/aws-sdk-go/service/generate.go new file mode 100644 index 0000000..9cf991e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/generate.go @@ -0,0 +1,5 @@ +// Package service contains automatically generated AWS clients. +package service + +//go:generate go run ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json +//go:generate gofmt -s -w ../service diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 1d63122..5132954 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -4,16 +4,40 @@ package s3 import ( + "fmt" "io" "time" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" ) const opAbortMultipartUpload = "AbortMultipartUpload" -// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -44,7 +68,28 @@ func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMulti const opCompleteMultipartUpload = "CompleteMultipartUpload" -// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -71,7 +116,28 @@ func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*Comp const opCopyObject = "CopyObject" -// CopyObjectRequest generates a request for the CopyObject operation. +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -98,7 +164,28 @@ func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { const opCreateBucket = "CreateBucket" -// CreateBucketRequest generates a request for the CreateBucket operation. +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -125,7 +212,28 @@ func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) const opCreateMultipartUpload = "CreateMultipartUpload" -// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation. +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -158,7 +266,28 @@ func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMu const opDeleteBucket = "DeleteBucket" -// DeleteBucketRequest generates a request for the DeleteBucket operation. +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -171,6 +300,8 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketOutput{} req.Data = output return @@ -186,7 +317,28 @@ func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) const opDeleteBucketCors = "DeleteBucketCors" -// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation. +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ Name: opDeleteBucketCors, @@ -199,6 +351,8 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketCorsOutput{} req.Data = output return @@ -213,7 +367,28 @@ func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOu const opDeleteBucketLifecycle = "DeleteBucketLifecycle" -// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. 
+// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ Name: opDeleteBucketLifecycle, @@ -226,6 +401,8 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketLifecycleOutput{} req.Data = output return @@ -240,7 +417,28 @@ func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBu const opDeleteBucketPolicy = "DeleteBucketPolicy" -// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ Name: opDeleteBucketPolicy, @@ -253,6 +451,8 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketPolicyOutput{} req.Data = output return @@ -267,7 +467,28 @@ func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPo const opDeleteBucketReplication = "DeleteBucketReplication" -// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ Name: opDeleteBucketReplication, @@ -280,11 +501,14 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketReplicationOutput{} req.Data = output return } +// Deletes the replication configuration from the bucket. func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { req, out := c.DeleteBucketReplicationRequest(input) err := req.Send() @@ -293,7 +517,28 @@ func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*Dele const opDeleteBucketTagging = "DeleteBucketTagging" -// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ Name: opDeleteBucketTagging, @@ -306,6 +551,8 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketTaggingOutput{} req.Data = output return @@ -320,7 +567,28 @@ func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucket const opDeleteBucketWebsite = "DeleteBucketWebsite" -// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -333,6 +601,8 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &DeleteBucketWebsiteOutput{} req.Data = output return @@ -347,7 +617,28 @@ func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucket const opDeleteObject = "DeleteObject" -// DeleteObjectRequest generates a request for the DeleteObject operation. +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ Name: opDeleteObject, @@ -376,7 +667,28 @@ func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) const opDeleteObjects = "DeleteObjects" -// DeleteObjectsRequest generates a request for the DeleteObjects operation. +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. 
+// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ Name: opDeleteObjects, @@ -402,9 +714,78 @@ func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, err return out, err } +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAccelerateConfigurationOutput{} + req.Data = output + return +} + +// Returns the accelerate configuration of a bucket. +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + err := req.Send() + return out, err +} + const opGetBucketAcl = "GetBucketAcl" -// GetBucketAclRequest generates a request for the GetBucketAcl operation. +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. 
+// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ Name: opGetBucketAcl, @@ -431,7 +812,28 @@ func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) const opGetBucketCors = "GetBucketCors" -// GetBucketCorsRequest generates a request for the GetBucketCors operation. +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -458,8 +860,32 @@ func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, err const opGetBucketLifecycle = "GetBucketLifecycle" -// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. +// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } op := &request.Operation{ Name: opGetBucketLifecycle, HTTPMethod: "GET", @@ -485,7 +911,28 @@ func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifec const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" -// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation. 
+// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -512,7 +959,28 @@ func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurat const opGetBucketLocation = "GetBucketLocation" -// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -539,7 +1007,28 @@ func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocatio const opGetBucketLogging = "GetBucketLogging" -// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -567,8 +1056,32 @@ func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOu const opGetBucketNotification = "GetBucketNotification" -// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } op := &request.Operation{ Name: opGetBucketNotification, HTTPMethod: "GET", @@ -594,7 +1107,28 @@ func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" -// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. 
+// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -621,7 +1155,28 @@ func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConf const opGetBucketPolicy = "GetBucketPolicy" -// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -648,7 +1203,28 @@ func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutpu const opGetBucketReplication = "GetBucketReplication" -// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. +// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -666,6 +1242,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req return } +// Deprecated, see the GetBucketReplicationConfiguration operation. 
func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) err := req.Send() @@ -674,7 +1251,28 @@ func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketR const opGetBucketRequestPayment = "GetBucketRequestPayment" -// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -701,7 +1299,28 @@ func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetB const opGetBucketTagging = "GetBucketTagging" -// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -728,7 +1347,28 @@ func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOu const opGetBucketVersioning = "GetBucketVersioning" -// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -755,7 +1395,28 @@ func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVer const opGetBucketWebsite = "GetBucketWebsite" -// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -782,7 +1443,28 @@ func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOu const opGetObject = "GetObject" -// GetObjectRequest generates a request for the GetObject operation. +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. 
+// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -809,7 +1491,28 @@ func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { const opGetObjectAcl = "GetObjectAcl" -// GetObjectAclRequest generates a request for the GetObjectAcl operation. +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -836,7 +1539,28 @@ func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) const opGetObjectTorrent = "GetObjectTorrent" -// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ Name: opGetObjectTorrent, @@ -863,7 +1587,28 @@ func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOu const opHeadBucket = "HeadBucket" -// HeadBucketRequest generates a request for the HeadBucket operation. +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method. +// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ Name: opHeadBucket, @@ -876,6 +1621,8 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &HeadBucketOutput{} req.Data = output return @@ -891,7 +1638,28 @@ func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { const opHeadObject = "HeadObject" -// HeadObjectRequest generates a request for the HeadObject operation. +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ Name: opHeadObject, @@ -920,7 +1688,28 @@ func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { const opListBuckets = "ListBuckets" -// ListBucketsRequest generates a request for the ListBuckets operation. +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -947,7 +1736,28 @@ func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { const opListMultipartUploads = "ListMultipartUploads" -// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. +// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -978,6 +1788,23 @@ func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultip return out, err } +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListMultipartUploadsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -988,7 +1815,28 @@ func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func const opListObjectVersions = "ListObjectVersions" -// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -1019,6 +1867,23 @@ func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVers return out, err } +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListObjectVersionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1029,7 +1894,28 @@ func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p * const opListObjects = "ListObjects" -// ListObjectsRequest generates a request for the ListObjects operation. +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -1062,6 +1948,23 @@ func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { return out, err } +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListObjectsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1070,9 +1973,112 @@ func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOut }) } +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsV2Output{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + err := req.Send() + return out, err +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. 
+// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsV2Request(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsV2Output), lastPage) + }) +} + const opListParts = "ListParts" -// ListPartsRequest generates a request for the ListParts operation. +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -1103,6 +2109,23 @@ func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { return out, err } +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPartsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1111,9 +2134,80 @@ func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, l }) } +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAccelerateConfigurationOutput{} + req.Data = output + return +} + +// Sets the accelerate configuration of an existing bucket. +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + err := req.Send() + return out, err +} + const opPutBucketAcl = "PutBucketAcl" -// PutBucketAclRequest generates a request for the PutBucketAcl operation. +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -1126,6 +2220,8 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketAclOutput{} req.Data = output return @@ -1140,7 +2236,28 @@ func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) const opPutBucketCors = "PutBucketCors" -// PutBucketCorsRequest generates a request for the PutBucketCors operation. +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -1153,6 +2270,8 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketCorsOutput{} req.Data = output return @@ -1167,8 +2286,32 @@ func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, err const opPutBucketLifecycle = "PutBucketLifecycle" -// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation. +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } op := &request.Operation{ Name: opPutBucketLifecycle, HTTPMethod: "PUT", @@ -1180,6 +2323,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketLifecycleOutput{} req.Data = output return @@ -1194,7 +2339,28 @@ func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifec const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" -// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation. +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -1207,6 +2373,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketLifecycleConfigurationOutput{} req.Data = output return @@ -1222,7 +2390,28 @@ func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurat const opPutBucketLogging = "PutBucketLogging" -// PutBucketLoggingRequest generates a request for the PutBucketLogging operation. +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -1235,6 +2424,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketLoggingOutput{} req.Data = output return @@ -1251,8 +2442,32 @@ func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOu const opPutBucketNotification = "PutBucketNotification" -// PutBucketNotificationRequest generates a request for the PutBucketNotification operation. 
+// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } op := &request.Operation{ Name: opPutBucketNotification, HTTPMethod: "PUT", @@ -1264,6 +2479,8 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketNotificationOutput{} req.Data = output return @@ -1278,7 +2495,28 @@ func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucke const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" -// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation. +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. 
+// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -1291,6 +2529,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketNotificationConfigurationOutput{} req.Data = output return @@ -1305,7 +2545,28 @@ func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConf const opPutBucketPolicy = "PutBucketPolicy" -// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation. +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -1318,6 +2579,8 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketPolicyOutput{} req.Data = output return @@ -1333,7 +2596,28 @@ func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutpu const opPutBucketReplication = "PutBucketReplication" -// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. 
+// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -1346,6 +2630,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketReplicationOutput{} req.Data = output return @@ -1361,7 +2647,28 @@ func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketR const opPutBucketRequestPayment = "PutBucketRequestPayment" -// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -1374,6 +2681,8 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketRequestPaymentOutput{} req.Data = output return @@ -1392,7 +2701,28 @@ func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutB const opPutBucketTagging = "PutBucketTagging" -// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. 
+// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -1405,6 +2735,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketTaggingOutput{} req.Data = output return @@ -1419,7 +2751,28 @@ func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOu const opPutBucketVersioning = "PutBucketVersioning" -// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -1432,6 +2785,8 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketVersioningOutput{} req.Data = output return @@ -1447,7 +2802,28 @@ func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVer const opPutBucketWebsite = "PutBucketWebsite" -// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. 
+// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -1460,6 +2836,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request } req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) output = &PutBucketWebsiteOutput{} req.Data = output return @@ -1474,7 +2852,28 @@ func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOu const opPutObject = "PutObject" -// PutObjectRequest generates a request for the PutObject operation. +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -1501,7 +2900,28 @@ func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { const opPutObjectAcl = "PutObjectAcl" -// PutObjectAclRequest generates a request for the PutObjectAcl operation. +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -1529,7 +2949,28 @@ func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) const opRestoreObject = "RestoreObject" -// RestoreObjectRequest generates a request for the RestoreObject operation. 
+// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { op := &request.Operation{ Name: opRestoreObject, @@ -1556,7 +2997,28 @@ func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, err const opUploadPart = "UploadPart" -// UploadPartRequest generates a request for the UploadPart operation. +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPart method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { op := &request.Operation{ Name: opUploadPart, @@ -1589,7 +3051,28 @@ func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { const opUploadPartCopy = "UploadPartCopy" -// UploadPartCopyRequest generates a request for the UploadPartCopy operation. +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPartCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartCopyRequest method. 
+// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { op := &request.Operation{ Name: opUploadPartCopy, @@ -1614,6 +3097,26 @@ func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, return out, err } +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -1640,6 +3143,28 @@ func (s AbortMultipartUploadInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -1658,6 +3183,23 @@ func (s AbortMultipartUploadOutput) GoString() string { return s.String() } +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + type AccessControlPolicy struct { _ struct{} `type:"structure"` @@ -1677,6 +3219,26 @@ func (s AccessControlPolicy) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Bucket struct { _ struct{} `type:"structure"` @@ -1713,6 +3275,29 @@ func (s BucketLifecycleConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type BucketLoggingStatus struct { _ struct{} `type:"structure"` @@ -1729,6 +3314,21 @@ func (s BucketLoggingStatus) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -1745,6 +3345,29 @@ func (s CORSConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CORSRule struct { _ struct{} `type:"structure"` @@ -1778,13 +3401,29 @@ func (s CORSRule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` CloudFunction *string `type:"string"` // Bucket event for which to send notifications. - Event *string `type:"string" enum:"Event"` + Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` @@ -1849,6 +3488,28 @@ func (s CompleteMultipartUploadInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -2087,6 +3748,28 @@ func (s CopyObjectInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` @@ -2226,6 +3909,19 @@ func (s CreateBucketInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -2337,9 +4033,35 @@ func (s CreateMultipartUploadInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + // Name of the bucket to which the multipart upload was initiated. Bucket *string `locationName:"Bucket" type:"string"` @@ -2402,6 +4124,29 @@ func (s Delete) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketCorsInput struct { _ struct{} `type:"structure"` @@ -2418,6 +4163,19 @@ func (s DeleteBucketCorsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -2448,6 +4206,19 @@ func (s DeleteBucketInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -2464,6 +4235,19 @@ func (s DeleteBucketLifecycleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -2508,6 +4292,19 @@ func (s DeleteBucketPolicyInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -2538,6 +4335,19 @@ func (s DeleteBucketReplicationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -2568,6 +4378,19 @@ func (s DeleteBucketTaggingInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -2598,6 +4421,19 @@ func (s DeleteBucketWebsiteInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -2672,6 +4508,25 @@ func (s DeleteObjectInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -2726,6 +4581,27 @@ func (s DeleteObjectsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteObjectsOutput struct { _ struct{} `type:"structure"` @@ -2791,6 +4667,19 @@ func (s Destination) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Error struct { _ struct{} `type:"structure"` @@ -2830,6 +4719,22 @@ func (s ErrorDocument) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Container for key value pair that defines the criteria for the filter rule. 
type FilterRule struct { _ struct{} `type:"structure"` @@ -2854,6 +4759,53 @@ func (s FilterRule) GoString() string { return s.String() } +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + type GetBucketAclInput struct { _ struct{} `type:"structure"` @@ -2870,6 +4822,19 @@ func (s GetBucketAclInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -2905,6 +4870,19 @@ func (s GetBucketCorsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -2937,6 +4915,19 @@ func (s GetBucketLifecycleConfigurationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -2969,6 +4960,19 @@ func (s GetBucketLifecycleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -3001,6 +5005,19 @@ func (s GetBucketLocationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` @@ -3033,6 +5050,19 @@ func (s GetBucketLoggingInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` @@ -3052,7 +5082,7 @@ func (s GetBucketLoggingOutput) GoString() string { type GetBucketNotificationConfigurationRequest struct { _ struct{} `type:"structure"` - // Name of the buket to get the notification configuration for. + // Name of the bucket to get the notification configuration for. Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -3066,6 +5096,19 @@ func (s GetBucketNotificationConfigurationRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -3082,6 +5125,19 @@ func (s GetBucketPolicyInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -3115,6 +5171,19 @@ func (s GetBucketReplicationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -3149,6 +5218,19 @@ func (s GetBucketRequestPaymentInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -3182,6 +5264,19 @@ func (s GetBucketTaggingInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -3214,6 +5309,19 @@ func (s GetBucketVersioningInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -3252,6 +5360,19 @@ func (s GetBucketWebsiteInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -3301,6 +5422,25 @@ func (s GetObjectAclInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetObjectAclOutput struct { _ struct{} `type:"structure"` @@ -3404,6 +5544,25 @@ func (s GetObjectInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -3427,7 +5586,7 @@ type GetObjectOutput struct { ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The portion of the object returned in the response. ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` @@ -3537,6 +5696,25 @@ func (s GetObjectTorrentInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -3576,6 +5754,21 @@ func (s Grant) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -3605,6 +5798,19 @@ func (s Grantee) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type HeadBucketInput struct { _ struct{} `type:"structure"` @@ -3621,6 +5827,19 @@ func (s HeadBucketInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -3697,6 +5916,25 @@ func (s HeadObjectInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
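// Editor's sketch (not part of this patch or the vendored file): the
// GetObjectInput validation above requires Bucket and a non-empty Key. A
// minimal download sketch, assuming this vendored SDK revision and an
// *s3.S3 client plus placeholder bucket/key names supplied by the caller:
//
//    import (
//        "io"
//        "os"
//
//        "github.com/aws/aws-sdk-go/aws"
//        "github.com/aws/aws-sdk-go/service/s3"
//    )
//
//    func downloadObject(svc *s3.S3, bucket, key string) error {
//        out, err := svc.GetObject(&s3.GetObjectInput{
//            Bucket: aws.String(bucket),
//            Key:    aws.String(key),
//        })
//        if err != nil {
//            return err
//        }
//        defer out.Body.Close()
//        // Stream the object body to stdout.
//        _, err = io.Copy(os.Stdout, out.Body)
//        return err
//    }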
+func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type HeadObjectOutput struct { _ struct{} `type:"structure"` @@ -3717,7 +5955,7 @@ type HeadObjectOutput struct { ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -3820,6 +6058,19 @@ func (s IndexDocument) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Initiator struct { _ struct{} `type:"structure"` @@ -3890,6 +6141,22 @@ func (s LambdaFunctionConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type LifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -3906,6 +6173,29 @@ func (s LifecycleConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -3916,6 +6206,12 @@ type LifecycleExpiration struct { // Indicates the lifetime, in days, of the objects that are subject to the rule. // The value must be a non-zero positive integer. Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. 
+ ExpiredObjectDeleteMarker *bool `type:"boolean"` } // String returns the string representation @@ -3931,6 +6227,10 @@ func (s LifecycleExpiration) GoString() string { type LifecycleRule struct { _ struct{} `type:"structure"` + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + Expiration *LifecycleExpiration `type:"structure"` // Unique identifier for the rule. The value cannot be longer than 255 characters. @@ -3965,6 +6265,22 @@ func (s LifecycleRule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ListBucketsInput struct { _ struct{} `type:"structure"` } @@ -4042,6 +6358,19 @@ func (s ListMultipartUploadsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` @@ -4136,6 +6465,19 @@ func (s ListObjectVersionsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` @@ -4222,6 +6564,19 @@ func (s ListObjectsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ListObjectsOutput struct { _ struct{} `type:"structure"` @@ -4266,6 +6621,124 @@ func (s ListObjectsOutput) GoString() string { return s.String() } +type ListObjectsV2Input struct { + _ struct{} `type:"structure"` + + // Name of the bucket to list. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. 
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by delimiter + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than equals to MaxKeys field. Say you ask for 50 keys, your + // result will include less than equals 50 keys + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `type:"integer"` + + // Name of the bucket to list. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. 
NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + type ListPartsInput struct { _ struct{} `type:"structure"` @@ -4300,9 +6773,38 @@ func (s ListPartsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ListPartsOutput struct { _ struct{} `type:"structure"` + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` @@ -4379,6 +6881,26 @@ func (s LoggingEnabled) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type MultipartUpload struct { _ struct{} `type:"structure"` @@ -4421,8 +6943,8 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage - // Service Developer Guide. + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -4447,8 +6969,8 @@ type NoncurrentVersionTransition struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. 
For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage - // Service Developer Guide. + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -4487,6 +7009,46 @@ func (s NotificationConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -4574,6 +7136,22 @@ func (s ObjectIdentifier) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ObjectVersion struct { _ struct{} `type:"structure"` @@ -4656,6 +7234,56 @@ func (s Part) GoString() string { return s.String() } +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + + // Name of the bucket for which the accelerate configuration is set. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
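// Editor's sketch (not part of this patch or the vendored file): the
// ListObjectsV2 input/output types introduced a few hunks above page with an
// obfuscated continuation token rather than a marker key. A minimal paging
// loop, assuming the matching ListObjectsV2 operation from this SDK revision
// and a placeholder region/bucket:
//
//    import (
//        "github.com/aws/aws-sdk-go/aws"
//        "github.com/aws/aws-sdk-go/aws/session"
//        "github.com/aws/aws-sdk-go/service/s3"
//    )
//
//    func listAllKeys(bucket string) ([]string, error) {
//        svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
//        input := &s3.ListObjectsV2Input{Bucket: aws.String(bucket)}
//        var keys []string
//        for {
//            out, err := svc.ListObjectsV2(input)
//            if err != nil {
//                return nil, err
//            }
//            for _, obj := range out.Contents {
//                keys = append(keys, aws.StringValue(obj.Key))
//            }
//            if !aws.BoolValue(out.IsTruncated) {
//                break
//            }
//            // NextContinuationToken feeds the request for the next page.
//            input.ContinuationToken = out.NextContinuationToken
//        }
//        return keys, nil
//    }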
+func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + type PutBucketAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` @@ -4693,6 +7321,24 @@ func (s PutBucketAclInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -4725,6 +7371,27 @@ func (s PutBucketCorsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -4757,6 +7424,24 @@ func (s PutBucketLifecycleConfigurationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -4789,6 +7474,24 @@ func (s PutBucketLifecycleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
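// Editor's sketch (not part of this patch or the vendored file): transfer
// acceleration is new in this SDK revision. Enabling it with the accelerate
// types added above, assuming the corresponding PutBucketAccelerateConfiguration
// operation, a placeholder bucket name, and an *s3.S3 client built as in the
// earlier notes:
//
//    func enableAcceleration(svc *s3.S3, bucket string) error {
//        _, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
//            Bucket: aws.String(bucket),
//            AccelerateConfiguration: &s3.AccelerateConfiguration{
//                // "Enabled" and "Suspended" are the BucketAccelerateStatus enum values.
//                Status: aws.String("Enabled"),
//            },
//        })
//        return err
//    }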
+func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -4821,6 +7524,27 @@ func (s PutBucketLoggingInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -4855,6 +7579,27 @@ func (s PutBucketNotificationConfigurationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -4887,6 +7632,22 @@ func (s PutBucketNotificationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -4920,6 +7681,22 @@ func (s PutBucketPolicyInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
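// Editor's sketch (not part of this patch or the vendored file): the
// AbortIncompleteMultipartUpload structure and the lifecycle types added in
// the hunks above let a rule clean up stale multipart uploads. A minimal
// configuration that aborts uploads seven days after initiation, assuming the
// corresponding PutBucketLifecycleConfiguration operation, a placeholder
// bucket, and an *s3.S3 client as in the earlier notes:
//
//    func expireStaleUploads(svc *s3.S3, bucket string) error {
//        _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
//            Bucket: aws.String(bucket),
//            LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
//                Rules: []*s3.LifecycleRule{{
//                    // Prefix and Status are required by the LifecycleRule Validate above;
//                    // "Enabled" is the ExpirationStatus enum value.
//                    Prefix: aws.String(""),
//                    Status: aws.String("Enabled"),
//                    AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
//                        DaysAfterInitiation: aws.Int64(7),
//                    },
//                }},
//            },
//        })
//        return err
//    }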
+func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -4954,6 +7731,27 @@ func (s PutBucketReplicationInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -4986,6 +7784,27 @@ func (s PutBucketRequestPaymentInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -5018,6 +7837,27 @@ func (s PutBucketTaggingInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -5054,6 +7894,22 @@ func (s PutBucketVersioningInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -5086,6 +7942,27 @@ func (s PutBucketWebsiteInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -5133,6 +8010,9 @@ type PutObjectAclInput struct { // Documentation on downloading objects from requester pays buckets can be found // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation @@ -5145,6 +8025,30 @@ func (s PutObjectAclInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -5172,6 +8076,7 @@ type PutObjectInput struct { // Object data. Body io.ReadSeeker `type:"blob"` + // Name of the bucket to which the PUT operation was initiated. Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies caching behavior along the request/reply chain. @@ -5190,7 +8095,7 @@ type PutObjectInput struct { // Size of the body in bytes. This parameter is useful when the size of the // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // A standard MIME type describing the format of the object data. 
ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -5210,6 +8115,7 @@ type PutObjectInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Object key for which the PUT operation was initiated. Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // A map of metadata to store with the object in S3. @@ -5265,6 +8171,25 @@ func (s PutObjectInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type PutObjectOutput struct { _ struct{} `type:"structure"` @@ -5342,11 +8267,27 @@ func (s QueueConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. - Event *string `type:"string" enum:"Event"` + Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` @@ -5426,6 +8367,19 @@ func (s RedirectAllRequestsTo) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Container for replication rules. You can add as many as 1,000 rules. Total // replication configuration size can be up to 2 MB. type ReplicationConfiguration struct { @@ -5450,6 +8404,32 @@ func (s ReplicationConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ReplicationRule struct { _ struct{} `type:"structure"` @@ -5477,6 +8457,30 @@ func (s ReplicationRule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -5494,6 +8498,19 @@ func (s RequestPaymentConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type RestoreObjectInput struct { _ struct{} `type:"structure" payload:"RestoreRequest"` @@ -5522,6 +8539,30 @@ func (s RestoreObjectInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -5557,6 +8598,19 @@ func (s RestoreRequest) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type RoutingRule struct { _ struct{} `type:"structure"` @@ -5582,9 +8636,26 @@ func (s RoutingRule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Rule struct { _ struct{} `type:"structure"` + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + Expiration *LifecycleExpiration `type:"structure"` // Unique identifier for the rule. The value cannot be longer than 255 characters. @@ -5624,6 +8695,22 @@ func (s Rule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Tag struct { _ struct{} `type:"structure"` @@ -5644,6 +8731,25 @@ func (s Tag) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type Tagging struct { _ struct{} `type:"structure"` @@ -5660,6 +8766,29 @@ func (s Tagging) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type TargetGrant struct { _ struct{} `type:"structure"` @@ -5679,6 +8808,21 @@ func (s TargetGrant) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Container for specifying the configuration when you want Amazon S3 to publish // events to an Amazon Simple Notification Service (Amazon SNS) topic. type TopicConfiguration struct { @@ -5710,11 +8854,27 @@ func (s TopicConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. - Event *string `type:"string" enum:"Event"` + Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` @@ -5846,6 +9006,34 @@ func (s UploadPartCopyInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -5891,14 +9079,17 @@ func (s UploadPartCopyOutput) GoString() string { type UploadPartInput struct { _ struct{} `type:"structure" payload:"Body"` + // Object data. Body io.ReadSeeker `type:"blob"` + // Name of the bucket to which the multipart upload was initiated. Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Size of the body in bytes. This parameter is useful when the size of the // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // Object key for which the multipart upload was initiated. Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Part number of part being uploaded. This is a positive integer between 1 @@ -5941,6 +9132,31 @@ func (s UploadPartInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -6024,6 +9240,48 @@ func (s WebsiteConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum BucketAccelerateStatus + BucketAccelerateStatusEnabled = "Enabled" + // @enum BucketAccelerateStatus + BucketAccelerateStatusSuspended = "Suspended" +) + const ( // @enum BucketCannedACL BucketCannedACLPrivate = "private" @@ -6045,6 +9303,8 @@ const ( // @enum BucketLocationConstraint BucketLocationConstraintUsWest2 = "us-west-2" // @enum BucketLocationConstraint + BucketLocationConstraintApSouth1 = "ap-south-1" + // @enum BucketLocationConstraint BucketLocationConstraintApSoutheast1 = "ap-southeast-1" // @enum BucketLocationConstraint BucketLocationConstraintApSoutheast2 = "ap-southeast-2" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index c227f24..8463347 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -6,32 +6,41 @@ import ( ) func init() { - initClient = func(c *client.Client) { - // Support building custom host-style bucket endpoints - c.Handlers.Build.PushFront(updateHostWithBucket) + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} - // Require SSL when using SSE keys - c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) +func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(updateEndpointForS3Config) - // S3 uses custom error unmarshaling logic - c.Handlers.UnmarshalError.Clear() - c.Handlers.UnmarshalError.PushBack(unmarshalError) - } + // Require SSL when using SSE keys + 
c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeys) - initRequest = func(r *request.Request) { - switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) - case opGetBucketLocation: - // GetBucketLocation has custom parsing logic - r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) - case opCreateBucket: - // Auto-populate LocationConstraint with current region - r.Handlers.Validate.PushFront(populateLocationConstraint) - case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: - r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) - } + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) +} + +func defaultInitRequestFn(r *request.Request) { + // Add reuest handlers for specific platforms. + // e.g. 100-continue support for PUT requests using Go 1.6 + platformRequestHandlers(r) + + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, + opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutBucketReplication: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index b7cf4f9..5172929 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -1,14 +1,124 @@ package s3 import ( + "fmt" + "net/url" "regexp" "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) +// an operationBlacklist is a list of operation names that should a +// request handler should not be executed with. +type operationBlacklist []string + +// Continue will return true of the Request's operation name is not +// in the blacklist. False otherwise. +func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Request handler to automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." 
+func updateEndpointForS3Config(r *request.Request) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r) + } +} + +func updateEndpointForHostStyle(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +func updateEndpointForAccelerate(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket), + nil) + return + } + + // Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com + r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate") + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + b, _ := awsutil.ValuesAtPath(params, "Bucket") + if len(b) == 0 { + return "", false + } + + if bucket, ok := b[0].(*string); ok { + if bucketStr := aws.StringValue(bucket); bucketStr != "" { + return bucketStr, true + } + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) @@ -20,41 +130,36 @@ func dnsCompatibleBucketName(bucket string) bool { !strings.Contains(bucket, "..") } -// hostStyleBucketName returns true if the request should put the bucket in -// the host. This is false if S3ForcePathStyle is explicitly set or if the -// bucket is not DNS compatible. -func hostStyleBucketName(r *request.Request, bucket string) bool { - if aws.BoolValue(r.Config.S3ForcePathStyle) { - return false +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." 
+ u.Host + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" } - - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // GetBucketLocation should be able to be called from any region within - // a partition, and return the associated region of the bucket. - if r.Operation.Name == opGetBucketLocation { - return false - } - - // Use host-style if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) } -func updateHostWithBucket(r *request.Request) { - b, _ := awsutil.ValuesAtPath(r.Params, "Bucket") - if len(b) == 0 { - return +const s3HostPrefix = "s3" + +// replaceHostRegion replaces the S3 region string in the host with the +// value provided. If v is empty the host prefix returned will be s3. +func replaceHostRegion(host, v string) string { + if !strings.HasPrefix(host, s3HostPrefix) { + return host } - if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) { - r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host - r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) - if r.HTTPRequest.URL.Path == "" { - r.HTTPRequest.URL.Path = "/" + suffix := host[len(s3HostPrefix):] + for i := len(s3HostPrefix); i < len(host); i++ { + if host[i] == '.' { + // Trim until '.' leave the it in place. + suffix = host[i:] + break } } + + if len(v) == 0 { + return fmt.Sprintf("s3%s", suffix) + } + + return fmt.Sprintf("s3-%s%s", v, suffix) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 0000000..8e6f330 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 0000000..14d05f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. 
+ return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index e80d954..5833952 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // S3 is a client for Amazon S3. @@ -58,11 +58,11 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) - svc.Handlers.Build.PushBack(restxml.Build) - svc.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - svc.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - svc.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) // Run custom client initialization if present if initClient != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index cd60048..59e4950 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -4,6 +4,7 @@ import ( "encoding/xml" "fmt" "io" + "io/ioutil" "net/http" "strings" @@ -20,6 +21,7 @@ type xmlErrorResponse struct { func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { r.Error = awserr.NewRequestFailure( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go index 4879fca..cbd3d31 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -18,6 +18,12 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { Argument: "", Expected: 200, }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 301, + }, { State: "success", Matcher: "status", diff --git a/vendor/github.com/codegangsta/cli/CHANGELOG.md b/vendor/github.com/codegangsta/cli/CHANGELOG.md new file mode 100644 index 0000000..4976635 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/CHANGELOG.md @@ -0,0 +1,334 @@ +# Change Log + +**ATTN**: This project uses [semantic versioning](http://semver.org/). 
+ +## [Unreleased] +### Added +- Flag type code generation via `go generate` + +### Changed +- Raise minimum tested/supported Go version to 1.2+ + +## [1.18.0] - 2016-06-27 +### Added +- `./runtests` test runner with coverage tracking by default +- testing on OS X +- testing on Windows +- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code + +### Changed +- Use spaces for alignment in help/usage output instead of tabs, making the + output alignment consistent regardless of tab width + +### Fixed +- Printing of command aliases in help text +- Printing of visible flags for both struct and struct pointer flags +- Display the `help` subcommand when using `CommandCategories` +- No longer swallows `panic`s that occur within the `Action`s themselves when + detecting the signature of the `Action` field + +## [1.17.0] - 2016-05-09 +### Added +- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` +- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` +- Support for hiding commands by setting `Hidden: true` -- this will hide the + commands in help output + +### Changed +- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer + quoted in help text output. +- All flag types now include `(default: {value})` strings following usage when a + default value can be (reasonably) detected. +- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent + with non-slice flag types +- Apps now exit with a code of 3 if an unknown subcommand is specified + (previously they printed "No help topic for...", but still exited 0. This + makes it easier to script around apps built using `cli` since they can trust + that a 0 exit code indicated a successful execution. +- cleanups based on [Go Report Card + feedback](https://goreportcard.com/report/github.com/urfave/cli) + +## [1.16.0] - 2016-05-02 +### Added +- `Hidden` field on all flag struct types to omit from generated help text + +### Changed +- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from +generated help text via the `Hidden` field + +### Fixed +- handling of error values in `HandleAction` and `HandleExitCoder` + +## [1.15.0] - 2016-04-30 +### Added +- This file! +- Support for placeholders in flag usage strings +- `App.Metadata` map for arbitrary data/state management +- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after +parsing. +- Support for nested lookup of dot-delimited keys in structures loaded from +YAML. + +### Changed +- The `App.Action` and `Command.Action` now prefer a return signature of +`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil +`error` is returned, there may be two outcomes: + - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called + automatically + - Else the error is bubbled up and returned from `App.Run` +- Specifying an `Action` with the legacy return signature of +`func(*cli.Context)` will produce a deprecation message to stderr +- Specifying an `Action` that is not a `func` type will produce a non-zero exit +from `App.Run` +- Specifying an `Action` func that has an invalid (input) signature will +produce a non-zero exit from `App.Run` + +### Deprecated +- +`cli.App.RunAndExitOnError`, which should now be done by returning an error +that fulfills `cli.ExitCoder` to `cli.App.Run`. +- the legacy signature for +`cli.App.Action` of `func(*cli.Context)`, which should now have a return +signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. 
+ +### Fixed +- Added missing `*cli.Context.GlobalFloat64` method + +## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) +### Added +- Codebeat badge +- Support for categorization via `CategorizedHelp` and `Categories` on app. + +### Changed +- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. + +### Fixed +- Ensure version is not shown in help text when `HideVersion` set. + +## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) +### Added +- YAML file input support. +- `NArg` method on context. + +## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) +### Added +- Custom usage error handling. +- Custom text support in `USAGE` section of help output. +- Improved help messages for empty strings. +- AppVeyor CI configuration. + +### Changed +- Removed `panic` from default help printer func. +- De-duping and optimizations. + +### Fixed +- Correctly handle `Before`/`After` at command level when no subcommands. +- Case of literal `-` argument causing flag reordering. +- Environment variable hints on Windows. +- Docs updates. + +## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) +### Changed +- Use `path.Base` in `Name` and `HelpName` +- Export `GetName` on flag types. + +### Fixed +- Flag parsing when skipping is enabled. +- Test output cleanup. +- Move completion check to account for empty input case. + +## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) +### Added +- Destination scan support for flags. +- Testing against `tip` in Travis CI config. + +### Changed +- Go version in Travis CI config. + +### Fixed +- Removed redundant tests. +- Use correct example naming in tests. + +## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) +### Fixed +- Remove unused var in bash completion. + +## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) +### Added +- Coverage and reference logos in README. + +### Fixed +- Use specified values in help and version parsing. +- Only display app version and help message once. + +## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) +### Added +- More tests for existing functionality. +- `ArgsUsage` at app and command level for help text flexibility. + +### Fixed +- Honor `HideHelp` and `HideVersion` in `App.Run`. +- Remove juvenile word from README. + +## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) +### Added +- `FullName` on command with accompanying help output update. +- Set default `$PROG` in bash completion. + +### Changed +- Docs formatting. + +### Fixed +- Removed self-referential imports in tests. + +## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) +### Added +- Support for `Copyright` at app level. +- `Parent` func at context level to walk up context lineage. + +### Fixed +- Global flag processing at top level. + +## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) +### Added +- Aggregate errors from `Before`/`After` funcs. +- Doc comments on flag structs. +- Include non-global flags when checking version and help. +- Travis CI config updates. + +### Fixed +- Ensure slice type flags have non-nil values. +- Collect global flags from the full command hierarchy. +- Docs prose. + +## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) +### Changed +- `HelpPrinter` signature includes output writer. + +### Fixed +- Specify go 1.1+ in docs. +- Set `Writer` when running command as app. + +## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) +### Added +- Multiple author support. +- `NumFlags` at context level. +- `Aliases` at command level. + +### Deprecated +- `ShortName` at command level. + +### Fixed +- Subcommand help output. 
+- Backward compatible support for deprecated `Author` and `Email` fields. +- Docs regarding `Names`/`Aliases`. + +## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) +### Added +- `After` hook func support at app and command level. + +### Fixed +- Use parsed context when running command as subcommand. +- Docs prose. + +## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) +### Added +- Support for hiding `-h / --help` flags, but not `help` subcommand. +- Stop flag parsing after `--`. + +### Fixed +- Help text for generic flags to specify single value. +- Use double quotes in output for defaults. +- Use `ParseInt` instead of `ParseUint` for int environment var values. +- Use `0` as base when parsing int environment var values. + +## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) +### Added +- Support for environment variable lookup "cascade". +- Support for `Stdout` on app for output redirection. + +### Fixed +- Print command help instead of app help in `ShowCommandHelp`. + +## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) +### Added +- Docs and example code updates. + +### Changed +- Default `-v / --version` flag made optional. + +## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) +### Added +- `FlagNames` at context level. +- Exposed `VersionPrinter` var for more control over version output. +- Zsh completion hook. +- `AUTHOR` section in default app help template. +- Contribution guidelines. +- `DurationFlag` type. + +## [1.2.0] - 2014-08-02 +### Added +- Support for environment variable defaults on flags plus tests. + +## [1.1.0] - 2014-07-15 +### Added +- Bash completion. +- Optional hiding of built-in help command. +- Optional skipping of flag parsing at command level. +- `Author`, `Email`, and `Compiled` metadata on app. +- `Before` hook func support at app and command level. +- `CommandNotFound` func support at app level. +- Command reference available on context. +- `GenericFlag` type. +- `Float64Flag` type. +- `BoolTFlag` type. +- `IsSet` flag helper on context. +- More flag lookup funcs at context level. +- More tests & docs. + +### Changed +- Help template updates to account for presence/absence of flags. +- Separated subcommand help template. +- Exposed `HelpPrinter` var for more control over help output. + +## [1.0.0] - 2013-11-01 +### Added +- `help` flag in default app flag set and each command flag set. +- Custom handling of argument parsing errors. +- Command lookup by name at app level. +- `StringSliceFlag` type and supporting `StringSlice` type. +- `IntSliceFlag` type and supporting `IntSlice` type. +- Slice type flag lookups by name at context level. +- Export of app and command help functions. +- More tests & docs. + +## 0.1.0 - 2013-07-22 +### Added +- Initial implementation. 
+ +[Unreleased]: https://github.com/urfave/cli/compare/v1.18.0...HEAD +[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0 +[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 +[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 +[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 +[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 +[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 +[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 +[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 +[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 +[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 +[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 +[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 +[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 +[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 +[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 +[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 +[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 +[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 +[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/codegangsta/cli/LICENSE b/vendor/github.com/codegangsta/cli/LICENSE new file mode 100644 index 0000000..42a597e --- /dev/null +++ b/vendor/github.com/codegangsta/cli/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/codegangsta/cli/README.md b/vendor/github.com/codegangsta/cli/README.md new file mode 100644 index 0000000..0ffa92f --- /dev/null +++ b/vendor/github.com/codegangsta/cli/README.md @@ -0,0 +1,1307 @@ +cli +=== + +[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) +[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) +[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) +[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) +[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) / +[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc) + +**Notice:** This is the library formerly known as +`github.com/codegangsta/cli` -- Github will automatically redirect requests +to this repository, but we recommend updating your references for clarity. + +cli is a simple, fast, and fun package for building command line apps in Go. The +goal is to enable developers to write fast and distributable command line +applications in an expressive way. + + + +- [Overview](#overview) +- [Installation](#installation) + * [Supported platforms](#supported-platforms) + * [Using the `v2` branch](#using-the-v2-branch) + * [Pinning to the `v1` branch](#pinning-to-the-v1-branch) +- [Getting Started](#getting-started) +- [Examples](#examples) + * [Arguments](#arguments) + * [Flags](#flags) + + [Placeholder Values](#placeholder-values) + + [Alternate Names](#alternate-names) + + [Values from the Environment](#values-from-the-environment) + + [Values from alternate input sources (YAML and others)](#values-from-alternate-input-sources-yaml-and-others) + * [Subcommands](#subcommands) + * [Subcommands categories](#subcommands-categories) + * [Exit code](#exit-code) + * [Bash Completion](#bash-completion) + + [Enabling](#enabling) + + [Distribution](#distribution) + + [Customization](#customization) + * [Generated Help Text](#generated-help-text) + + [Customization](#customization-1) + * [Version Flag](#version-flag) + + [Customization](#customization-2) + + [Full API Example](#full-api-example) +- [Contribution Guidelines](#contribution-guidelines) + + + +## Overview + +Command line apps are usually so tiny that there is absolutely no reason why +your code should *not* be self-documenting. Things like generating help text and +parsing command flags/options should not hinder productivity when writing a +command line app. + +**This is where cli comes into play.** cli makes command line programming fun, +organized, and expressive! + +## Installation + +Make sure you have a working Go environment. Go version 1.2+ is supported. [See +the install instructions for Go](http://golang.org/doc/install.html). + +To install cli, simply run: +``` +$ go get github.com/urfave/cli +``` + +Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can +be easily used: +``` +export PATH=$PATH:$GOPATH/bin +``` + +### Supported platforms + +cli is tested against multiple versions of Go on Linux, and against the latest +released version of Go on OS X and Windows. 
For full details, see +[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). + +### Using the `v2` branch + +**Warning**: The `v2` branch is currently unreleased and considered unstable. + +There is currently a long-lived branch named `v2` that is intended to land as +the new `master` branch once development there has settled down. The current +`master` branch (mirrored as `v1`) is being manually merged into `v2` on +an irregular human-based schedule, but generally if one wants to "upgrade" to +`v2` *now* and accept the volatility (read: "awesomeness") that comes along with +that, please use whatever version pinning of your preference, such as via +`gopkg.in`: + +``` +$ go get gopkg.in/urfave/cli.v2 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v2" // imports as package "cli" +) +... +``` + +### Pinning to the `v1` branch + +Similarly to the section above describing use of the `v2` branch, if one wants +to avoid any unexpected compatibility pains once `v2` becomes `master`, then +pinning to the `v1` branch is an acceptable option, e.g.: + +``` +$ go get gopkg.in/urfave/cli.v1 +``` + +``` go +... +import ( + "gopkg.in/urfave/cli.v1" // imports as package "cli" +) +... +``` + +## Getting Started + +One of the philosophies behind cli is that an API should be playful and full of +discovery. So a cli app can be as little as one line of code in `main()`. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.NewApp().Run(os.Args) +} +``` + +This app will run and show help text, but is not very useful. Let's give an +action to execute and some help documentation: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "boom" + app.Usage = "make an explosive entrance" + app.Action = func(c *cli.Context) error { + fmt.Println("boom! I say!") + return nil + } + + app.Run(os.Args) +} +``` + +Running this already gives you a ton of functionality, plus support for things +like subcommands and flags, which are covered below. + +## Examples + +Being a programmer can be a lonely job. Thankfully by the power of automation +that is not the case! Let's create a greeter app to fend off our demons of +loneliness! + +Start by creating a directory named `greet`, and within it, add a file, +`greet.go` with the following code in it: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Name = "greet" + app.Usage = "fight the loneliness!" + app.Action = func(c *cli.Context) error { + fmt.Println("Hello friend!") + return nil + } + + app.Run(os.Args) +} +``` + +Install our command to the `$GOPATH/bin` directory: + +``` +$ go install +``` + +Finally run our new command: + +``` +$ greet +Hello friend! +``` + +cli also generates neat help text: + +``` +$ greet help +NAME: + greet - fight the loneliness! + +USAGE: + greet [global options] command [command options] [arguments...] 
+ +VERSION: + 0.0.0 + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS + --version Shows version information +``` + +### Arguments + +You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Action = func(c *cli.Context) error { + fmt.Printf("Hello %q", c.Args().Get(0)) + return nil + } + + app.Run(os.Args) +} +``` + +### Flags + +Setting and querying flags is simple. + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Action = func(c *cli.Context) error { + name := "Nefertiti" + if c.NArg() > 0 { + name = c.Args().Get(0) + } + if c.String("lang") == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +You can also set a destination variable for a flag, to which the content will be +scanned. + + +``` go +package main + +import ( + "os" + "fmt" + + "github.com/urfave/cli" +) + +func main() { + var language string + + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang", + Value: "english", + Usage: "language for the greeting", + Destination: &language, + }, + } + + app.Action = func(c *cli.Context) error { + name := "someone" + if c.NArg() > 0 { + name = c.Args()[0] + } + if language == "spanish" { + fmt.Println("Hola", name) + } else { + fmt.Println("Hello", name) + } + return nil + } + + app.Run(os.Args) +} +``` + +See full list of flags at http://godoc.org/github.com/urfave/cli + +#### Placeholder Values + +Sometimes it's useful to specify a flag's value within the usage string itself. +Such placeholders are indicated with back quotes. + +For example this: + + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "config, c", + Usage: "Load configuration from `FILE`", + }, + } + + app.Run(os.Args) +} +``` + +Will result in help output like: + +``` +--config FILE, -c FILE Load configuration from FILE +``` + +Note that only the first placeholder is used. Subsequent back-quoted words will +be left as-is. + +#### Alternate Names + +You can set alternate (or short) names for flags by providing a comma-delimited +list for the `Name`. e.g. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + }, + } + + app.Run(os.Args) +} +``` + +That flag can then be set with `--lang spanish` or `-l spanish`. Note that +giving two different forms of the same flag in the same command invocation is an +error. + +#### Values from the Environment + +You can also have the default value set from the environment via `EnvVar`. e.g. 
+ + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + EnvVar: "APP_LANG", + }, + } + + app.Run(os.Args) +} +``` + +The `EnvVar` may also be given as a comma-delimited "cascade", where the first +environment variable that resolves is used as the default. + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Flags = []cli.Flag { + cli.StringFlag{ + Name: "lang, l", + Value: "english", + Usage: "language for the greeting", + EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", + }, + } + + app.Run(os.Args) +} +``` + +#### Values from alternate input sources (YAML and others) + +There is a separate package altsrc that adds support for getting flag values +from other input sources like YAML. + +In order to get values for a flag from an alternate input source the following +code would be added to wrap an existing cli.Flag like below: + +``` go + altsrc.NewIntFlag(cli.IntFlag{Name: "test"}) +``` + +Initialization must also occur for these flags. Below is an example initializing +getting data from a yaml file below. + +``` go + command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) +``` + +The code above will use the "load" string as a flag name to get the file name of +a yaml file from the cli.Context. It will then use that file name to initialize +the yaml input source for any flags that are defined on that command. As a note +the "load" flag used would also have to be defined on the command flags in order +for this code snipped to work. + +Currently only YAML files are supported but developers can add support for other +input sources by implementing the altsrc.InputSourceContext for their given +sources. + +Here is a more complete sample of a command using YAML support: + + +``` go +package notmain + +import ( + "fmt" + "os" + + "github.com/urfave/cli" + "github.com/urfave/cli/altsrc" +) + +func main() { + app := cli.NewApp() + + flags := []cli.Flag{ + altsrc.NewIntFlag(cli.IntFlag{Name: "test"}), + cli.StringFlag{Name: "load"}, + } + + app.Action = func(c *cli.Context) error { + fmt.Println("yaml ist rad") + return nil + } + + app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) + app.Flags = flags + + app.Run(os.Args) +} +``` + +### Subcommands + +Subcommands can be defined for a more git-like command line app. 
+ + +```go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "add", + Aliases: []string{"a"}, + Usage: "add a task to the list", + Action: func(c *cli.Context) error { + fmt.Println("added task: ", c.Args().First()) + return nil + }, + }, + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + }, + { + Name: "template", + Aliases: []string{"t"}, + Usage: "options for task templates", + Subcommands: []cli.Command{ + { + Name: "add", + Usage: "add a new template", + Action: func(c *cli.Context) error { + fmt.Println("new task template: ", c.Args().First()) + return nil + }, + }, + { + Name: "remove", + Usage: "remove an existing template", + Action: func(c *cli.Context) error { + fmt.Println("removed task template: ", c.Args().First()) + return nil + }, + }, + }, + }, + } + + app.Run(os.Args) +} +``` + +### Subcommands categories + +For additional organization in apps that have many subcommands, you can +associate a category for each command to group them together in the help +output. + +E.g. + +```go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + + app.Commands = []cli.Command{ + { + Name: "noop", + }, + { + Name: "add", + Category: "template", + }, + { + Name: "remove", + Category: "template", + }, + } + + app.Run(os.Args) +} +``` + +Will include: + +``` +COMMANDS: + noop + + Template actions: + add + remove +``` + +### Exit code + +Calling `App.Run` will not automatically call `os.Exit`, which means that by +default the exit code will "fall through" to being `0`. An explicit exit code +may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a +`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.BoolTFlag{ + Name: "ginger-crouton", + Usage: "is it in the soup?", + }, + } + app.Action = func(ctx *cli.Context) error { + if !ctx.Bool("ginger-crouton") { + return cli.NewExitError("it is not in the soup", 86) + } + return nil + } + + app.Run(os.Args) +} +``` + +### Bash Completion + +You can enable completion commands by setting the `EnableBashCompletion` +flag on the `App` object. By default, this setting will only auto-complete to +show an app's subcommands, but you can write your own completion methods for +the App or its subcommands. 
+ + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +func main() { + tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"} + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "complete", + Aliases: []string{"c"}, + Usage: "complete a task on the list", + Action: func(c *cli.Context) error { + fmt.Println("completed task: ", c.Args().First()) + return nil + }, + BashComplete: func(c *cli.Context) { + // This will complete if no args are passed + if c.NArg() > 0 { + return + } + for _, t := range tasks { + fmt.Println(t) + } + }, + }, + } + + app.Run(os.Args) +} +``` + +#### Enabling + +Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while +setting the `PROG` variable to the name of your program: + +`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` + +#### Distribution + +Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename +it to the name of the program you wish to add autocomplete support for (or +automatically install it there if you are distributing a package). Don't forget +to source the file to make it active in the current shell. + +``` +sudo cp src/bash_autocomplete /etc/bash_completion.d/ +source /etc/bash_completion.d/ +``` + +Alternatively, you can just document that users should source the generic +`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set +to the name of their program (as above). + +#### Customization + +The default bash completion flag (`--generate-bash-completion`) is defined as +`cli.BashCompletionFlag`, and may be redefined if desired, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.BashCompletionFlag = cli.BoolFlag{ + Name: "compgen", + Hidden: true, + } + + app := cli.NewApp() + app.EnableBashCompletion = true + app.Commands = []cli.Command{ + { + Name: "wat", + }, + } + app.Run(os.Args) +} +``` + +### Generated Help Text + +The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked +by the cli internals in order to print generated help text for the app, command, +or subcommand, and break execution. + +#### Customization + +All of the help text generation may be customized, and at multiple levels. The +templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and +`SubcommandHelpTemplate` which may be reassigned or augmented, and full override +is possible by assigning a compatible func to the `cli.HelpPrinter` variable, +e.g.: + + +``` go +package main + +import ( + "fmt" + "io" + "os" + + "github.com/urfave/cli" +) + +func main() { + // EXAMPLE: Append to an existing template + cli.AppHelpTemplate = fmt.Sprintf(`%s + +WEBSITE: http://awesometown.example.com + +SUPPORT: support@awesometown.example.com + +`, cli.AppHelpTemplate) + + // EXAMPLE: Override a template + cli.AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command +[command options]{{end}} {{if +.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{if len .Authors}} +AUTHOR(S): + {{range .Authors}}{{ . 
}}{{end}} + {{end}}{{if .Commands}} +COMMANDS: +{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t" +}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright }} +COPYRIGHT: + {{.Copyright}} + {{end}}{{if .Version}} +VERSION: + {{.Version}} + {{end}} +` + + // EXAMPLE: Replace the `HelpPrinter` func + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Println("Ha HA. I pwnd the help!!1") + } + + cli.NewApp().Run(os.Args) +} +``` + +The default flag may be customized to something other than `-h/--help` by +setting `cli.HelpFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.HelpFlag = cli.BoolFlag{ + Name: "halp, haaaaalp", + Usage: "HALP", + EnvVar: "SHOW_HALP,HALPPLZ", + } + + cli.NewApp().Run(os.Args) +} +``` + +### Version Flag + +The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which +is checked by the cli internals in order to print the `App.Version` via +`cli.VersionPrinter` and break execution. + +#### Customization + +The default flag may be cusomized to something other than `-v/--version` by +setting `cli.VersionFlag`, e.g.: + + +``` go +package main + +import ( + "os" + + "github.com/urfave/cli" +) + +func main() { + cli.VersionFlag = cli.BoolFlag{ + Name: "print-version, V", + Usage: "print only the version", + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "v19.99.0" + app.Run(os.Args) +} +``` + +Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.: + + +``` go +package main + +import ( + "fmt" + "os" + + "github.com/urfave/cli" +) + +var ( + Revision = "fafafaf" +) + +func main() { + cli.VersionPrinter = func(c *cli.Context) { + fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision) + } + + app := cli.NewApp() + app.Name = "partay" + app.Version = "v19.99.0" + app.Run(os.Args) +} +``` + +#### Full API Example + +**Notice**: This is a contrived (functioning) example meant strictly for API +demonstration purposes. Use of one's imagination is encouraged. 
+ + +``` go +package main + +import ( + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/urfave/cli" +) + +func init() { + cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n" + cli.CommandHelpTemplate += "\nYMMV\n" + cli.SubcommandHelpTemplate += "\nor something\n" + + cli.HelpFlag = cli.BoolFlag{Name: "halp"} + cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true} + cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"} + + cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { + fmt.Fprintf(w, "best of luck to you\n") + } + cli.VersionPrinter = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version) + } + cli.OsExiter = func(c int) { + fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c) + } + cli.ErrWriter = ioutil.Discard + cli.FlagStringer = func(fl cli.Flag) string { + return fmt.Sprintf("\t\t%s", fl.GetName()) + } +} + +type hexWriter struct{} + +func (w *hexWriter) Write(p []byte) (int, error) { + for _, b := range p { + fmt.Printf("%x", b) + } + fmt.Printf("\n") + + return len(p), nil +} + +type genericType struct{ + s string +} + +func (g *genericType) Set(value string) error { + g.s = value + return nil +} + +func (g *genericType) String() string { + return g.s +} + +func main() { + app := cli.NewApp() + app.Name = "kənˈtrīv" + app.Version = "v19.99.0" + app.Compiled = time.Now() + app.Authors = []cli.Author{ + cli.Author{ + Name: "Example Human", + Email: "human@example.com", + }, + } + app.Copyright = "(c) 1999 Serious Enterprise" + app.HelpName = "contrive" + app.Usage = "demonstrate available API" + app.UsageText = "contrive - demonstrating the available API" + app.ArgsUsage = "[args and such]" + app.Commands = []cli.Command{ + cli.Command{ + Name: "doo", + Aliases: []string{"do"}, + Category: "motion", + Usage: "do the doo", + UsageText: "doo - does the dooing", + Description: "no really, there is a lot of dooing to be done", + ArgsUsage: "[arrgh]", + Flags: []cli.Flag{ + cli.BoolFlag{Name: "forever, forevvarr"}, + }, + Subcommands: cli.Commands{ + cli.Command{ + Name: "wop", + Action: wopAction, + }, + }, + SkipFlagParsing: false, + HideHelp: false, + Hidden: false, + HelpName: "doo!", + BashComplete: func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "--better\n") + }, + Before: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "brace for impact\n") + return nil + }, + After: func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "did we lose anyone?\n") + return nil + }, + Action: func(c *cli.Context) error { + c.Command.FullName() + c.Command.HasName("wop") + c.Command.Names() + c.Command.VisibleFlags() + fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n") + if c.Bool("forever") { + c.Command.Run(c) + } + return nil + }, + OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error { + fmt.Fprintf(c.App.Writer, "for shame\n") + return err + }, + }, + } + app.Flags = []cli.Flag{ + cli.BoolFlag{Name: "fancy"}, + cli.BoolTFlag{Name: "fancier"}, + cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3}, + cli.Float64Flag{Name: "howmuch"}, + cli.GenericFlag{Name: "wat", Value: &genericType{}}, + cli.Int64Flag{Name: "longdistance"}, + cli.Int64SliceFlag{Name: "intervals"}, + cli.IntFlag{Name: "distance"}, + cli.IntSliceFlag{Name: "times"}, + cli.StringFlag{Name: "dance-move, d"}, + cli.StringSliceFlag{Name: "names, N"}, + cli.UintFlag{Name: "age"}, + cli.Uint64Flag{Name: "bigage"}, + } + app.EnableBashCompletion = true + app.HideHelp = 
false + app.HideVersion = false + app.BashComplete = func(c *cli.Context) { + fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n") + } + app.Before = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n") + return nil + } + app.After = func(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, "Phew!\n") + return nil + } + app.CommandNotFound = func(c *cli.Context, command string) { + fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command) + } + app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error { + if isSubcommand { + return err + } + + fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err) + return nil + } + app.Action = func(c *cli.Context) error { + cli.DefaultAppComplete(c) + cli.HandleExitCoder(errors.New("not an exit coder, though")) + cli.ShowAppHelp(c) + cli.ShowCommandCompletions(c, "nope") + cli.ShowCommandHelp(c, "also-nope") + cli.ShowCompletions(c) + cli.ShowSubcommandHelp(c) + cli.ShowVersion(c) + + categories := c.App.Categories() + categories.AddCommand("sounds", cli.Command{ + Name: "bloop", + }) + + for _, category := range c.App.Categories() { + fmt.Fprintf(c.App.Writer, "%s\n", category.Name) + fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands) + fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands()) + } + + fmt.Printf("%#v\n", c.App.Command("doo")) + if c.Bool("infinite") { + c.App.Run([]string{"app", "doo", "wop"}) + } + + if c.Bool("forevar") { + c.App.RunAsSubcommand(c) + } + c.App.Setup() + fmt.Printf("%#v\n", c.App.VisibleCategories()) + fmt.Printf("%#v\n", c.App.VisibleCommands()) + fmt.Printf("%#v\n", c.App.VisibleFlags()) + + fmt.Printf("%#v\n", c.Args().First()) + if len(c.Args()) > 0 { + fmt.Printf("%#v\n", c.Args()[1]) + } + fmt.Printf("%#v\n", c.Args().Present()) + fmt.Printf("%#v\n", c.Args().Tail()) + + set := flag.NewFlagSet("contrive", 0) + nc := cli.NewContext(c.App, set, c) + + fmt.Printf("%#v\n", nc.Args()) + fmt.Printf("%#v\n", nc.Bool("nope")) + fmt.Printf("%#v\n", nc.BoolT("nerp")) + fmt.Printf("%#v\n", nc.Duration("howlong")) + fmt.Printf("%#v\n", nc.Float64("hay")) + fmt.Printf("%#v\n", nc.Generic("bloop")) + fmt.Printf("%#v\n", nc.Int64("bonk")) + fmt.Printf("%#v\n", nc.Int64Slice("burnks")) + fmt.Printf("%#v\n", nc.Int("bips")) + fmt.Printf("%#v\n", nc.IntSlice("blups")) + fmt.Printf("%#v\n", nc.String("snurt")) + fmt.Printf("%#v\n", nc.StringSlice("snurkles")) + fmt.Printf("%#v\n", nc.Uint("flub")) + fmt.Printf("%#v\n", nc.Uint64("florb")) + fmt.Printf("%#v\n", nc.GlobalBool("global-nope")) + fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp")) + fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong")) + fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay")) + fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop")) + fmt.Printf("%#v\n", nc.GlobalInt("global-bips")) + fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups")) + fmt.Printf("%#v\n", nc.GlobalString("global-snurt")) + fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles")) + + fmt.Printf("%#v\n", nc.FlagNames()) + fmt.Printf("%#v\n", nc.GlobalFlagNames()) + fmt.Printf("%#v\n", nc.GlobalIsSet("wat")) + fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope")) + fmt.Printf("%#v\n", nc.NArg()) + fmt.Printf("%#v\n", nc.NumFlags()) + fmt.Printf("%#v\n", nc.Parent()) + + nc.Set("wat", "also-nope") + + ec := cli.NewExitError("ohwell", 86) + fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) + fmt.Printf("made it!\n") + return ec + } + + if os.Getenv("HEXY") != "" { + app.Writer = &hexWriter{} + app.ErrWriter = &hexWriter{} + } + + 
app.Metadata = map[string]interface{}{ + "layers": "many", + "explicable": false, + "whatever-values": 19.99, + } + + app.Run(os.Args) +} + +func wopAction(c *cli.Context) error { + fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n") + return nil +} +``` + +## Contribution Guidelines + +Feel free to put up a pull request to fix a bug or maybe add a feature. I will +give it a code review and make sure that it does not break backwards +compatibility. If I or any other collaborators agree that it is in line with +the vision of the project, we will work with you to get the code into +a mergeable state and merge it into the master branch. + +If you have contributed something significant to the project, we will most +likely add you as a collaborator. As a collaborator you are given the ability +to merge others' pull requests. It is very important that new code does not +break existing code, so be careful about what code you do choose to merge. + +If you feel like you have contributed to the project but have not yet been +added as a collaborator, we probably forgot to add you; please open an issue. diff --git a/vendor/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go new file mode 100644 index 0000000..0755bb6 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/app.go @@ -0,0 +1,503 @@ +package cli + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" +) + +var ( + changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" + appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) + runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) + + contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." + + errNonFuncAction = NewExitError("ERROR invalid Action type. "+ + fmt.Sprintf("Must be a func of type `cli.ActionFunc`. %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) + errInvalidActionSignature = NewExitError("ERROR invalid Action signature. "+ + fmt.Sprintf("Must be `cli.ActionFunc`. %s", contactSysadmin)+ + fmt.Sprintf("See %s", appActionDeprecationURL), 2) +) + +// App is the main structure of a cli application. It is recommended that +// an app be created with the cli.NewApp() function +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Description of the program argument format.
+ ArgsUsage string + // Version of the program + Version string + // List of commands to execute + Commands []Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // Populate on app startup, only gettable through method Categories() + categories CommandCategories + // An action to execute when the bash-completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if an usage error occurs + OnUsageError OnUsageErrorFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []Author + // Copyright of the binary if any + Copyright string + // Name of Author (Note: Use App.Authors, this is deprecated) + Author string + // Email of Author (Note: Use App.Authors, this is deprecated) + Email string + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // Other custom info + Metadata map[string]interface{} + + didSetup bool +} + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. +func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + HelpName: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + Version: "0.0.0", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Writer: os.Stdout, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. 
+func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Author != "" || a.Email != "" { + a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + + if a.EnableBashCompletion { + a.appendFlag(BashCompletionFlag) + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = CommandCategories{} + for _, command := range a.Commands { + a.categories = a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } +} + +// Run is the entry point to the cli app. Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + a.Setup() + + // parse flags + set := flagSet(a.Name, a.Flags) + set.SetOutput(ioutil.Discard) + err = set.Parse(arguments[1:]) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, nil) + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + ShowAppHelp(context) + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err := a.OnUsageError(context, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowAppHelp(context) + return err + } + + if !a.HideHelp && checkHelp(context) { + ShowAppHelp(context) + return nil + } + + if !a.HideVersion && checkVersion(context) { + ShowVersion(context) + return nil + } + + if a.After != nil { + defer func() { + if afterErr := a.After(context); afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + fmt.Fprintf(a.Writer, "%v\n\n", beforeErr) + ShowAppHelp(context) + HandleExitCoder(beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err +} + +// DEPRECATED: Another entry point to the cli app, takes care of passing arguments and error handling +func (a *App) RunAndExitOnError() { + fmt.Fprintf(a.errWriter(), + "DEPRECATED cli.App.RunAndExitOnError. 
%s See %s\n", + contactSysadmin, runAndExitOnErrorDeprecationURL) + if err := a.Run(os.Args); err != nil { + fmt.Fprintln(a.errWriter(), err) + OsExiter(1) + } +} + +// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to +// generate command-specific flags +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + // append help to commands + if len(a.Commands) > 0 { + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + a.Commands = append(a.Commands, helpCommand) + if (HelpFlag != BoolFlag{}) { + a.appendFlag(HelpFlag) + } + } + } + + newCmds := []Command{} + for _, c := range a.Commands { + if c.HelpName == "" { + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) + } + newCmds = append(newCmds, c) + } + a.Commands = newCmds + + // append flags + if a.EnableBashCompletion { + a.appendFlag(BashCompletionFlag) + } + + // parse flags + set := flagSet(a.Name, a.Flags) + set.SetOutput(ioutil.Discard) + err = set.Parse(ctx.Args().Tail()) + nerr := normalizeFlags(a.Flags, set) + context := NewContext(a, set, ctx) + + if nerr != nil { + fmt.Fprintln(a.Writer, nerr) + fmt.Fprintln(a.Writer) + if len(a.Commands) > 0 { + ShowSubcommandHelp(context) + } else { + ShowCommandHelp(ctx, context.Args().First()) + } + return nerr + } + + if checkCompletions(context) { + return nil + } + + if err != nil { + if a.OnUsageError != nil { + err = a.OnUsageError(context, err, true) + HandleExitCoder(err) + return err + } + fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.") + ShowSubcommandHelp(context) + return err + } + + if len(a.Commands) > 0 { + if checkSubcommandHelp(context) { + return nil + } + } else { + if checkCommandHelp(ctx, context.Args().First()) { + return nil + } + } + + if a.After != nil { + defer func() { + afterErr := a.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if a.Before != nil { + beforeErr := a.Before(context) + if beforeErr != nil { + HandleExitCoder(beforeErr) + err = beforeErr + return err + } + } + + args := context.Args() + if args.Present() { + name := args.First() + c := a.Command(name) + if c != nil { + return c.Run(context) + } + } + + // Run default Action + err = HandleAction(a.Action, context) + + HandleExitCoder(err) + return err +} + +// Command returns the named command on App. 
Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return &c + } + } + + return nil +} + +// Categories returns a slice containing all the categories with the commands they contain +func (a *App) Categories() CommandCategories { + return a.categories +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []*CommandCategory { + ret := []*CommandCategory{} + for _, category := range a.categories { + if visible := func() *CommandCategory { + for _, command := range category.Commands { + if !command.Hidden { + return category + } + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []Command { + ret := []Command{} + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) hasFlag(flag Flag) bool { + for _, f := range a.Flags { + if flag == f { + return true + } + } + + return false +} + +func (a *App) errWriter() io.Writer { + + // When the app ErrWriter is nil use the package level one. + if a.ErrWriter == nil { + return ErrWriter + } + + return a.ErrWriter +} + +func (a *App) appendFlag(flag Flag) { + if !a.hasFlag(flag) { + a.Flags = append(a.Flags, flag) + } +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a Author) String() string { + e := "" + if a.Email != "" { + e = "<" + a.Email + "> " + } + + return fmt.Sprintf("%v %v", a.Name, e) +} + +// HandleAction uses ✧✧✧reflection✧✧✧ to figure out if the given Action is an +// ActionFunc, a func with the legacy signature for Action, or some other +// invalid thing. If it's an ActionFunc or a func with the legacy signature for +// Action, the func is run! +func HandleAction(action interface{}, context *Context) (err error) { + defer func() { + if r := recover(); r != nil { + // Try to detect a known reflection error from *this scope*, rather than + // swallowing all panics that may happen when calling an Action func. + s := fmt.Sprintf("%v", r) + if strings.HasPrefix(s, "reflect: ") && strings.Contains(s, "too many input arguments") { + err = NewExitError(fmt.Sprintf("ERROR unknown Action error: %v. See %s", r, appActionDeprecationURL), 2) + } else { + panic(r) + } + } + }() + + if reflect.TypeOf(action).Kind() != reflect.Func { + return errNonFuncAction + } + + vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)}) + + if len(vals) == 0 { + fmt.Fprintf(ErrWriter, + "DEPRECATED Action signature. Must be `cli.ActionFunc`. 
%s See %s\n", + contactSysadmin, appActionDeprecationURL) + return nil + } + + if len(vals) > 1 { + return errInvalidActionSignature + } + + if retErr, ok := vals[0].Interface().(error); vals[0].IsValid() && ok { + return retErr + } + + return err +} diff --git a/vendor/github.com/codegangsta/cli/appveyor.yml b/vendor/github.com/codegangsta/cli/appveyor.yml new file mode 100644 index 0000000..9e9d271 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/appveyor.yml @@ -0,0 +1,25 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\urfave\cli + +environment: + GOPATH: C:\gopath + GOVERSION: 1.6 + PYTHON: C:\Python27-x64 + PYTHON_VERSION: 2.7.x + PYTHON_ARCH: 64 + GFMXR_DEBUG: 1 + +install: +- set PATH=%GOPATH%\bin;C:\go\bin;%PATH% +- go version +- go env +- go get github.com/urfave/gfmrun/... +- go get -v -t ./... + +build_script: +- python runtests vet +- python runtests test +- python runtests gfmrun diff --git a/vendor/github.com/codegangsta/cli/category.go b/vendor/github.com/codegangsta/cli/category.go new file mode 100644 index 0000000..1a60550 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/category.go @@ -0,0 +1,44 @@ +package cli + +// CommandCategories is a slice of *CommandCategory. +type CommandCategories []*CommandCategory + +// CommandCategory is a category containing commands. +type CommandCategory struct { + Name string + Commands Commands +} + +func (c CommandCategories) Less(i, j int) bool { + return c[i].Name < c[j].Name +} + +func (c CommandCategories) Len() int { + return len(c) +} + +func (c CommandCategories) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// AddCommand adds a command to a category. +func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { + for _, commandCategory := range c { + if commandCategory.Name == category { + commandCategory.Commands = append(commandCategory.Commands, command) + return c + } + } + return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (c *CommandCategory) VisibleCommands() []Command { + ret := []Command{} + for _, command := range c.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} diff --git a/vendor/github.com/codegangsta/cli/cli.go b/vendor/github.com/codegangsta/cli/cli.go new file mode 100644 index 0000000..74fd101 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/cli.go @@ -0,0 +1,21 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. 
cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// func main() { +// cli.NewApp().Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// func main() { +// app := cli.NewApp() +// app.Name = "greet" +// app.Usage = "say a greeting" +// app.Action = func(c *cli.Context) error { +// println("Greetings") +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go diff --git a/vendor/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go new file mode 100644 index 0000000..8950cca --- /dev/null +++ b/vendor/github.com/codegangsta/cli/command.go @@ -0,0 +1,279 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // short name of the command. Typically one character (deprecated, use `Aliases`) + ShortName string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action interface{} + // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind + // of deprecation period has passed, maybe? + + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands Commands + // List of flags to parse + Flags []Flag + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command + HideHelp bool + // Boolean to hide this command from help or completion + Hidden bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string +} + +// FullName returns the full name of the command. 
+// For subcommands this ensures that parent commands are part of the command path +func (c Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +// Commands is a slice of Command +type Commands []Command + +// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags +func (c Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { + return c.startApp(ctx) + } + + if !c.HideHelp && (HelpFlag != BoolFlag{}) { + // append help to flags + c.Flags = append( + c.Flags, + HelpFlag, + ) + } + + if ctx.App.EnableBashCompletion { + c.Flags = append(c.Flags, BashCompletionFlag) + } + + set := flagSet(c.Name, c.Flags) + set.SetOutput(ioutil.Discard) + + if !c.SkipFlagParsing { + firstFlagIndex := -1 + terminatorIndex := -1 + for index, arg := range ctx.Args() { + if arg == "--" { + terminatorIndex = index + break + } else if arg == "-" { + // Do nothing. A dash alone is not really a flag. + continue + } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { + firstFlagIndex = index + } + } + + if firstFlagIndex > -1 { + args := ctx.Args() + regularArgs := make([]string, len(args[1:firstFlagIndex])) + copy(regularArgs, args[1:firstFlagIndex]) + + var flagArgs []string + if terminatorIndex > -1 { + flagArgs = args[firstFlagIndex:terminatorIndex] + regularArgs = append(regularArgs, args[terminatorIndex:]...) + } else { + flagArgs = args[firstFlagIndex:] + } + + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + } else { + if c.SkipFlagParsing { + err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + } + } + + if err != nil { + if c.OnUsageError != nil { + err := c.OnUsageError(ctx, err, false) + HandleExitCoder(err) + return err + } + fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return err + } + + nerr := normalizeFlags(c.Flags, set) + if nerr != nil { + fmt.Fprintln(ctx.App.Writer, nerr) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return nerr + } + + context := NewContext(ctx.App, set, ctx) + + if checkCommandCompletions(context, c.Name) { + return nil + } + + if checkCommandHelp(context, c.Name) { + return nil + } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + HandleExitCoder(err) + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err = c.Before(context) + if err != nil { + fmt.Fprintln(ctx.App.Writer, err) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + HandleExitCoder(err) + return err + } + } + + context.Command = c + err = HandleAction(c.Action, context) + + if err != nil { + HandleExitCoder(err) + } + return err +} + +// Names returns the names including short names and aliases. +func (c Command) Names() []string { + names := []string{c.Name} + + if c.ShortName != "" { + names = append(names, c.ShortName) + } + + return append(names, c.Aliases...) 
+} + +// HasName returns true if Command.Name or Command.ShortName matches given name +func (c Command) HasName(name string) bool { + for _, n := range c.Names() { + if n == name { + return true + } + } + return false +} + +func (c Command) startApp(ctx *Context) error { + app := NewApp() + app.Metadata = ctx.App.Metadata + // set the name and usage + app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) + if c.HelpName == "" { + app.HelpName = c.HelpName + } else { + app.HelpName = app.Name + } + + if c.Description != "" { + app.Usage = c.Description + } else { + app.Usage = c.Usage + } + + // set CommandNotFound + app.CommandNotFound = ctx.App.CommandNotFound + + // set the flags and commands + app.Commands = c.Subcommands + app.Flags = c.Flags + app.HideHelp = c.HideHelp + + app.Version = ctx.App.Version + app.HideVersion = ctx.App.HideVersion + app.Compiled = ctx.App.Compiled + app.Author = ctx.App.Author + app.Email = ctx.App.Email + app.Writer = ctx.App.Writer + + app.categories = CommandCategories{} + for _, command := range c.Subcommands { + app.categories = app.categories.AddCommand(command.Category, command) + } + + sort.Sort(app.categories) + + // bash completion + app.EnableBashCompletion = ctx.App.EnableBashCompletion + if c.BashComplete != nil { + app.BashComplete = c.BashComplete + } + + // set the actions + app.Before = c.Before + app.After = c.After + if c.Action != nil { + app.Action = c.Action + } else { + app.Action = helpSubcommand.Action + } + + for index, cc := range app.Commands { + app.Commands[index].commandNamePath = []string{c.Name, cc.Name} + } + + return app.RunAsSubcommand(ctx) +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (c Command) VisibleFlags() []Flag { + return visibleFlags(c.Flags) +} diff --git a/vendor/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go new file mode 100644 index 0000000..14ad3f7 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/context.go @@ -0,0 +1,213 @@ +package cli + +import ( + "errors" + "flag" + "strings" +) + +// Context is a type that is passed through to +// each Handler action in a cli application. Context +// can be used to retrieve context-specific Args and +// parsed command-line options. +type Context struct { + App *App + Command Command + flagSet *flag.FlagSet + setFlags map[string]bool + globalSetFlags map[string]bool + parentContext *Context +} + +// NewContext creates a new context. For use in when invoking an App or Command action. +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { + return &Context{App: app, flagSet: set, parentContext: parentCtx} +} + +// NumFlags returns the number of flags set +func (c *Context) NumFlags() int { + return c.flagSet.NFlag() +} + +// Set sets a context flag to a value. 
+func (c *Context) Set(name, value string) error { + return c.flagSet.Set(name, value) +} + +// GlobalSet sets a context flag to a value on the global flagset +func (c *Context) GlobalSet(name, value string) error { + return globalContext(c).flagSet.Set(name, value) +} + +// IsSet determines if the flag was actually set +func (c *Context) IsSet(name string) bool { + if c.setFlags == nil { + c.setFlags = make(map[string]bool) + c.flagSet.Visit(func(f *flag.Flag) { + c.setFlags[f.Name] = true + }) + } + return c.setFlags[name] == true +} + +// GlobalIsSet determines if the global flag was actually set +func (c *Context) GlobalIsSet(name string) bool { + if c.globalSetFlags == nil { + c.globalSetFlags = make(map[string]bool) + ctx := c + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext { + ctx.flagSet.Visit(func(f *flag.Flag) { + c.globalSetFlags[f.Name] = true + }) + } + } + return c.globalSetFlags[name] +} + +// FlagNames returns a slice of flag names used in this context. +func (c *Context) FlagNames() (names []string) { + for _, flag := range c.Command.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" { + continue + } + names = append(names, name) + } + return +} + +// GlobalFlagNames returns a slice of global flag names used by the app. +func (c *Context) GlobalFlagNames() (names []string) { + for _, flag := range c.App.Flags { + name := strings.Split(flag.GetName(), ",")[0] + if name == "help" || name == "version" { + continue + } + names = append(names, name) + } + return +} + +// Parent returns the parent context, if any +func (c *Context) Parent() *Context { + return c.parentContext +} + +// Args contains apps console arguments +type Args []string + +// Args returns the command line arguments associated with the context. +func (c *Context) Args() Args { + args := Args(c.flagSet.Args()) + return args +} + +// NArg returns the number of the command line arguments. 
+func (c *Context) NArg() int { + return len(c.Args()) +} + +// Get returns the nth argument, or else a blank string +func (a Args) Get(n int) string { + if len(a) > n { + return a[n] + } + return "" +} + +// First returns the first argument, or else a blank string +func (a Args) First() string { + return a.Get(0) +} + +// Tail returns the rest of the arguments (not the first one) +// or else an empty string slice +func (a Args) Tail() []string { + if len(a) >= 2 { + return []string(a)[1:] + } + return []string{} +} + +// Present checks if there are any arguments present +func (a Args) Present() bool { + return len(a) != 0 +} + +// Swap swaps arguments at the given indexes +func (a Args) Swap(from, to int) error { + if from >= len(a) || to >= len(a) { + return errors.New("index out of range") + } + a[from], a[to] = a[to], a[from] + return nil +} + +func globalContext(ctx *Context) *Context { + if ctx == nil { + return nil + } + + for { + if ctx.parentContext == nil { + return ctx + } + ctx = ctx.parentContext + } +} + +func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { + if ctx.parentContext != nil { + ctx = ctx.parentContext + } + for ; ctx != nil; ctx = ctx.parentContext { + if f := ctx.flagSet.Lookup(name); f != nil { + return ctx.flagSet + } + } + return nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case *StringSlice: + default: + set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := strings.Split(f.GetName(), ",") + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} diff --git a/vendor/github.com/codegangsta/cli/errors.go b/vendor/github.com/codegangsta/cli/errors.go new file mode 100644 index 0000000..ea551be --- /dev/null +++ b/vendor/github.com/codegangsta/cli/errors.go @@ -0,0 +1,92 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OsExiter is the function used when the app exits. If not set defaults to os.Exit. +var OsExiter = os.Exit + +// ErrWriter is used to write errors to the user. This can be anything +// implementing the io.Writer interface and defaults to os.Stderr. +var ErrWriter io.Writer = os.Stderr + +// MultiError is an error that wraps multiple errors. +type MultiError struct { + Errors []error +} + +// NewMultiError creates a new MultiError. Pass in one or more errors. +func NewMultiError(err ...error) MultiError { + return MultiError{Errors: err} +} + +// Error implents the error interface. 
+func (m MultiError) Error() string { + errs := make([]string, len(m.Errors)) + for i, err := range m.Errors { + errs[i] = err.Error() + } + + return strings.Join(errs, "\n") +} + +// ExitCoder is the interface checked by `App` and `Command` for a custom exit +// code +type ExitCoder interface { + error + ExitCode() int +} + +// ExitError fulfills both the builtin `error` interface and `ExitCoder` +type ExitError struct { + exitCode int + message string +} + +// NewExitError makes a new *ExitError +func NewExitError(message string, exitCode int) *ExitError { + return &ExitError{ + exitCode: exitCode, + message: message, + } +} + +// Error returns the string message, fulfilling the interface required by +// `error` +func (ee *ExitError) Error() string { + return ee.message +} + +// ExitCode returns the exit code, fulfilling the interface required by +// `ExitCoder` +func (ee *ExitError) ExitCode() int { + return ee.exitCode +} + +// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if +// so prints the error to stderr (if it is non-empty) and calls OsExiter with the +// given exit code. If the given error is a MultiError, then this func is +// called on all members of the Errors slice. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + fmt.Fprintln(ErrWriter, err) + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + for _, merr := range multiErr.Errors { + HandleExitCoder(merr) + } + } +} diff --git a/vendor/github.com/codegangsta/cli/flag-types.json b/vendor/github.com/codegangsta/cli/flag-types.json new file mode 100644 index 0000000..1223107 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/flag-types.json @@ -0,0 +1,93 @@ +[ + { + "name": "Bool", + "type": "bool", + "value": false, + "context_default": "false", + "parser": "strconv.ParseBool(f.Value.String())" + }, + { + "name": "BoolT", + "type": "bool", + "value": false, + "doctail": " that is true by default", + "context_default": "false", + "parser": "strconv.ParseBool(f.Value.String())" + }, + { + "name": "Duration", + "type": "time.Duration", + "doctail": " (see https://golang.org/pkg/time/#ParseDuration)", + "context_default": "0", + "parser": "time.ParseDuration(f.Value.String())" + }, + { + "name": "Float64", + "type": "float64", + "context_default": "0", + "parser": "strconv.ParseFloat(f.Value.String(), 64)" + }, + { + "name": "Generic", + "type": "Generic", + "dest": false, + "context_default": "nil", + "context_type": "interface{}" + }, + { + "name": "Int64", + "type": "int64", + "context_default": "0", + "parser": "strconv.ParseInt(f.Value.String(), 0, 64)" + }, + { + "name": "Int", + "type": "int", + "context_default": "0", + "parser": "strconv.ParseInt(f.Value.String(), 0, 64)", + "parser_cast": "int(parsed)" + }, + { + "name": "IntSlice", + "type": "*IntSlice", + "dest": false, + "context_default": "nil", + "context_type": "[]int", + "parser": "(f.Value.(*IntSlice)).Value(), error(nil)" + }, + { + "name": "Int64Slice", + "type": "*Int64Slice", + "dest": false, + "context_default": "nil", + "context_type": "[]int64", + "parser": "(f.Value.(*Int64Slice)).Value(), error(nil)" + }, + { + "name": "String", + "type": "string", + "context_default": "\"\"", + "parser": "f.Value.String(), error(nil)" + }, + { + "name": "StringSlice", + "type": "*StringSlice", + "dest": false, + "context_default": "nil", + "context_type": "[]string", + "parser": 
"(f.Value.(*StringSlice)).Value(), error(nil)" + }, + { + "name": "Uint64", + "type": "uint64", + "context_default": "0", + "parser": "strconv.ParseUint(f.Value.String(), 0, 64)" + }, + { + "name": "Uint", + "type": "uint", + "context_default": "0", + "parser": "strconv.ParseUint(f.Value.String(), 0, 64)", + "parser_cast": "uint(parsed)" + } +] diff --git a/vendor/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go new file mode 100644 index 0000000..e748c02 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/flag.go @@ -0,0 +1,621 @@ +package cli + +import ( + "flag" + "fmt" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "time" +) + +const defaultPlaceholder = "value" + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag = BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag = BoolFlag{ + Name: "version, v", + Usage: "print the version", +} + +// HelpFlag prints the help for all commands and subcommands +// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand +// unless HideHelp is set to true) +var HelpFlag = BoolFlag{ + Name: "help, h", + Usage: "show help", +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) + GetName() string +} + +func flagSet(name string, flags []Flag) *flag.FlagSet { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + f.Apply(set) + } + return set +} + +func eachName(longName string, fn func(string)) { + parts := strings.Split(longName, ",") + for _, name := range parts { + name = strings.Trim(name, " ") + fn(name) + } +} + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f GenericFlag) Apply(set *flag.FlagSet) { + val := f.Value + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + val.Set(envVal) + break + } + } + } + + eachName(f.Name, func(name string) { + set.Var(f.Value, name, f.Usage) + }) +} + +// StringSlice is an opaque type for []string to satisfy flag.Value +type StringSlice []string + +// Set appends the string value to the list of values +func (f *StringSlice) Set(value string) error { + *f = append(*f, value) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringSlice) String() string { + return fmt.Sprintf("%s", *f) +} + +// Value returns the slice of strings set by this flag +func (f *StringSlice) Value() []string { + return *f +} + +// Apply populates the flag given the flag set and environment +func (f StringSliceFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + newVal := &StringSlice{} + for _, s := range 
strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + newVal.Set(s) + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &StringSlice{} + } + set.Var(f.Value, name, f.Usage) + }) +} + +// IntSlice is an opaque type for []int to satisfy flag.Value +type IntSlice []int + +// Set parses the value into an integer and appends it to the list of values +func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntSlice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *IntSlice) Value() []int { + return *f +} + +// Apply populates the flag given the flag set and environment +func (f IntSliceFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + err := newVal.Set(s) + if err != nil { + fmt.Fprintf(ErrWriter, err.Error()) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &IntSlice{} + } + set.Var(f.Value, name, f.Usage) + }) +} + +// Int64Slice is an opaque type for []int to satisfy flag.Value +type Int64Slice []int64 + +// Set parses the value into an integer and appends it to the list of values +func (f *Int64Slice) Set(value string) error { + tmp, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + *f = append(*f, tmp) + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Slice) String() string { + return fmt.Sprintf("%#v", *f) +} + +// Value returns the slice of ints set by this flag +func (f *Int64Slice) Value() []int64 { + return *f +} + +// Apply populates the flag given the flag set and environment +func (f Int64SliceFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + newVal := &Int64Slice{} + for _, s := range strings.Split(envVal, ",") { + s = strings.TrimSpace(s) + err := newVal.Set(s) + if err != nil { + fmt.Fprintf(ErrWriter, err.Error()) + } + } + f.Value = newVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Value == nil { + f.Value = &Int64Slice{} + } + set.Var(f.Value, name, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f BoolFlag) Apply(set *flag.FlagSet) { + val := false + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValBool, err := strconv.ParseBool(envVal) + if err == nil { + val = envValBool + } + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f BoolTFlag) Apply(set *flag.FlagSet) { + val := true + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := 
os.Getenv(envVar); envVal != "" { + envValBool, err := strconv.ParseBool(envVal) + if err == nil { + val = envValBool + break + } + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.BoolVar(f.Destination, name, val, f.Usage) + return + } + set.Bool(name, val, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f StringFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + f.Value = envVal + break + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + return + } + set.String(name, f.Value, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f IntFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err == nil { + f.Value = int(envValInt) + break + } + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Int(name, f.Value, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f Int64Flag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValInt, err := strconv.ParseInt(envVal, 0, 64) + if err == nil { + f.Value = envValInt + break + } + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Int64(name, f.Value, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f UintFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err == nil { + f.Value = uint(envValInt) + break + } + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint(name, f.Value, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f Uint64Flag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValInt, err := strconv.ParseUint(envVal, 0, 64) + if err == nil { + f.Value = uint64(envValInt) + break + } + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Uint64(name, f.Value, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f DurationFlag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValDuration, err := time.ParseDuration(envVal) + if err == nil { + f.Value = envValDuration + break + } + } + } + } + + eachName(f.Name, func(name 
string) { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + return + } + set.Duration(name, f.Value, f.Usage) + }) +} + +// Apply populates the flag given the flag set and environment +func (f Float64Flag) Apply(set *flag.FlagSet) { + if f.EnvVar != "" { + for _, envVar := range strings.Split(f.EnvVar, ",") { + envVar = strings.TrimSpace(envVar) + if envVal := os.Getenv(envVar); envVal != "" { + envValFloat, err := strconv.ParseFloat(envVal, 10) + if err == nil { + f.Value = float64(envValFloat) + } + } + } + } + + eachName(f.Name, func(name string) { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + return + } + set.Float64(name, f.Value, f.Usage) + }) +} + +func visibleFlags(fl []Flag) []Flag { + visible := []Flag{} + for _, flag := range fl { + if !flagValue(flag).FieldByName("Hidden").Bool() { + visible = append(visible, flag) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. +func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(fullName, placeholder string) string { + var prefixed string + parts := strings.Split(fullName, ",") + for i, name := range parts { + name = strings.Trim(name, " ") + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(parts)-1 { + prefixed += ", " + } + } + return prefixed +} + +func withEnvHint(envVar, str string) string { + envText := "" + if envVar != "" { + prefix := "$" + suffix := "" + sep := ", $" + if runtime.GOOS == "windows" { + prefix = "%" + suffix = "%" + sep = "%, %" + } + envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix) + } + return str + envText +} + +func flagValue(f Flag) reflect.Value { + fv := reflect.ValueOf(f) + for fv.Kind() == reflect.Ptr { + fv = reflect.Indirect(fv) + } + return fv +} + +func stringifyFlag(f Flag) string { + fv := flagValue(f) + + switch f.(type) { + case IntSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyIntSliceFlag(f.(IntSliceFlag))) + case Int64SliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyInt64SliceFlag(f.(Int64SliceFlag))) + case StringSliceFlag: + return withEnvHint(fv.FieldByName("EnvVar").String(), + stringifyStringSliceFlag(f.(StringSliceFlag))) + } + + placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) + + needsPlaceholder := false + defaultValueString := "" + val := fv.FieldByName("Value") + + if val.IsValid() { + needsPlaceholder = true + defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) + + if val.Kind() == reflect.String && val.String() != "" { + defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) + } + } + + if defaultValueString == " (default: )" { + defaultValueString = "" + } + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString)) + + return withEnvHint(fv.FieldByName("EnvVar").String(), + fmt.Sprintf("%s\t%s", 
prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault)) +} + +func stringifyIntSliceFlag(f IntSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyInt64SliceFlag(f Int64SliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, fmt.Sprintf("%d", i)) + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifyStringSliceFlag(f StringSliceFlag) string { + defaultVals := []string{} + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, fmt.Sprintf("%q", s)) + } + } + } + + return stringifySliceFlag(f.Usage, f.Name, defaultVals) +} + +func stringifySliceFlag(usage, name string, defaultVals []string) string { + placeholder, usage := unquoteUsage(usage) + if placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultVal := "" + if len(defaultVals) > 0 { + defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) + } + + usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal)) + return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault) +} diff --git a/vendor/github.com/codegangsta/cli/flag_generated.go b/vendor/github.com/codegangsta/cli/flag_generated.go new file mode 100644 index 0000000..491b619 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/flag_generated.go @@ -0,0 +1,627 @@ +package cli + +import ( + "flag" + "strconv" + "time" +) + +// WARNING: This file is generated! 
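+// It is produced by the `go:generate` directive in cli.go, which runs
+// ./generate-flag-types against flag-types.json.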
+ +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolFlag) GetName() string { + return f.Name +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (c *Context) Bool(name string) bool { + return lookupBool(name, c.flagSet) +} + +// GlobalBool looks up the value of a global BoolFlag, returns +// false if not found +func (c *Context) GlobalBool(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// BoolTFlag is a flag with type bool that is true by default +type BoolTFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Destination *bool +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f BoolTFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f BoolTFlag) GetName() string { + return f.Name +} + +// BoolT looks up the value of a local BoolTFlag, returns +// false if not found +func (c *Context) BoolT(name string) bool { + return lookupBoolT(name, c.flagSet) +} + +// GlobalBoolT looks up the value of a global BoolTFlag, returns +// false if not found +func (c *Context) GlobalBoolT(name string) bool { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupBoolT(name, fs) + } + return false +} + +func lookupBoolT(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} + +// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) +type DurationFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value time.Duration + Destination *time.Duration +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f DurationFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f DurationFlag) GetName() string { + return f.Name +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (c *Context) Duration(name string) time.Duration { + return lookupDuration(name, c.flagSet) +} + +// GlobalDuration looks up the value of a global DurationFlag, returns +// 0 if not found +func (c *Context) GlobalDuration(name string) time.Duration { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value float64 + Destination *float64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f 
Float64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Float64Flag) GetName() string { + return f.Name +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (c *Context) Float64(name string) float64 { + return lookupFloat64(name, c.flagSet) +} + +// GlobalFloat64 looks up the value of a global Float64Flag, returns +// 0 if not found +func (c *Context) GlobalFloat64(name string) float64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value Generic +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f GenericFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f GenericFlag) GetName() string { + return f.Name +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (c *Context) Generic(name string) interface{} { + return lookupGeneric(name, c.flagSet) +} + +// GlobalGeneric looks up the value of a global GenericFlag, returns +// nil if not found +func (c *Context) GlobalGeneric(name string) interface{} { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + f := set.Lookup(name) + if f != nil { + parsed, err := f.Value, error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int64 + Destination *int64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64Flag) GetName() string { + return f.Name +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (c *Context) Int64(name string) int64 { + return lookupInt64(name, c.flagSet) +} + +// GlobalInt64 looks up the value of a global Int64Flag, returns +// 0 if not found +func (c *Context) GlobalInt64(name string) int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value int + Destination *int +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntFlag) GetName() string { + return f.Name +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (c *Context) Int(name string) int { + return lookupInt(name, c.flagSet) +} + +// GlobalInt looks up the value of a global IntFlag, 
returns +// 0 if not found +func (c *Context) GlobalInt(name string) int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *IntSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f IntSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f IntSliceFlag) GetName() string { + return f.Name +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (c *Context) IntSlice(name string) []int { + return lookupIntSlice(name, c.flagSet) +} + +// GlobalIntSlice looks up the value of a global IntSliceFlag, returns +// nil if not found +func (c *Context) GlobalIntSlice(name string) []int { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*IntSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *Int64Slice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Int64SliceFlag) GetName() string { + return f.Name +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (c *Context) Int64Slice(name string) []int64 { + return lookupInt64Slice(name, c.flagSet) +} + +// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns +// nil if not found +func (c *Context) GlobalInt64Slice(name string) []int64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value string + Destination *string +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringFlag) GetName() string { + return f.Name +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (c *Context) String(name string) string { + return lookupString(name, c.flagSet) +} + +// GlobalString looks up the value of a global StringFlag, returns +// "" if not found +func (c *Context) GlobalString(name string) string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + f := set.Lookup(name) + if f != 
nil { + parsed, err := f.Value.String(), error(nil) + if err != nil { + return "" + } + return parsed + } + return "" +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value *StringSlice +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f StringSliceFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f StringSliceFlag) GetName() string { + return f.Name +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (c *Context) StringSlice(name string) []string { + return lookupStringSlice(name, c.flagSet) +} + +// GlobalStringSlice looks up the value of a global StringSliceFlag, returns +// nil if not found +func (c *Context) GlobalStringSlice(name string) []string { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + parsed, err := (f.Value.(*StringSlice)).Value(), error(nil) + if err != nil { + return nil + } + return parsed + } + return nil +} + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint64 + Destination *uint64 +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f Uint64Flag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f Uint64Flag) GetName() string { + return f.Name +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (c *Context) Uint64(name string) uint64 { + return lookupUint64(name, c.flagSet) +} + +// GlobalUint64 looks up the value of a global Uint64Flag, returns +// 0 if not found +func (c *Context) GlobalUint64(name string) uint64 { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + Usage string + EnvVar string + Hidden bool + Value uint + Destination *uint +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f UintFlag) String() string { + return FlagStringer(f) +} + +// GetName returns the name of the flag +func (f UintFlag) GetName() string { + return f.Name +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (c *Context) Uint(name string) uint { + return lookupUint(name, c.flagSet) +} + +// GlobalUint looks up the value of a global UintFlag, returns +// 0 if not found +func (c *Context) GlobalUint(name string) uint { + if fs := lookupGlobalFlagSet(name, c); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/codegangsta/cli/funcs.go b/vendor/github.com/codegangsta/cli/funcs.go new file mode 100644 index 0000000..cba5e6c --- /dev/null +++ 
b/vendor/github.com/codegangsta/cli/funcs.go @@ -0,0 +1,28 @@ +package cli + +// BashCompleteFunc is an action to execute when the bash-completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string diff --git a/vendor/github.com/codegangsta/cli/generate-flag-types b/vendor/github.com/codegangsta/cli/generate-flag-types new file mode 100755 index 0000000..8567f5d --- /dev/null +++ b/vendor/github.com/codegangsta/cli/generate-flag-types @@ -0,0 +1,244 @@ +#!/usr/bin/env python +""" +The flag types that ship with the cli library have many things in common, and +so we can take advantage of the `go generate` command to create much of the +source code from a list of definitions. These definitions attempt to cover +the parts that vary between flag types, and should evolve as needed. + +An example of the minimum definition needed is: + + { + "name": "SomeType", + "type": "sometype", + "context_default": "nil" + } + +In this example, the code generated for the `cli` package will include a type +named `SomeTypeFlag` that is expected to wrap a value of type `sometype`. +Fetching values by name via `*cli.Context` will default to a value of `nil`. + +A more complete, albeit somewhat redundant, example showing all available +definition keys is: + + { + "name": "VeryMuchType", + "type": "*VeryMuchType", + "value": true, + "dest": false, + "doctail": " which really only wraps a []float64, oh well!", + "context_type": "[]float64", + "context_default": "nil", + "parser": "parseVeryMuchType(f.Value.String())", + "parser_cast": "[]float64(parsed)" + } + +The meaning of each field is as follows: + + name (string) - The type "name", which will be suffixed with + `Flag` when generating the type definition + for `cli` and the wrapper type for `altsrc` + type (string) - The type that the generated `Flag` type for `cli` + is expected to "contain" as its `.Value` member + value (bool) - Should the generated `cli` type have a `Value` + member? + dest (bool) - Should the generated `cli` type support a + destination pointer? 
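// ---------------------------------------------------------------------------
// Illustrative aside (not part of the vendored sources): a minimal sketch of
// how the hook types above (BeforeFunc, AfterFunc, ActionFunc) and the
// generated typed flag getters (Bool, Duration, ...) are typically wired
// together against this cli snapshot. The app name, flag names, and values
// are assumptions invented for the example; the import path matches the
// vendored location used by this patch.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "example"
	app.Flags = []cli.Flag{
		// BoolFlag and DurationFlag are among the generated flag types shown above.
		cli.BoolFlag{Name: "verbose", Usage: "enable verbose output"},
		cli.DurationFlag{Name: "timeout", Value: 30 * time.Second, Usage: "request timeout"},
	}
	// BeforeFunc: returning a non-nil error prevents the Action from running.
	app.Before = func(c *cli.Context) error {
		if c.Duration("timeout") <= 0 {
			return fmt.Errorf("timeout must be positive")
		}
		return nil
	}
	// AfterFunc: runs after the Action has finished.
	app.After = func(c *cli.Context) error {
		fmt.Println("done")
		return nil
	}
	// ActionFunc: reads values through the typed Context getters; flags that
	// are not set fall back to the documented defaults (false, 0, "", nil).
	app.Action = func(c *cli.Context) error {
		if c.Bool("verbose") {
			fmt.Println("timeout:", c.Duration("timeout"))
		}
		return nil
	}
	app.Run(os.Args)
}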
+ doctail (string) - Additional docs for the `cli` flag type comment + context_type (string) - The literal type used in the `*cli.Context` + reader func signature + context_default (string) - The literal value used as the default by the + `*cli.Context` reader funcs when no value is + present + parser (string) - Literal code used to parse the flag `f`, + expected to have a return signature of + (value, error) + parser_cast (string) - Literal code used to cast the `parsed` value + returned from the `parser` code +""" + +from __future__ import print_function, unicode_literals + +import argparse +import json +import os +import subprocess +import sys +import tempfile +import textwrap + + +class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter, + argparse.RawDescriptionHelpFormatter): + pass + + +def main(sysargs=sys.argv[:]): + parser = argparse.ArgumentParser( + description='Generate flag type code!', + formatter_class=_FancyFormatter) + parser.add_argument( + 'package', + type=str, default='cli', choices=_WRITEFUNCS.keys(), + help='Package for which flag types will be generated' + ) + parser.add_argument( + '-i', '--in-json', + type=argparse.FileType('r'), + default=sys.stdin, + help='Input JSON file which defines each type to be generated' + ) + parser.add_argument( + '-o', '--out-go', + type=argparse.FileType('w'), + default=sys.stdout, + help='Output file/stream to which generated source will be written' + ) + parser.epilog = __doc__ + + args = parser.parse_args(sysargs[1:]) + _generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json) + return 0 + + +def _generate_flag_types(writefunc, output_go, input_json): + types = json.load(input_json) + + tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False) + writefunc(tmp, types) + tmp.close() + + new_content = subprocess.check_output( + ['goimports', tmp.name] + ).decode('utf-8') + + print(new_content, file=output_go, end='') + output_go.flush() + os.remove(tmp.name) + + +def _set_typedef_defaults(typedef): + typedef.setdefault('doctail', '') + typedef.setdefault('context_type', typedef['type']) + typedef.setdefault('dest', True) + typedef.setdefault('value', True) + typedef.setdefault('parser', 'f.Value, error(nil)') + typedef.setdefault('parser_cast', 'parsed') + + +def _write_cli_flag_types(outfile, types): + _fwrite(outfile, """\ + package cli + + // WARNING: This file is generated! 
+ + """) + + for typedef in types: + _set_typedef_defaults(typedef) + + _fwrite(outfile, """\ + // {name}Flag is a flag with type {type}{doctail} + type {name}Flag struct {{ + Name string + Usage string + EnvVar string + Hidden bool + """.format(**typedef)) + + if typedef['value']: + _fwrite(outfile, """\ + Value {type} + """.format(**typedef)) + + if typedef['dest']: + _fwrite(outfile, """\ + Destination *{type} + """.format(**typedef)) + + _fwrite(outfile, "\n}\n\n") + + _fwrite(outfile, """\ + // String returns a readable representation of this value + // (for usage defaults) + func (f {name}Flag) String() string {{ + return FlagStringer(f) + }} + + // GetName returns the name of the flag + func (f {name}Flag) GetName() string {{ + return f.Name + }} + + // {name} looks up the value of a local {name}Flag, returns + // {context_default} if not found + func (c *Context) {name}(name string) {context_type} {{ + return lookup{name}(name, c.flagSet) + }} + + // Global{name} looks up the value of a global {name}Flag, returns + // {context_default} if not found + func (c *Context) Global{name}(name string) {context_type} {{ + if fs := lookupGlobalFlagSet(name, c); fs != nil {{ + return lookup{name}(name, fs) + }} + return {context_default} + }} + + func lookup{name}(name string, set *flag.FlagSet) {context_type} {{ + f := set.Lookup(name) + if f != nil {{ + parsed, err := {parser} + if err != nil {{ + return {context_default} + }} + return {parser_cast} + }} + return {context_default} + }} + """.format(**typedef)) + + +def _write_altsrc_flag_types(outfile, types): + _fwrite(outfile, """\ + package altsrc + + // WARNING: This file is generated! + + """) + + for typedef in types: + _set_typedef_defaults(typedef) + + _fwrite(outfile, """\ + // {name}Flag is the flag type that wraps cli.{name}Flag to allow + // for other values to be specified + type {name}Flag struct {{ + cli.{name}Flag + set *flag.FlagSet + }} + + // New{name}Flag creates a new {name}Flag + func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{ + return &{name}Flag{{{name}Flag: fl, set: nil}} + }} + + // Apply saves the flagSet for later usage calls, then calls the + // wrapped {name}Flag.Apply + func (f *{name}Flag) Apply(set *flag.FlagSet) {{ + f.set = set + f.{name}Flag.Apply(set) + }} + """.format(**typedef)) + + +def _fwrite(outfile, text): + print(textwrap.dedent(text), end='', file=outfile) + + +_WRITEFUNCS = { + 'cli': _write_cli_flag_types, + 'altsrc': _write_altsrc_flag_types +} + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go new file mode 100644 index 0000000..ba34719 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/help.go @@ -0,0 +1,267 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" +) + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. 
+var AppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} + +USAGE: + {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} + {{if .Version}}{{if not .HideVersion}} +VERSION: + {{.Version}} + {{end}}{{end}}{{if len .Authors}} +AUTHOR(S): + {{range .Authors}}{{.}}{{end}} + {{end}}{{if .VisibleCommands}} +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{end}}{{if .VisibleFlags}} +GLOBAL OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}}{{if .Copyright}} +COPYRIGHT: + {{.Copyright}} + {{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var CommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + +COMMANDS:{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{end}}{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}} +{{end}}{{if .VisibleFlags}} +OPTIONS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +` + +var helpCommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + ShowAppHelp(c) + return nil + }, +} + +var helpSubcommand = Command{ + Name: "help", + Aliases: []string{"h"}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(c *Context) error { + args := c.Args() + if args.Present() { + return ShowCommandHelp(c, args.First()) + } + + return ShowSubcommandHelp(c) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// HelpPrinter is a function that writes the help output. If not set a default +// is used. The function signature is: +// func(w io.Writer, templ string, data interface{}) +var HelpPrinter helpPrinter = printHelp + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelp is an action that displays the help. 
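// ---------------------------------------------------------------------------
// Illustrative aside (not part of the vendored sources): AppHelpTemplate,
// CommandHelpTemplate, and HelpPrinter above are package-level variables, so
// an application can override them to customize help output. A minimal
// sketch under that assumption; the app name and the template text below are
// examples, not the library's defaults.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	// Replace the default app help template with a shorter one.
	cli.AppHelpTemplate = `{{.Name}} - {{.Usage}}

options:
   {{range .VisibleFlags}}{{.}}
   {{end}}
`
	// Alternatively, take over rendering entirely via HelpPrinter.
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintf(w, "help requested (template is %d bytes)\n", len(templ))
	}

	app := cli.NewApp()
	app.Name = "example"
	app.Usage = "demonstrate custom help rendering"
	app.Run(os.Args)
}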
+func ShowAppHelp(c *Context) error { + HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(c *Context) { + for _, command := range c.App.Commands { + if command.Hidden { + continue + } + for _, name := range command.Names() { + fmt.Fprintln(c.App.Writer, name) + } + } +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + // show the subcommand help for a command with subcommands + if command == "" { + HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) + return nil + } + + for _, c := range ctx.App.Commands { + if c.HasName(command) { + HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) + return nil + } + } + + if ctx.App.CommandNotFound == nil { + return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(c *Context) error { + return ShowCommandHelp(c, c.Command.Name) +} + +// ShowVersion prints the version number of the App +func ShowVersion(c *Context) { + VersionPrinter(c) +} + +func printVersion(c *Context) { + fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(c *Context) { + a := c.App + if a != nil && a.BashComplete != nil { + a.BashComplete(c) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.App.Command(command) + if c != nil && c.BashComplete != nil { + c.BashComplete(ctx) + } +} + +func printHelp(out io.Writer, templ string, data interface{}) { + funcMap := template.FuncMap{ + "join": strings.Join, + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. 
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + w.Flush() +} + +func checkVersion(c *Context) bool { + found := false + if VersionFlag.Name != "" { + eachName(VersionFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkHelp(c *Context) bool { + found := false + if HelpFlag.Name != "" { + eachName(HelpFlag.Name, func(name string) { + if c.GlobalBool(name) || c.Bool(name) { + found = true + } + }) + } + return found +} + +func checkCommandHelp(c *Context, name string) bool { + if c.Bool("h") || c.Bool("help") { + ShowCommandHelp(c, name) + return true + } + + return false +} + +func checkSubcommandHelp(c *Context) bool { + if c.Bool("h") || c.Bool("help") { + ShowSubcommandHelp(c) + return true + } + + return false +} + +func checkCompletions(c *Context) bool { + if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { + ShowCompletions(c) + return true + } + + return false +} + +func checkCommandCompletions(c *Context, name string) bool { + if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { + ShowCommandCompletions(c, name) + return true + } + + return false +} diff --git a/vendor/github.com/codegangsta/cli/runtests b/vendor/github.com/codegangsta/cli/runtests new file mode 100755 index 0000000..d9fa542 --- /dev/null +++ b/vendor/github.com/codegangsta/cli/runtests @@ -0,0 +1,118 @@ +#!/usr/bin/env python +from __future__ import print_function + +import argparse +import os +import sys +import tempfile + +from subprocess import check_call, check_output + + +PACKAGE_NAME = os.environ.get( + 'CLI_PACKAGE_NAME', 'github.com/urfave/cli' +) + + +def main(sysargs=sys.argv[:]): + targets = { + 'vet': _vet, + 'test': _test, + 'gfmrun': _gfmrun, + 'toc': _toc, + 'gen': _gen, + } + + parser = argparse.ArgumentParser() + parser.add_argument( + 'target', nargs='?', choices=tuple(targets.keys()), default='test' + ) + args = parser.parse_args(sysargs[1:]) + + targets[args.target]() + return 0 + + +def _test(): + if check_output('go version'.split()).split()[2] < 'go1.2': + _run('go test -v .') + return + + coverprofiles = [] + for subpackage in ['', 'altsrc']: + coverprofile = 'cli.coverprofile' + if subpackage != '': + coverprofile = '{}.coverprofile'.format(subpackage) + + coverprofiles.append(coverprofile) + + _run('go test -v'.split() + [ + '-coverprofile={}'.format(coverprofile), + ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/') + ]) + + combined_name = _combine_coverprofiles(coverprofiles) + _run('go tool cover -func={}'.format(combined_name)) + os.remove(combined_name) + + +def _gfmrun(): + _run(['gfmrun', '-c', str(_gfmrun_count()), '-s', 'README.md']) + + +def _vet(): + _run('go vet ./...') + + +def _toc(): + _run('node_modules/.bin/markdown-toc -i README.md') + _run('git diff --exit-code') + + +def _gen(): + go_version = check_output('go version'.split()).split()[2] + if go_version < 'go1.4': + print('runtests: skip on {}'.format(go_version), file=sys.stderr) + return + + _run('go generate ./...') + _run('git diff --exit-code') + + +def _run(command): + if hasattr(command, 'split'): + command = command.split() + print('runtests: {}'.format(' '.join(command)), file=sys.stderr) + check_call(command) + + +def _gfmrun_count(): + with open('README.md') as infile: + lines = infile.read().splitlines() + return len(filter(_is_go_runnable, lines)) + + +def 
_is_go_runnable(line): + return line.startswith('package main') + + +def _combine_coverprofiles(coverprofiles): + combined = tempfile.NamedTemporaryFile( + suffix='.coverprofile', delete=False + ) + combined.write('mode: set\n') + + for coverprofile in coverprofiles: + with open(coverprofile, 'r') as infile: + for line in infile.readlines(): + if not line.startswith('mode: '): + combined.write(line) + + combined.flush() + name = combined.name + combined.close() + return name + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index 2a7cfd2..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2012-2013 Dave Collins - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 565bf58..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine and "-tags disableunsafe" -// is not added to the go build command line. -// +build !appengine,!disableunsafe - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. 
- offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) -) - -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } - - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } - } -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } - } - - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() - } - return rv -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 457e412..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 14f02dc..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<already shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer?
- switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index ee1ab07..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "disableunsafe" build tag specified. 
- DisablePointerMethods bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) 
-} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. 
In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index 5be0c40..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. 
- - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. 
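
A minimal sketch of the configuration options described above applied through a dedicated ConfigState rather than the spew.Config global, assuming the go-spew API as vendored here; the variable names and sample data are illustrative only:

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		// Dedicated ConfigState: tab indentation, depth-limited output,
		// and deterministic map-key ordering via SortKeys.
		cfg := spew.ConfigState{Indent: "\t", MaxDepth: 2, SortKeys: true}

		data := map[string]interface{}{"b": []byte("hi"), "a": 1}
		cfg.Dump(data)                                      // Dump-style output to stdout
		fmt.Printf("inline: %+v\n", cfg.NewFormatter(data)) // fmt-integrated formatter
	}
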
- -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index a0ff95e..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound == true: - d.w.Write(nilAngleBytes) - - case cycleFound == true: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index ecf3b80..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound == true: - f.fs.Write(nilAngleBytes) - - case cycleFound == true: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index d8233f5..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/drone/drone-go/LICENSE b/vendor/github.com/drone/drone-go/LICENSE deleted file mode 100644 index e06d208..0000000 --- a/vendor/github.com/drone/drone-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/drone/drone-go/drone/client.go b/vendor/github.com/drone/drone-go/drone/client.go deleted file mode 100644 index f0fae22..0000000 --- a/vendor/github.com/drone/drone-go/drone/client.go +++ /dev/null @@ -1,404 +0,0 @@ -package drone - -//go:generate mockery -all -//go:generate mv mocks/Client.go mocks/client.go - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - - "golang.org/x/oauth2" -) - -const ( - pathSelf = "%s/api/user" - pathFeed = "%s/api/user/feed" - pathRepos = "%s/api/user/repos" - pathRepo = "%s/api/repos/%s/%s" - pathEncrypt = "%s/api/repos/%s/%s/encrypt" - pathBuilds = "%s/api/repos/%s/%s/builds" - pathBuild = "%s/api/repos/%s/%s/builds/%v" - pathJob = "%s/api/repos/%s/%s/builds/%d/%d" - pathLog = "%s/api/repos/%s/%s/logs/%d/%d" - pathKey = "%s/api/repos/%s/%s/key" - pathSign = "%s/api/repos/%s/%s/sign" - pathSecrets = "%s/api/repos/%s/%s/secrets" - pathSecret = "%s/api/repos/%s/%s/secrets/%s" - pathNodes = "%s/api/nodes" - pathNode = "%s/api/nodes/%d" - pathUsers = "%s/api/users" - pathUser = "%s/api/users/%s" -) - -type client struct { - client *http.Client - base string // base url -} - -// NewClient returns a client at the specified url. -func NewClient(uri string) Client { - return &client{http.DefaultClient, uri} -} - -// NewClientToken returns a client at the specified url that -// authenticates all outbound requests with the given token. -func NewClientToken(uri, token string) Client { - config := new(oauth2.Config) - auther := config.Client(oauth2.NoContext, &oauth2.Token{AccessToken: token}) - return &client{auther, uri} -} - -// NewClientTokenTLS returns a client at the specified url that -// authenticates all outbound requests with the given token and -// tls.Config if provided. -func NewClientTokenTLS(uri, token string, c *tls.Config) Client { - config := new(oauth2.Config) - auther := config.Client(oauth2.NoContext, &oauth2.Token{AccessToken: token}) - if c != nil { - auther.Transport.(*oauth2.Transport).Base = &http.Transport{TLSClientConfig: c} - } - return &client{auther, uri} -} - -// SetClient sets the default http client. This should be -// used in conjunction with golang.org/x/oauth2 to -// authenticate requests to the Drone server. -func (c *client) SetClient(client *http.Client) { - c.client = client -} - -// Self returns the currently authenticated user. -func (c *client) Self() (*User, error) { - out := new(User) - uri := fmt.Sprintf(pathSelf, c.base) - err := c.get(uri, out) - return out, err -} - -// User returns a user by login. -func (c *client) User(login string) (*User, error) { - out := new(User) - uri := fmt.Sprintf(pathUser, c.base, login) - err := c.get(uri, out) - return out, err -} - -// UserList returns a list of all registered users. -func (c *client) UserList() ([]*User, error) { - var out []*User - uri := fmt.Sprintf(pathUsers, c.base) - err := c.get(uri, &out) - return out, err -} - -// UserPost creates a new user account. -func (c *client) UserPost(in *User) (*User, error) { - out := new(User) - uri := fmt.Sprintf(pathUsers, c.base) - err := c.post(uri, in, out) - return out, err -} - -// UserPatch updates a user account. -func (c *client) UserPatch(in *User) (*User, error) { - out := new(User) - uri := fmt.Sprintf(pathUser, c.base, in.Login) - err := c.patch(uri, in, out) - return out, err -} - -// UserDel deletes a user account. 
-func (c *client) UserDel(login string) error { - uri := fmt.Sprintf(pathUser, c.base, login) - err := c.delete(uri) - return err -} - -// UserFeed returns the user's activity feed. -func (c *client) UserFeed() ([]*Activity, error) { - var out []*Activity - uri := fmt.Sprintf(pathFeed, c.base) - err := c.get(uri, &out) - return out, err -} - -// Repo returns a repository by name. -func (c *client) Repo(owner string, name string) (*Repo, error) { - out := new(Repo) - uri := fmt.Sprintf(pathRepo, c.base, owner, name) - err := c.get(uri, out) - return out, err -} - -// RepoList returns a list of all repositories to which -// the user has explicit access in the host system. -func (c *client) RepoList() ([]*Repo, error) { - var out []*Repo - uri := fmt.Sprintf(pathRepos, c.base) - err := c.get(uri, &out) - return out, err -} - -// RepoPost activates a repository. -func (c *client) RepoPost(owner string, name string) (*Repo, error) { - out := new(Repo) - uri := fmt.Sprintf(pathRepo, c.base, owner, name) - err := c.post(uri, nil, out) - return out, err -} - -// RepoPatch updates a repository. -func (c *client) RepoPatch(in *Repo) (*Repo, error) { - out := new(Repo) - uri := fmt.Sprintf(pathRepo, c.base, in.Owner, in.Name) - err := c.patch(uri, in, out) - return out, err -} - -// RepoDel deletes a repository. -func (c *client) RepoDel(owner, name string) error { - uri := fmt.Sprintf(pathRepo, c.base, owner, name) - err := c.delete(uri) - return err -} - -// RepoKey returns a repository public key. -func (c *client) RepoKey(owner, name string) (*Key, error) { - out := new(Key) - uri := fmt.Sprintf(pathKey, c.base, owner, name) - rc, err := c.stream(uri, "GET", nil, nil) - if err != nil { - return nil, err - } - defer rc.Close() - raw, _ := ioutil.ReadAll(rc) - out.Public = string(raw) - return out, err -} - -// Build returns a repository build by number. -func (c *client) Build(owner, name string, num int) (*Build, error) { - out := new(Build) - uri := fmt.Sprintf(pathBuild, c.base, owner, name, num) - err := c.get(uri, out) - return out, err -} - -// Build returns the latest repository build by branch. -func (c *client) BuildLast(owner, name, branch string) (*Build, error) { - out := new(Build) - uri := fmt.Sprintf(pathBuild, c.base, owner, name, "latest") - if len(branch) != 0 { - uri += "?branch=" + branch - } - err := c.get(uri, out) - return out, err -} - -// BuildList returns a list of recent builds for the -// the specified repository. -func (c *client) BuildList(owner, name string) ([]*Build, error) { - var out []*Build - uri := fmt.Sprintf(pathBuilds, c.base, owner, name) - err := c.get(uri, &out) - return out, err -} - -// BuildStart re-starts a stopped build. -func (c *client) BuildStart(owner, name string, num int) (*Build, error) { - out := new(Build) - uri := fmt.Sprintf(pathBuild, c.base, owner, name, num) - err := c.post(uri, nil, out) - return out, err -} - -// BuildStop cancels the running job. -func (c *client) BuildStop(owner, name string, num, job int) error { - uri := fmt.Sprintf(pathJob, c.base, owner, name, num, job) - err := c.delete(uri) - return err -} - -// BuildFork re-starts a stopped build with a new build number, -// preserving the prior history. -func (c *client) BuildFork(owner, name string, num int) (*Build, error) { - out := new(Build) - uri := fmt.Sprintf(pathBuild+"?fork=true", c.base, owner, name, num) - err := c.post(uri, nil, out) - return out, err -} - -// BuildLogs returns the build logs for the specified job. 
-func (c *client) BuildLogs(owner, name string, num, job int) (io.ReadCloser, error) { - uri := fmt.Sprintf(pathLog, c.base, owner, name, num, job) - return c.stream(uri, "GET", nil, nil) -} - -// Deploy triggers a deployment for an existing build using the -// specified target environment. -func (c *client) Deploy(owner, name string, num int, env string) (*Build, error) { - out := new(Build) - val := new(url.Values) - val.Set("fork", "true") - val.Set("event", "deployment") - val.Set("deploy_to", env) - uri := fmt.Sprintf(pathBuild+"?"+val.Encode(), c.base, owner, name, num) - err := c.post(uri, nil, out) - return out, err -} - -// SecretPost create or updates a repository secret. -func (c *client) SecretPost(owner, name string, secret *Secret) error { - uri := fmt.Sprintf(pathSecrets, c.base, owner, name) - return c.post(uri, secret, nil) -} - -// SecretDel deletes a named repository secret. -func (c *client) SecretDel(owner, name, secret string) error { - uri := fmt.Sprintf(pathSecret, c.base, owner, name, secret) - return c.delete(uri) -} - -// Sign returns a cryptographic signature for the input string. -func (c *client) Sign(owner, name string, in []byte) ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(in) - uri := fmt.Sprintf(pathSign, c.base, owner, name) - rc, err := c.stream(uri, "POST", buf, nil) - if err != nil { - return nil, err - } - defer rc.Close() - return ioutil.ReadAll(rc) -} - -// Node returns a node by id. -func (c *client) Node(id int64) (*Node, error) { - out := new(Node) - uri := fmt.Sprintf(pathNode, c.base, id) - err := c.get(uri, out) - return out, err -} - -// NodeList returns a list of all registered worker nodes. -func (c *client) NodeList() ([]*Node, error) { - var out []*Node - uri := fmt.Sprintf(pathNodes, c.base) - err := c.get(uri, &out) - return out, err -} - -// NodePost registers a new worker node. -func (c *client) NodePost(in *Node) (*Node, error) { - out := new(Node) - uri := fmt.Sprintf(pathNodes, c.base) - err := c.post(uri, in, out) - return out, err -} - -// NodeDel deletes a worker node. -func (c *client) NodeDel(id int64) error { - uri := fmt.Sprintf(pathNode, c.base, id) - err := c.delete(uri) - return err -} - -// -// http request helper functions -// - -// helper function for making an http GET request. -func (c *client) get(rawurl string, out interface{}) error { - return c.do(rawurl, "GET", nil, out) -} - -// helper function for making an http POST request. -func (c *client) post(rawurl string, in, out interface{}) error { - return c.do(rawurl, "POST", in, out) -} - -// helper function for making an http PUT request. -func (c *client) put(rawurl string, in, out interface{}) error { - return c.do(rawurl, "PUT", in, out) -} - -// helper function for making an http PATCH request. -func (c *client) patch(rawurl string, in, out interface{}) error { - return c.do(rawurl, "PATCH", in, out) -} - -// helper function for making an http DELETE request. -func (c *client) delete(rawurl string) error { - return c.do(rawurl, "DELETE", nil, nil) -} - -// helper function to make an http request -func (c *client) do(rawurl, method string, in, out interface{}) error { - // executes the http request and returns the body as - // and io.ReadCloser - body, err := c.stream(rawurl, method, in, out) - if err != nil { - return err - } - defer body.Close() - - // if a json response is expected, parse and return - // the json response. 
- if out != nil { - return json.NewDecoder(body).Decode(out) - } - return nil -} - -// helper function to stream an http request -func (c *client) stream(rawurl, method string, in, out interface{}) (io.ReadCloser, error) { - uri, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - - // if we are posting or putting data, we need to - // write it to the body of the request. - var buf io.ReadWriter - if in == nil { - // nothing - } else if rw, ok := in.(io.ReadWriter); ok { - buf = rw - } else { - buf = new(bytes.Buffer) - err := json.NewEncoder(buf).Encode(in) - if err != nil { - return nil, err - } - } - - // creates a new http request to bitbucket. - req, err := http.NewRequest(method, uri.String(), buf) - if err != nil { - return nil, err - } - if in == nil { - // nothing - } else if _, ok := in.(io.ReadWriter); ok { - req.Header.Set("Content-Type", "plain/text") - } else { - req.Header.Set("Content-Type", "application/json") - } - - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode > http.StatusPartialContent { - defer resp.Body.Close() - out, _ := ioutil.ReadAll(resp.Body) - return nil, fmt.Errorf(string(out)) - } - return resp.Body, nil -} diff --git a/vendor/github.com/drone/drone-go/drone/const.go b/vendor/github.com/drone/drone-go/drone/const.go deleted file mode 100644 index fd8f902..0000000 --- a/vendor/github.com/drone/drone-go/drone/const.go +++ /dev/null @@ -1,48 +0,0 @@ -package drone - -// Events -const ( - EventPush = "push" - EventPull = "pull_request" - EventTag = "tag" - EventDeploy = "deployment" -) - -// Statuses -const ( - StatusSkipped = "skipped" - StatusPending = "pending" - StatusRunning = "running" - StatusSuccess = "success" - StatusFailure = "failure" - StatusKilled = "killed" - StatusError = "error" -) - -// Architectures -const ( - Freebsd_386 uint = iota - Freebsd_amd64 - Freebsd_arm - Linux_386 - Linux_amd64 - Linux_arm - Linux_arm64 - Solaris_amd64 - Windows_386 - Windows_amd64 -) - -// Architecture Map -var Archs = map[string]uint{ - "freebsd_386": Freebsd_386, - "freebsd_amd64": Freebsd_amd64, - "freebsd_arm": Freebsd_arm, - "linux_386": Linux_386, - "linux_amd64": Linux_amd64, - "linux_arm": Linux_arm, - "linux_arm64": Linux_arm64, - "solaris_amd64": Solaris_amd64, - "windows_386": Windows_386, - "windows_amd64": Windows_amd64, -} diff --git a/vendor/github.com/drone/drone-go/drone/interface.go b/vendor/github.com/drone/drone-go/drone/interface.go deleted file mode 100644 index 9a65daf..0000000 --- a/vendor/github.com/drone/drone-go/drone/interface.go +++ /dev/null @@ -1,95 +0,0 @@ -package drone - -import "io" - -// Client describes a drone client. -type Client interface { - // Self returns the currently authenticated user. - Self() (*User, error) - - // User returns a user by login. - User(string) (*User, error) - - // UserList returns a list of all registered users. - UserList() ([]*User, error) - - // UserPost creates a new user account. - UserPost(*User) (*User, error) - - // UserPatch updates a user account. - UserPatch(*User) (*User, error) - - // UserDel deletes a user account. - UserDel(string) error - - // UserFeed returns the user's activity feed. - UserFeed() ([]*Activity, error) - - // Repo returns a repository by name. - Repo(string, string) (*Repo, error) - - // RepoList returns a list of all repositories to which - // the user has explicit access in the host system. - RepoList() ([]*Repo, error) - - // RepoPost activates a repository. 
- RepoPost(string, string) (*Repo, error) - - // RepoPatch updates a repository. - RepoPatch(*Repo) (*Repo, error) - - // RepoDel deletes a repository. - RepoDel(string, string) error - - // RepoKey returns a repository public key. - RepoKey(string, string) (*Key, error) - - // Build returns a repository build by number. - Build(string, string, int) (*Build, error) - - // BuildLast returns the latest repository build by branch. - // An empty branch will result in the default branch. - BuildLast(string, string, string) (*Build, error) - - // BuildList returns a list of recent builds for the - // the specified repository. - BuildList(string, string) ([]*Build, error) - - // BuildStart re-starts a stopped build. - BuildStart(string, string, int) (*Build, error) - - // BuildStop stops the specified running job for given build. - BuildStop(string, string, int, int) error - - // BuildFork re-starts a stopped build with a new build number, - // preserving the prior history. - BuildFork(string, string, int) (*Build, error) - - // BuildLogs returns the build logs for the specified job. - BuildLogs(string, string, int, int) (io.ReadCloser, error) - - // Deploy triggers a deployment for an existing build using the - // specified target environment. - Deploy(string, string, int, string) (*Build, error) - - // Sign returns a cryptographic signature for the input string. - Sign(string, string, []byte) ([]byte, error) - - // SecretPost create or updates a repository secret. - SecretPost(string, string, *Secret) error - - // SecretDel deletes a named repository secret. - SecretDel(string, string, string) error - - // Node returns a node by id. - Node(int64) (*Node, error) - - // NodeList returns a list of all registered worker nodes. - NodeList() ([]*Node, error) - - // NodePost registers a new worker node. - NodePost(*Node) (*Node, error) - - // NodeDel deletes a worker node. - NodeDel(int64) error -} diff --git a/vendor/github.com/drone/drone-go/drone/types.go b/vendor/github.com/drone/drone-go/drone/types.go deleted file mode 100644 index 5d3a6b6..0000000 --- a/vendor/github.com/drone/drone-go/drone/types.go +++ /dev/null @@ -1,209 +0,0 @@ -package drone - -import "encoding/json" - -// User represents a user account. -type User struct { - ID int64 `json:"id"` - Login string `json:"login"` - Email string `json:"email"` - Avatar string `json:"avatar_url"` - Active bool `json:"active"` - Admin bool `json:"admin"` -} - -// Repo represents a version control repository. -type Repo struct { - ID int64 `json:"id"` - Owner string `json:"owner"` - Name string `json:"name"` - FullName string `json:"full_name"` - Avatar string `json:"avatar_url"` - Link string `json:"link_url"` - Clone string `json:"clone_url"` - Branch string `json:"default_branch"` - Timeout int64 `json:"timeout"` - IsPrivate bool `json:"private"` - IsTrusted bool `json:"trusted"` - AllowPull bool `json:"allow_pr"` - AllowPush bool `json:"allow_push"` - AllowDeploy bool `json:"allow_deploys"` - AllowTag bool `json:"allow_tags"` -} - -// Build represents the process of compiling and testing a changeset, -// typically triggered by the remote system (ie GitHub). 
-type Build struct { - ID int64 `json:"id"` - Number int `json:"number"` - Event string `json:"event"` - Status string `json:"status"` - Enqueued int64 `json:"enqueued_at"` - Created int64 `json:"created_at"` - Started int64 `json:"started_at"` - Finished int64 `json:"finished_at"` - Deploy string `json:"deploy_to"` - Commit string `json:"commit"` - Branch string `json:"branch"` - Ref string `json:"ref"` - Refspec string `json:"refspec"` - Remote string `json:"remote"` - Title string `json:"title"` - Message string `json:"message"` - Timestamp int64 `json:"timestamp"` - Author string `json:"author"` - Avatar string `json:"author_avatar"` - Email string `json:"author_email"` - Link string `json:"link_url"` -} - -// Job represents a single job that is being executed as part -// of a Build. -type Job struct { - ID int64 `json:"id"` - Number int `json:"number"` - Status string `json:"status"` - ExitCode int `json:"exit_code"` - Enqueued int64 `json:"enqueued_at"` - Started int64 `json:"started_at"` - Finished int64 `json:"finished_at"` - - Environment map[string]string `json:"environment"` -} - -// Secret represents a repository secret. -type Secret struct { - ID int64 `json:"id"` - Name string `json:"name"` - Value string `json:"value"` - Image []string `json:"image"` - Event []string `json:"event"` -} - -// Activity represents a build activity. It combines the -// build details with summary Repository information. -type Activity struct { - Owner string `json:"owner"` - Name string `json:"name"` - FullName string `json:"full_name"` - Number int `json:"number"` - Event string `json:"event"` - Status string `json:"status"` - Enqueued int64 `json:"enqueued_at"` - Created int64 `json:"created_at"` - Started int64 `json:"started_at"` - Finished int64 `json:"finished_at"` - Commit string `json:"commit"` - Branch string `json:"branch"` - Ref string `json:"ref"` - Refspec string `json:"refspec"` - Remote string `json:"remote"` - Title string `json:"title"` - Message string `json:"message"` - Timestamp int64 `json:"timestamp"` - Author string `json:"author"` - Avatar string `json:"author_avatar"` - Email string `json:"author_email"` - Link string `json:"link_url"` -} - -// Node represents a local or remote Docker daemon that is -// responsible for running jobs. -type Node struct { - ID int64 `json:"id"` - Addr string `json:"address"` - Arch string `json:"architecture"` - Cert string `json:"cert"` - Key string `json:"key"` - CA string `json:"ca"` -} - -// Key represents an RSA public and private key assigned to a -// repository. It may be used to clone private repositories, or as -// a deployment key. -type Key struct { - Public string `json:"public"` - Private string `json:"private"` -} - -// Netrc defines a default .netrc file that should be injected -// into the build environment. It will be used to authorize access -// to https resources, such as git+https clones. -type Netrc struct { - Machine string `json:"machine"` - Login string `json:"login"` - Password string `json:"password"` -} - -// netrc is only used to allow compatibility with older plugins that rely on -// the JSON "user" attribute as a password value. In Drone 0.6 breaking -// changes will be introduced and this netrc struct and the Netrc UnmarshalJSON -// and MarshalJSON will be removed. -type netrc struct { - Machine string `json:"machine"` - Login string `json:"login"` - Password string `json:"password"` - PasswordOld string `json:"user"` -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (n *Netrc) UnmarshalJSON(b []byte) error { - x := &netrc{} - err := json.Unmarshal(b, x) - if err != nil { - return err - } - n.Machine = x.Machine - n.Login = x.Login - n.Password = x.Password - if x.PasswordOld != "" { - n.Password = x.PasswordOld - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (n Netrc) MarshalJSON() ([]byte, error) { - x := &netrc{ - Machine: n.Machine, - Login: n.Login, - Password: n.Password, - PasswordOld: n.Password, - } - return json.Marshal(x) -} - -// System represents the drone system. -type System struct { - Version string `json:"version"` - Link string `json:"link_url"` - Plugins []string `json:"plugins"` - Globals []string `json:"globals"` - Escalates []string `json:"privileged_plugins"` -} - -// Workspace defines the build's workspace inside the -// container. This helps the plugin locate the source -// code directory. -type Workspace struct { - Root string `json:"root"` - Path string `json:"path"` - - Netrc *Netrc `json:"netrc"` - Keys *Key `json:"keys"` -} - -// Payload defines the full payload send to plugins. -type Payload struct { - Yaml string `json:"config"` - YamlEnc string `json:"secret"` - Repo *Repo `json:"repo"` - Build *Build `json:"build"` - BuildLast *Build `json:"build_last"` - Job *Job `json:"job"` - Netrc *Netrc `json:"netrc"` - Keys *Key `json:"keys"` - System *System `json:"system"` - Workspace *Workspace `json:"workspace"` - Vargs interface{} `json:"vargs"` -} diff --git a/vendor/github.com/drone/drone-go/drone/types_json.go b/vendor/github.com/drone/drone-go/drone/types_json.go deleted file mode 100644 index 97fdfab..0000000 --- a/vendor/github.com/drone/drone-go/drone/types_json.go +++ /dev/null @@ -1,127 +0,0 @@ -package drone - -import ( - "encoding/json" - "strconv" -) - -// StringSlice representes a string or an array of strings. -type StringSlice struct { - parts []string -} - -// UnmarshalJSON unmarshals bytes into a StringSlice. -func (e *StringSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - e.parts = p - return nil -} - -// Len returns the number of strings in a StringSlice. -func (e *StringSlice) Len() int { - if e == nil { - return 0 - } - return len(e.parts) -} - -// Slice returns the slice of strings in a StringSlice. -func (e *StringSlice) Slice() []string { - if e == nil { - return nil - } - return e.parts -} - -// NewStringSlice takes a slice of strings and returns a StringSlice. -func NewStringSlice(parts []string) StringSlice { - return StringSlice{parts} -} - -// StringMap representes a string or a map of strings. -type StringMap struct { - parts map[string]string -} - -// UnmarshalJSON unmarshals bytes into a StringMap. -func (e *StringMap) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return nil - } - - p := map[string]string{} - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p[""] = s - } - - e.parts = p - return nil -} - -// Len returns the number of elements in the StringMap. -func (e *StringMap) Len() int { - if e == nil { - return 0 - } - return len(e.parts) -} - -// String satisfies the fmt.Stringer interface. 
-func (e *StringMap) String() (str string) { - if e == nil { - return - } - for _, val := range e.parts { - return val // returns the first string value - } - return -} - -// Map returns StringMap as a map of string to string. -func (e *StringMap) Map() map[string]string { - if e == nil { - return nil - } - return e.parts -} - -// NewStringMap takes a map of string to string and returns a StringMap. -func NewStringMap(parts map[string]string) StringMap { - return StringMap{parts} -} - -// StringInt representes a string or an integer value. -type StringInt struct { - value string -} - -// UnmarshalJSON unmarshals bytes into a StringInt. -func (e *StringInt) UnmarshalJSON(b []byte) error { - var num int - err := json.Unmarshal(b, &num) - if err == nil { - e.value = strconv.Itoa(num) - return nil - } - return json.Unmarshal(b, &e.value) -} - -func (e StringInt) String() string { - return e.value -} diff --git a/vendor/github.com/drone/drone-go/plugin/plugin.go b/vendor/github.com/drone/drone-go/plugin/plugin.go deleted file mode 100644 index 57133fc..0000000 --- a/vendor/github.com/drone/drone-go/plugin/plugin.go +++ /dev/null @@ -1,134 +0,0 @@ -package plugin - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" -) - -// Standard Input -var Stdin *ParamSet - -func init() { - // defaults to stdin - Stdin = NewParamSet(os.Stdin) - - // check for params after the double dash - // in the command string - for i, argv := range os.Args { - if argv == "--" { - arg := os.Args[i+1] - buf := bytes.NewBufferString(arg) - Stdin = NewParamSet(buf) - break - } - } -} - -// this init function is deprecated, but I'm keeping it -// around just in case it proves useful in the future. -func deprecated_init() { - // if piping from stdin we can just exit - // and use the default Stdin value - stat, _ := os.Stdin.Stat() - if (stat.Mode() & os.ModeCharDevice) == 0 { - return - } - - // check for params after the double dash - // in the command string - for i, argv := range os.Args { - if argv == "--" { - arg := os.Args[i+1] - buf := bytes.NewBufferString(arg) - Stdin = NewParamSet(buf) - return - } - } - - // else use the first variable in the list - if len(os.Args) > 1 { - buf := bytes.NewBufferString(os.Args[1]) - Stdin = NewParamSet(buf) - } -} - -// ParamSet describes a parameter set. -type ParamSet struct { - reader io.Reader - params map[string]interface{} -} - -// NewParamSet takes a io.Reader and returns a ParamSet. -func NewParamSet(reader io.Reader) *ParamSet { - var p = new(ParamSet) - p.reader = reader - p.params = map[string]interface{}{} - return p -} - -// Param defines a parameter with the specified name. -func (p ParamSet) Param(name string, value interface{}) { - p.params[name] = value -} - -// Parse parses parameter definitions from the map. -func (p ParamSet) Parse() error { - raw := map[string]json.RawMessage{} - err := json.NewDecoder(p.reader).Decode(&raw) - if err != nil { - return err - } - - for key, val := range p.params { - data, ok := raw[key] - if !ok { - continue - } - err := json.Unmarshal(data, val) - if err != nil { - return fmt.Errorf("Unable to unarmshal %s. %s", key, err) - } - } - - return nil -} - -// Unmarshal parses the JSON payload from the command -// arguments and unmarshal into a value pointed to by v. -func (p ParamSet) Unmarshal(v interface{}) error { - return json.NewDecoder(p.reader).Decode(v) -} - -// Param defines a parameter with the specified name. 
-func Param(name string, value interface{}) { - Stdin.Param(name, value) -} - -// Parse parses parameter definitions from the map. -func Parse() error { - return Stdin.Parse() -} - -// Unmarshal parses the JSON payload from the command -// arguments and unmarshal into a value pointed to by v. -func Unmarshal(v interface{}) error { - return Stdin.Unmarshal(v) -} - -// MustUnmarshal parses the JSON payload from the command -// arguments and unmarshal into a value pointed to by v. -func MustUnmarshal(v interface{}) error { - return Stdin.Unmarshal(v) -} - -// MustParse parses parameter definitions from the map -// and panics if there is a parsing error. -func MustParse() { - err := Parse() - if err != nil { - panic(err) - } -} diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 0000000..ac034e5 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index 1272038..98698c1 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -1,4 +1,4 @@ -ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) === ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) @@ -22,13 +22,29 @@ Package ini provides INI file read and write functionality in Go. ## Installation +To use a tagged revision: + go get gopkg.in/ini.v1 +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. + ## Getting Started ### Loading from data sources -A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. +A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many data sources as you want**. Passing other types will simply return an error. ```go cfg, err := ini.Load([]byte("raw data"), "filename") @@ -40,12 +56,37 @@ Or start with an empty object: cfg := ini.Empty() ``` -When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. ```go err := cfg.Append("other file", []byte("other raw data")) ``` +If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. 
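A minimal sketch of that `LooseLoad` plus `Reload` behaviour, assuming the `gopkg.in/ini.v1` import path from the installation section; the file names are hypothetical:

```go
package main

import (
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// "override.ini" may not exist yet; with LooseLoad that is not an error.
	cfg, err := ini.LooseLoad("base.ini", "override.ini")
	if err != nil {
		log.Fatal(err) // genuine parse or I/O errors are still reported
	}

	// ... later, once override.ini has appeared on disk ...
	if err := cfg.Reload(); err != nil {
		log.Fatal(err)
	}
	// keys from override.ini are now visible through cfg as usual
}
```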
+ +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 and sec2 are the exactly same section object +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 and key2 are the exactly same key object +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +If you want to give more advanced load options, use `LoadSources` and take a look at [`LoadOptions`](https://github.com/go-ini/ini/blob/v1.16.1/ini.go#L156). + ### Working with sections To get a section, you would need to: @@ -95,6 +136,12 @@ Same rule applies to key operations: key := cfg.Section("").Key("key name") ``` +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + To create a new key: ```go @@ -111,7 +158,7 @@ names := cfg.Section("").KeyStrings() To get a clone hash of keys and corresponding values: ```go -hash := cfg.GetSection("").KeysHash() +hash := cfg.Section("").KeysHash() ``` ### Working with values @@ -133,12 +180,24 @@ val := cfg.Section("").Key("key name").Validate(func(in string) string { }) ``` +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + To get value with types: ```go // For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -212,6 +271,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! 
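Tying the load options together with the typed getters, a rough fragment in the same style as the snippets above (the file, section, and key names are invented for illustration; `MustBool` and `RangeInt` come from the key API shown elsewhere in this patch):

```go
cfg, err := ini.LoadSources(ini.LoadOptions{
	Insensitive:        true, // section and key names are lower-cased while parsing
	IgnoreContinuation: true, // a trailing "\" no longer joins lines
}, "app.ini")
if err != nil {
	log.Fatal(err)
}

debug := cfg.Section("").Key("debug").MustBool(false)
retries := cfg.Section("http").Key("retries").RangeInt(3, 0, 10)
```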
+ Note that single quotes around values will be stripped: ```ini @@ -250,9 +319,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -To auto-split value into slice: +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: ```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -262,6 +335,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + ### Save your configuration Finally, it's time to save your configuration to somewhere. @@ -323,6 +422,12 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + ### Auto-increment Key Names If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. @@ -464,7 +569,7 @@ type Info struct { } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md index 45e19ed..7b2f9e0 100644 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -15,8 +15,24 @@ ## 下载安装 +使用一个特定版本: + go get gopkg.in/ini.v1 +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + ## 开始使用 ### 从数据源加载 @@ -39,6 +55,31 @@ cfg := ini.Empty() err := cfg.Append("other file", []byte("other raw data")) ``` +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... 
+ +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +如果您想要更加自定义的加载选项,可以使用 `LoadSources` 方法并参见 [`LoadOptions`](https://github.com/go-ini/ini/blob/v1.16.1/ini.go#L156)。 + ### 操作分区(Section) 获取指定分区: @@ -88,6 +129,12 @@ key, err := cfg.Section("").GetKey("key name") key := cfg.Section("").Key("key name") ``` +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + 创建一个新的键: ```go @@ -104,7 +151,7 @@ names := cfg.Section("").KeyStrings() 获取分区下的所有键值对的克隆: ```go -hash := cfg.GetSection("").KeysHash() +hash := cfg.Section("").KeysHash() ``` ### 操作键值(Value) @@ -126,12 +173,24 @@ val := cfg.Section("").Key("key name").Validate(func(in string) string { }) ``` +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + 获取其它类型的值: ```go // 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -205,6 +264,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + 需要注意的是,值两侧的单引号会被自动剔除: ```ini @@ -243,9 +312,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -自动分割键值为切片(slice): +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: ```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -255,6 +328,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + ### 保存配置 终于到了这个时刻,是时候保存一下配置了。 @@ -316,6 +415,12 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` +#### 获取上级父分区下的所有键名 + +```go 
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + #### 读取自增键名 如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 @@ -455,7 +560,7 @@ type Info struct{ } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 1fee789..24cfb46 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -16,7 +16,6 @@ package ini import ( - "bufio" "bytes" "errors" "fmt" @@ -31,25 +30,35 @@ import ( ) const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - - _VERSION = "1.6.0" + _VERSION = "1.18.0" ) +// Version returns current package version literal. func Version() string { return _VERSION } var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. LineBreak = "\n" // Variable regexp pattern: %(variable)s varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - // Write spaces around "=" to look better. + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false ) func init() { @@ -67,11 +76,12 @@ func inSlice(str string, s []string) bool { return false } -// dataSource is a interface that returns file content. +// dataSource is an interface that returns object which can be read and closed. type dataSource interface { ReadCloser() (io.ReadCloser, error) } +// sourceFile represents an object that contains content on the local file system. type sourceFile struct { name string } @@ -92,6 +102,7 @@ func (rc *bytesReadCloser) Close() error { return nil } +// sourceData represents an object that contains content in memory. type sourceData struct { data []byte } @@ -100,589 +111,6 @@ func (s *sourceData) ReadCloser() (io.ReadCloser, error) { return &bytesReadCloser{bytes.NewReader(s.data)}, nil } -// ____ __. -// | |/ _|____ ___.__. -// | <_/ __ < | | -// | | \ ___/\___ | -// |____|__ \___ > ____| -// \/ \/\/ - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncr bool -} - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// String returns string representation of value. -func (k *Key) String() string { - val := k.value - if strings.Index(val, "%") == -1 { - return val - } - - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. 
- val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. 
-func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string devide by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 devide by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int devide by given delimiter. -func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 devide by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint devide by given delimiter. -func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 devide by given delimiter. 
-func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - k.value = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. 
-func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ___________.__.__ -// \_ _____/|__| | ____ -// | __) | | | _/ __ \ -// | \ | | |_\ ___/ -// \___ / |__|____/\___ > -// \/ \/ - // File represents a combination of a or more INI file(s) in memory. type File struct { // Should make things safe, but sometimes doesn't matter. @@ -698,16 +126,19 @@ type File struct { // To keep data in order. sectionList []string + options LoadOptions + NameMapper } // newFile initializes File object with given data sources. -func newFile(dataSources []dataSource) *File { +func newFile(dataSources []dataSource, opts LoadOptions) *File { return &File{ BlockMode: true, dataSources: dataSources, sections: make(map[string]*Section), sectionList: make([]string, 0, 10), + options: opts, } } @@ -722,9 +153,16 @@ func parseDataSource(source interface{}) (dataSource, error) { } } -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -func Load(source interface{}, others ...interface{}) (_ *File, err error) { +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { sources := make([]dataSource, len(others)+1) sources[0], err = parseDataSource(source) if err != nil { @@ -736,8 +174,30 @@ func Load(source interface{}, others ...interface{}) (_ *File, err error) { return nil, err } } - f := newFile(sources) - return f, f.Reload() + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) } // Empty returns an empty file object. 
@@ -751,6 +211,8 @@ func Empty() *File { func (f *File) NewSection(name string) (*Section, error) { if len(name) == 0 { return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) } if f.BlockMode { @@ -781,6 +243,8 @@ func (f *File) NewSections(names ...string) (err error) { func (f *File) GetSection(name string) (*Section, error) { if len(name) == 0 { name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) } if f.BlockMode { @@ -790,7 +254,7 @@ func (f *File) GetSection(name string) (*Section, error) { sec := f.sections[name] if sec == nil { - return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + return nil, fmt.Errorf("section '%s' does not exist", name) } return sec, nil } @@ -843,240 +307,6 @@ func (f *File) DeleteSection(name string) { } } -func cutComment(str string) string { - i := strings.Index(str, "#") - if i == -1 { - return str - } - return str[:i] -} - -func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { - isEnd := false - for { - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", err - } - isEnd = true - } - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - break - } - val += next - if isEnd { - return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { - isEnd := false - for { - valLen := len(val) - if valLen == 0 || val[valLen-1] != '\\' { - break - } - val = val[:valLen-1] - - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", isEnd, err - } - isEnd = true - } - - next = strings.TrimSpace(next) - if len(next) == 0 { - break - } - val += next - } - return val, isEnd, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) error { - buf := bufio.NewReader(reader) - - // Handle BOM-UTF8. - // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding - mask, err := buf.Peek(3) - if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - buf.Read(mask) - } - - count := 1 - comments := "" - isEnd := false - - section, err := f.NewSection(DEFAULT_SECTION) - if err != nil { - return err - } - - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - length := len(line) - - // Check error and ignore io.EOF just for a moment. - if err != nil { - if err != io.EOF { - return fmt.Errorf("error reading next line: %v", err) - } - // The last line of file could be an empty line. - if length == 0 { - break - } - isEnd = true - } - - // Skip empty lines. - if length == 0 { - continue - } - - switch { - case line[0] == '#' || line[0] == ';': // Comments. - if len(comments) == 0 { - comments = line - } else { - comments += LineBreak + line - } - continue - case line[0] == '[' && line[length-1] == ']': // New sction. - section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) - if err != nil { - return err - } - - if len(comments) > 0 { - section.Comment = comments - comments = "" - } - // Reset counter. - count = 1 - continue - } - - // Other possibilities. - var ( - i int - keyQuote string - kname string - valQuote string - val string - ) - - // Key name surrounded by quotes. 
- if line[0] == '"' { - if length > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - if len(keyQuote) > 0 { - qLen := len(keyQuote) - pos := strings.Index(line[qLen:], keyQuote) - if pos == -1 { - return fmt.Errorf("error parsing line: missing closing key quote: %s", line) - } - pos = pos + qLen - i = strings.IndexAny(line[pos:], "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == pos { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - i = i + pos - kname = line[qLen:pos] // Just keep spaces inside quotes. - } else { - i = strings.IndexAny(line, "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == 0 { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - kname = strings.TrimSpace(line[0:i]) - } - - isAutoIncr := false - // Auto increment. - if kname == "-" { - isAutoIncr = true - kname = "#" + fmt.Sprint(count) - count++ - } - - lineRight := strings.TrimSpace(line[i+1:]) - lineRightLength := len(lineRight) - firstChar := "" - if lineRightLength >= 2 { - firstChar = lineRight[0:1] - } - if firstChar == "`" { - valQuote = "`" - } else if firstChar == `"` { - if lineRightLength >= 3 && lineRight[0:3] == `"""` { - valQuote = `"""` - } else { - valQuote = `"` - } - } else if firstChar == `'` { - valQuote = `'` - } - - if len(valQuote) > 0 { - qLen := len(valQuote) - pos := strings.LastIndex(lineRight[qLen:], valQuote) - // For multiple-line value check. - if pos == -1 { - if valQuote == `"` || valQuote == `'` { - return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) - } - - val = lineRight[qLen:] + "\n" - val, err = checkMultipleLines(buf, line, val, valQuote) - if err != nil { - return err - } - } else { - val = lineRight[qLen : pos+qLen] - } - } else { - val = strings.TrimSpace(cutComment(lineRight)) - val, isEnd, err = checkContinuationLines(buf, val) - if err != nil { - return err - } - } - - k, err := section.NewKey(kname, val) - if err != nil { - return err - } - k.isAutoIncr = isAutoIncr - if len(comments) > 0 { - k.Comment = comments - comments = "" - } - - if isEnd { - break - } - } - return nil -} - func (f *File) reload(s dataSource) error { r, err := s.ReadCloser() if err != nil { @@ -1091,6 +321,11 @@ func (f *File) reload(s dataSource) error { func (f *File) Reload() (err error) { for _, s := range f.dataSources { if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } return err } } @@ -1114,7 +349,9 @@ func (f *File) Append(source interface{}, others ...interface{}) error { return f.Reload() } -// WriteToIndent writes file content into io.Writer with given value indention. +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. 
func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { equalSign := "=" if PrettyFormat { @@ -1134,17 +371,38 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } } - if i > 0 { + if i > 0 || DefaultHeader { if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { return 0, err } } else { - // Write nothing if default section is empty. + // Write nothing if default section is empty if len(sec.keyList) == 0 { continue } } + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + for _, kname := range sec.keyList { key := sec.Key(kname) if len(key.Comment) > 0 { @@ -1166,24 +424,33 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { switch { case key.isAutoIncr: kname = "-" - case strings.Contains(kname, "`") || strings.Contains(kname, `"`): - kname = `"""` + kname + `"""` - case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + case strings.ContainsAny(kname, "\"=:"): kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) } val := key.value - // In case key value contains "\n", "`" or "\"". - if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || - strings.Contains(val, "#") { + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" } - if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { return 0, err } } - // Put a line between sections. + // Put a line between sections if _, err = buf.WriteString(LineBreak); err != nil { return 0, err } diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 0000000..3b2f472 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,625 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. 
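The alignment pass above only runs when the package-level PrettyFormat flag is enabled; a rough sketch of writing a file back out through WriteToIndent, with section and key names invented:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nhttp_port = 3000\nmuch_longer_key = on"))
	if err != nil {
		log.Fatal(err)
	}

	ini.PrettyFormat = true // package-level flag consulted by WriteToIndent
	var buf bytes.Buffer
	if _, err := cfg.WriteToIndent(&buf, "\t"); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // "=" signs padded to the longest key in [server]
}
```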
+type Key struct { + s *Section + Comment string + name string + value string + isAutoIncr bool +} + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. 
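String above also performs recursive %(name)s substitution, looking in the key's own section first and then in the default section; a small sketch with made-up values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte(`
host = example.com

[api]
endpoint = https://%(host)s/v1
`)
	cfg, err := ini.Load(src)
	if err != nil {
		log.Fatal(err)
	}
	// "host" is not defined under [api], so the lookup falls back to the default section.
	fmt.Println(cfg.Section("api").Key("endpoint").String()) // https://example.com/v1
}
```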
+func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. 
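Taken together, the Must*, In* and Range* helpers let callers read typed values without explicit error handling; a brief sketch of the intended usage, with key names and defaults invented:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nport = 8080\nmode = staging\ntimeout = oops"))
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("server")

	port := sec.Key("port").MustInt(3000)                       // 8080: parses cleanly
	timeout := sec.Key("timeout").MustDuration(5 * time.Second) // 5s: falls back on parse error
	mode := sec.Key("mode").In("production", []string{"production", "staging"})
	ranged := sec.Key("port").RangeInt(3000, 1, 1024) // 3000: 8080 is outside [1, 1024]

	fmt.Println(port, timeout, mode, ranged)
}
```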
+func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. 
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. 
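The three list flavours differ only in how they treat entries that fail to parse: the plain getters coerce them to zero values, the Valid* getters drop them, and the Strict* getters return the first error. For example:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("nums = 1, two, 3"))
	key := cfg.Section("").Key("nums")

	fmt.Println(key.Ints(","))      // [1 0 3]  the invalid entry becomes a zero value
	fmt.Println(key.ValidInts(",")) // [1 3]    the invalid entry is dropped
	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict:", err) // the invalid entry surfaces as an error
	}
}
```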
+func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000..d6d4005 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,314 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines when desired. + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + return err + } + + // Auto increment. 
+ isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncr = isAutoIncr + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 0000000..2fa099f --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,202 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. 
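A short sketch of the Section surface added here, including the child-section fallback in GetKey (section and key names invented): Key never fails and creates an empty key on a miss, while GetKey reports the miss as an error.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[parent]\nshared = yes\n\n[parent.child]\nlocal = 1"))
	if err != nil {
		log.Fatal(err)
	}
	child := cfg.Section("parent.child")

	fmt.Println(child.HasKey("shared"))       // true: resolved via the parent section
	fmt.Println(child.Key("shared").String()) // yes

	if _, err := child.GetKey("nope"); err != nil {
		fmt.Println(err) // GetKey reports missing keys as errors
	}
	fmt.Println(child.Key("nope").String() == "") // true: Key creates an empty key instead
}
```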
+func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index c118437..f4620af 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -76,6 +76,59 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() +// setSliceWithProperType sets proper values to slice based on its type. 
+func setSliceWithProperType(key *Key, field reflect.Value, delim string) error { + strs := key.Strings(delim) + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals = key.Ints(delim) + case reflect.Int64: + vals = key.Int64s(delim) + case reflect.Uint: + vals = key.Uints(delim) + case reflect.Uint64: + vals = key.Uint64s(delim) + case reflect.Float64: + vals = key.Float64s(delim) + case reflectTime: + vals = key.Times(delim) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. @@ -94,13 +147,14 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } intVal, err := key.Int64() - if err != nil { + if err != nil || intVal == 0 { return nil } field.SetInt(intVal) @@ -131,29 +185,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - vals := key.Strings(delim) - numVals := len(vals) - if numVals == 0 { - return nil - } - - sliceOf := field.Type().Elem().Kind() - - var times []time.Time - if sliceOf == reflectTime { - times = key.Times(delim) - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(times[i])) - default: - slice.Index(i).Set(reflect.ValueOf(vals[i])) - } - } - field.Set(slice) + return setSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } @@ -238,34 +270,53 @@ func MapTo(v, source interface{}, others ...interface{}) error { return MapToWithMapper(v, nil, source, others...) } -// reflectWithProperType does the opposite thing with setWithProperType. +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
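With setSliceWithProperType in place, MapTo can populate numeric and time slices rather than only string and time slices; a hedged sketch, with struct and key names invented and the default comma delimiter assumed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

type ServerConfig struct {
	Name  string
	Ports []int
	Peers []string
}

func main() {
	src := []byte("Name = web-1\nPorts = 80, 443\nPeers = a.local, b.local")

	var sc ServerConfig
	// Package-level MapTo parses the source and maps matching keys onto exported fields;
	// the []int field is filled via Key.Ints under the hood.
	if err := ini.MapTo(&sc, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(sc.Name, sc.Ports, sc.Peers) // web-1 [80 443] [a.local b.local]
}
```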
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: key.SetValue(field.String()) - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float64, - reflectTime: - key.SetValue(fmt.Sprint(field)) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) case reflect.Slice: - vals := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - isTime := fmt.Sprint(field.Type()) == "[]time.Time" - for i := 0; i < field.Len(); i++ { - if isTime { - buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) - } else { - buf.WriteString(fmt.Sprint(vals.Index(i))) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) + return reflectSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } @@ -293,7 +344,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { } if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct) { + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Kind() != reflectTime) { // Note: The only error here is section doesn't exist. sec, err := s.f.GetSection(fieldName) if err != nil { diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile index ad17bf0..a828d28 100644 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -35,7 +35,7 @@ buildfuzz: go-fuzz-build github.com/jmespath/go-jmespath/fuzz fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata bench: go test -bench . 
-cpuprofile cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 67df3fc..9cfa988 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -1,5 +1,42 @@ package jmespath +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + // Search evaluates a JMESPath expression against input data and returns the result. func Search(expression string, data interface{}) (interface{}, error) { intr := newInterpreter() diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go index 8a3f2ef..9b7cd89 100644 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math" + "reflect" "sort" "strconv" "strings" @@ -124,197 +125,197 @@ type functionCaller struct { func newFunctionCaller() *functionCaller { caller := &functionCaller{} caller.functionTable = map[string]functionEntry{ - "length": functionEntry{ + "length": { name: "length", arguments: []argSpec{ - argSpec{types: []jpType{jpString, jpArray, jpObject}}, + {types: []jpType{jpString, jpArray, jpObject}}, }, handler: jpfLength, }, - "starts_with": functionEntry{ + "starts_with": { name: "starts_with", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, }, handler: jpfStartsWith, }, - "abs": functionEntry{ + "abs": { name: "abs", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfAbs, }, - "avg": functionEntry{ + "avg": { name: "avg", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber}}, + {types: []jpType{jpArrayNumber}}, }, handler: jpfAvg, }, - "ceil": functionEntry{ + "ceil": { name: "ceil", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfCeil, }, - "contains": functionEntry{ + "contains": { name: "contains", arguments: []argSpec{ - argSpec{types: []jpType{jpArray, jpString}}, - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, }, handler: jpfContains, }, - "ends_with": functionEntry{ + "ends_with": { name: "ends_with", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: 
[]jpType{jpString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, }, handler: jpfEndsWith, }, - "floor": functionEntry{ + "floor": { name: "floor", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfFloor, }, - "map": functionEntry{ + "map": { name: "amp", arguments: []argSpec{ - argSpec{types: []jpType{jpExpref}}, - argSpec{types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, }, handler: jpfMap, hasExpRef: true, }, - "max": functionEntry{ + "max": { name: "max", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + {types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMax, }, - "merge": functionEntry{ + "merge": { name: "merge", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}, variadic: true}, + {types: []jpType{jpObject}, variadic: true}, }, handler: jpfMerge, }, - "max_by": functionEntry{ + "max_by": { name: "max_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfMaxBy, hasExpRef: true, }, - "sum": functionEntry{ + "sum": { name: "sum", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber}}, + {types: []jpType{jpArrayNumber}}, }, handler: jpfSum, }, - "min": functionEntry{ + "min": { name: "min", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + {types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMin, }, - "min_by": functionEntry{ + "min_by": { name: "min_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfMinBy, hasExpRef: true, }, - "type": functionEntry{ + "type": { name: "type", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfType, }, - "keys": functionEntry{ + "keys": { name: "keys", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}}, + {types: []jpType{jpObject}}, }, handler: jpfKeys, }, - "values": functionEntry{ + "values": { name: "values", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}}, + {types: []jpType{jpObject}}, }, handler: jpfValues, }, - "sort": functionEntry{ + "sort": { name: "sort", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + {types: []jpType{jpArrayString, jpArrayNumber}}, }, handler: jpfSort, }, - "sort_by": functionEntry{ + "sort_by": { name: "sort_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfSortBy, hasExpRef: true, }, - "join": functionEntry{ + "join": { name: "join", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpArrayString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, }, handler: jpfJoin, }, - "reverse": functionEntry{ + "reverse": { name: "reverse", arguments: []argSpec{ - argSpec{types: []jpType{jpArray, jpString}}, + {types: []jpType{jpArray, jpString}}, }, handler: jpfReverse, }, - "to_array": functionEntry{ + "to_array": { name: "to_array", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToArray, }, - "to_string": functionEntry{ + "to_string": { name: "to_string", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, 
}, handler: jpfToString, }, - "to_number": functionEntry{ + "to_number": { name: "to_number", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToNumber, }, - "not_null": functionEntry{ + "not_null": { name: "not_null", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}, variadic: true}, + {types: []jpType{jpAny}, variadic: true}, }, handler: jpfNotNull, }, @@ -357,7 +358,7 @@ func (a *argSpec) typeCheck(arg interface{}) error { return nil } case jpArray: - if _, ok := arg.([]interface{}); ok { + if isSliceType(arg) { return nil } case jpObject: @@ -409,8 +410,9 @@ func jpfLength(arguments []interface{}) (interface{}, error) { arg := arguments[0] if c, ok := arg.(string); ok { return float64(utf8.RuneCountInString(c)), nil - } else if c, ok := arg.([]interface{}); ok { - return float64(len(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil } else if c, ok := arg.(map[string]interface{}); ok { return float64(len(c)), nil } diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index c8f4bce..1240a17 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -353,7 +353,7 @@ func (p *Parser) nud(token token) (ASTNode, error) { case tFlatten: left := ASTNode{ nodeType: ASTFlatten, - children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, + children: []ASTNode{{nodeType: ASTIdentity}}, } right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) if err != nil { @@ -378,7 +378,7 @@ func (p *Parser) nud(token token) (ASTNode, error) { } return ASTNode{ nodeType: ASTProjection, - children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, + children: []ASTNode{{nodeType: ASTIdentity}, right}, }, nil } else { return p.parseMultiSelectList() diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE new file mode 100644 index 0000000..e7ddd51 --- /dev/null +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -0,0 +1,23 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
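Returning to the go-jmespath changes above, the new Compile/MustCompile entry points let an expression be parsed once and reused across goroutines; a small sketch with an invented query and document:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Parse the expression once, reuse it for many documents.
	expr := jmespath.MustCompile("instances[?state=='running'].id")

	data := map[string]interface{}{
		"instances": []interface{}{
			map[string]interface{}{"id": "i-1", "state": "running"},
			map[string]interface{}{"id": "i-2", "state": "stopped"},
		},
	}

	result, err := expr.Search(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result) // [i-1]
}
```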
+ diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md new file mode 100644 index 0000000..05c47e6 --- /dev/null +++ b/vendor/github.com/joho/godotenv/README.md @@ -0,0 +1,127 @@ +# GoDotEnv [![wercker status](https://app.wercker.com/status/507594c2ec7e60f19403a568dfea0f78 "wercker status")](https://app.wercker.com/project/bykey/507594c2ec7e60f19403a568dfea0f78) + +A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) + +From the original Library: + +> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. +> +> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. + +It can be used as a library (for loading in env for your own daemons etc) or as a bin command. + +There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. + +## Installation + +As a library + +```shell +go get github.com/joho/godotenv +``` + +or if you want to use it as a bin command +```shell +go get github.com/joho/godotenv/cmd/godotenv +``` + +## Usage + +Add your application configuration to your `.env` file in the root of your project: + +```shell +S3_BUCKET=YOURS3BUCKET +SECRET_KEY=YOURSECRETKEYGOESHERE +``` + +Then in your Go app you can do something like + +```go +package main + +import ( + "github.com/joho/godotenv" + "log" + "os" +) + +func main() { + err := godotenv.Load() + if err != nil { + log.Fatal("Error loading .env file") + } + + s3Bucket := os.Getenv("S3_BUCKET") + secretKey := os.Getenv("SECRET_KEY") + + // now do something with s3 or whatever +} +``` + +If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import + +```go +import _ "github.com/joho/godotenv/autoload" +``` + +While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit + +```go +_ = godotenv.Load("somerandomfile") +_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") +``` + +If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) + +```shell +# I am a comment and that is OK +SOME_VAR=someval +FOO=BAR # comments at line end are OK too +export BAR=BAZ +``` + +Or finally you can do YAML(ish) style + +```yaml +FOO: bar +BAR: baz +``` + +as a final aside, if you don't want godotenv munging your env you can just get a map back instead + +```go +var myEnv map[string]string +myEnv, err := godotenv.Read() + +s3Bucket := myEnv["S3_BUCKET"] +``` + +### Command Mode + +Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` + +``` +godotenv -f /some/path/to/.env some_command with some args +``` + +If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` + +## Contributing + +Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. + +*code changes without tests will not be accepted* + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. 
Commit your changes (`git commit -am 'Added some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## CI + +Linux: [![wercker status](https://app.wercker.com/status/507594c2ec7e60f19403a568dfea0f78/m "wercker status")](https://app.wercker.com/project/bykey/507594c2ec7e60f19403a568dfea0f78) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) + +## Who? + +The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](http://whoisjohnbarton.com) based off the tests/fixtures in the original library. diff --git a/vendor/github.com/joho/godotenv/autoload/autoload.go b/vendor/github.com/joho/godotenv/autoload/autoload.go new file mode 100644 index 0000000..fbcd2bd --- /dev/null +++ b/vendor/github.com/joho/godotenv/autoload/autoload.go @@ -0,0 +1,15 @@ +package autoload + +/* + You can just read the .env file on import just by doing + + import _ "github.com/joho/godotenv/autoload" + + And bob's your mother's brother +*/ + +import "github.com/joho/godotenv" + +func init() { + godotenv.Load() +} diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 0000000..94b2676 --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,229 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be avaiable through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "os" + "os/exec" + "strings" +) + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
+func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + for key, value := range envMap { + if os.Getenv(key) == "" || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + key, value, err := parseLine(fullLine) + + if err == nil { + envMap[key] = value + } + } + } + return +} + +func parseLine(line string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + // now split key from value + splitString := strings.SplitN(line, "=", 2) + + if len(splitString) != 2 { + // try yaml mode! 
+ splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = splitString[1] + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values + if strings.Count(value, "\"") == 2 || strings.Count(value, "'") == 2 { + // pull the quotes off the edges + value = strings.Trim(value, "\"'") + + // expand quotes + value = strings.Replace(value, "\\\"", "\"", -1) + // expand newlines + value = strings.Replace(value, "\\n", "\n", -1) + } + + return +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} diff --git a/vendor/github.com/joho/godotenv/wercker.yml b/vendor/github.com/joho/godotenv/wercker.yml new file mode 100644 index 0000000..c716ac9 --- /dev/null +++ b/vendor/github.com/joho/godotenv/wercker.yml @@ -0,0 +1 @@ +box: pjvds/golang diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad6..0000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99f..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. 
Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
"<P>"
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go index 32e3390..d9d4637 100644 --- a/vendor/github.com/ryanuber/go-glob/glob.go +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -30,30 +30,22 @@ func Glob(pattern, subj string) bool { trailingGlob := strings.HasSuffix(pattern, GLOB) end := len(parts) - 1 - for i, part := range parts { - switch i { - case 0: - if leadingGlob { - continue - } - if !strings.HasPrefix(subj, part) { - return false - } - case end: - if len(subj) > 0 { - return trailingGlob || strings.HasSuffix(subj, part) - } - default: - if !strings.Contains(subj, part) { - return false - } + // Check the first section. Requires special handling. + if !leadingGlob && !strings.HasPrefix(subj, parts[0]) { + return false + } + + // Go over the middle parts and ensure they match. + for i := 1; i < end; i++ { + if !strings.Contains(subj, parts[i]) { + return false } // Trim evaluated text from subj as we loop over the pattern. - idx := strings.Index(subj, part) + len(part) + idx := strings.Index(subj, parts[i]) + len(parts[i]) subj = subj[idx:] } - // All parts of the pattern matched - return true + // Reached the last section. Requires special handling. 
+ return trailingGlob || strings.HasSuffix(subj, parts[end]) } diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 473b670..0000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index e6a7960..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,387 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND -*/ - -package assert - -import ( - - http "net/http" - url "net/url" - time "time" -) - - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - return Condition(a.t, comp, msgAndArgs...) -} - - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - return Contains(a.t, s, contains, msgAndArgs...) -} - - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - return Empty(a.t, object, msgAndArgs...) -} - - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return Equal(a.t, expected, actual, msgAndArgs...) -} - - -// EqualError asserts that a function returned an error (i.e. 
not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - return EqualError(a.t, theError, errString, msgAndArgs...) -} - - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - return Error(a.t, err, msgAndArgs...) -} - - -// Exactly asserts that two objects are equal is value and type. -// -// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return Exactly(a.t, expected, actual, msgAndArgs...) -} - - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - return Fail(a.t, failureMessage, msgAndArgs...) -} - - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - return FailNow(a.t, failureMessage, msgAndArgs...) -} - - -// False asserts that the specified value is false. -// -// a.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - return False(a.t, value, msgAndArgs...) -} - - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, method, url, values, str) -} - - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, method, url, values, str) -} - - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPError(a.t, handler, method, url, values) -} - - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, method, url, values) -} - - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, method, url, values) -} - - -// Implements asserts that an object is implemented by the specified interface. -// -// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - - -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) -} - - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return IsType(a.t, expectedType, object, msgAndArgs...) -} - - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - return Len(a.t, object, length, msgAndArgs...) -} - - -// Nil asserts that the specified object is nil. -// -// a.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - return Nil(a.t, object, msgAndArgs...) -} - - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - return NoError(a.t, err, msgAndArgs...) -} - - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - return NotContains(a.t, s, contains, msgAndArgs...) -} - - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - return NotEmpty(a.t, object, msgAndArgs...) -} - - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - return NotNil(a.t, object, msgAndArgs...) -} - - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return NotPanics(a.t, f, msgAndArgs...) -} - - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - - -// NotZero asserts that i is not the zero value for its type and returns the truth. 
-func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - return NotZero(a.t, i, msgAndArgs...) -} - - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return Panics(a.t, f, msgAndArgs...) -} - - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return Regexp(a.t, rx, str, msgAndArgs...) -} - - -// True asserts that the specified value is true. -// -// a.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - return True(a.t, value, msgAndArgs...) -} - - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - - -// Zero asserts that i is the zero value for its type and returns the truth. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - return Zero(a.t, i, msgAndArgs...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 99f9acf..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,4 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index d7c16c5..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1004 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "math" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - - if expected == nil || actual == nil { - return expected == actual - } - - return reflect.DeepEqual(expected, actual) - -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. 
-func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occured in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - return nil - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - parts := strings.Split(file, "/") - dir := parts[len(parts)-2] - file = parts[len(parts)-1] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -// getWhitespaceString returns a string that is long enough to overwrite the default -// output from the go testing framework. -func getWhitespaceString() string { - - _, file, line, ok := runtime.Caller(1) - if !ok { - return "" - } - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) - -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's -// test printing (see inner comment for specifics) -func indentMessageLines(message string, tabs int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - if i != 0 { - outBuf.WriteRune('\n') - } - for ii := 0; ii < tabs; ii++ { - outBuf.WriteRune('\t') - // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter - // by 1 prematurely. 
- if ii == 0 && i > 0 { - ii++ - } - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - - errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") - if len(message) > 0 { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n"+ - "\r\tMessages:\t%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2), - message) - } else { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2)) - } - - return false -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true - -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if !ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). 
-func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -var numericZeros = []interface{}{ - int(0), - int8(0), - int16(0), - int32(0), - int64(0), - uint(0), - uint8(0), - uint16(0), - uint32(0), - uint64(0), - float32(0), - float64(0), -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - if object == nil { - return true - } else if object == "" { - return true - } else if object == false { - return true - } - - for _, v := range numericZeros { - if object == v { - return true - } - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - case reflect.Map: - fallthrough - case reflect.Slice, reflect.Chan: - { - return (objValue.Len() == 0) - } - case reflect.Struct: - switch object.(type) { - case time.Time: - return object.(time.Time).IsZero() - } - case reflect.Ptr: - { - if objValue.IsNil() { - return true - } - switch object.(type) { - case *time.Time: - return object.(*time.Time).IsZero() - default: - return false - } - } - } - return false -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -// -// Returns whether the assertion was successful (true) or not (false). -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. 
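A short sketch of the nil/empty helpers above, plus Len from the lines that follow, again assuming the upstream testify API; note that a nil slice counts as both nil and empty.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNilEmptyLen(t *testing.T) {
	var s []string
	assert.Nil(t, s)   // a nil slice is nil
	assert.Empty(t, s) // nil, "", false, 0 and zero-length containers are all "empty"
	assert.Empty(t, "")

	assert.NotNil(t, &s)
	assert.NotEmpty(t, []int{1})
	assert.Len(t, []int{1, 2, 3}, 3) // Len uses reflect's Len under the hood
}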
-func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if reflect.TypeOf(list).Kind() == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// -// Returns whether the assertion was successful (true) or not (false). 
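A sketch of Contains, whose doc comment ends just above and whose implementation follows: it matches substrings for strings, elements for slices and arrays, and keys (not values) for maps.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestContains(t *testing.T) {
	assert.Contains(t, "Hello World", "World")                       // substring
	assert.Contains(t, []string{"Hello", "World"}, "World")          // slice element
	assert.Contains(t, map[string]string{"Hello": "World"}, "Hello") // map key, not value
	assert.NotContains(t, "Hello World", "Earth")
}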
-func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") -// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). 
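A sketch of the tolerance helpers in this hunk (WithinDuration is documented above; InDelta and InEpsilon follow), assuming the upstream API: WithinDuration and InDelta bound the absolute difference, InEpsilon bounds the relative error.

package example

import (
	"math"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestTolerances(t *testing.T) {
	start := time.Now()
	assert.WithinDuration(t, start, start.Add(5*time.Second), 10*time.Second) // |diff| <= delta

	assert.InDelta(t, math.Pi, 22/7.0, 0.01) // absolute difference
	assert.InEpsilon(t, 100.0, 101.0, 0.02)  // relative error: |100-101|/|100| = 0.01 < 0.02
}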
-func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) - if !result { - return result - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) 
- } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if isNil(err) { - return true - } - - return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - return NotNil(t, err, "An error is expected but got nil. %s", message) - -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if !NotNil(t, theError, "An error is expected but got nil. %s", message) { - return false - } - s := "An error with value \"%s\" is expected but got \"%s\". %s" - return Equal(t, errString, theError.Error(), - s, errString, theError.Error(), message) -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. 
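A sketch of the error and regexp assertions above, assuming the upstream API; EqualError compares err.Error() verbatim, and string patterns handed to Regexp/NotRegexp are compiled with regexp.MustCompile.

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestErrorHelpers(t *testing.T) {
	var err error
	assert.NoError(t, err) // nil error: passes

	err = errors.New("boom")
	assert.Error(t, err)
	assert.EqualError(t, err, "boom")    // compares err.Error() exactly
	assert.Regexp(t, "^bo", err.Error()) // string patterns become regexps
	assert.NotRegexp(t, "^zz", err.Error())
}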
-// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type and returns the truth. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { - return "" - } - - spew.Config.SortKeys = true - e := spew.Sdump(expected) - a := spew.Sdump(actual) - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
-// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index b867e95..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index e1b9442..0000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,106 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 -// if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return -1 - } - handler(w, req) - return w.Code -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). 
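A sketch of the HTTP assertions implemented in the following lines, assuming the upstream API; the greet handler is a made-up example, and the helpers drive it through httptest recorders rather than a real server.

package example

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// greet is a hypothetical handler used only for this sketch.
func greet(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello, %s", r.URL.Query().Get("name"))
}

func TestHTTPHelpers(t *testing.T) {
	values := url.Values{"name": []string{"world"}}
	assert.HTTPSuccess(t, greet, "GET", "/greet", values)               // 2xx status
	assert.HTTPBodyContains(t, greet, "GET", "/greet", values, "world") // body substring
}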
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusOK && code <= http.StatusPartialContent -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { - code := httpCode(handler, method, url, values) - if code == -1 { - return false - } - return code >= http.StatusBadRequest -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) - } - - return !contains -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index 77b64d0..0000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. 
Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. 
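A minimal sketch of the cancelation flow implemented by WithCancel and cancelCtx below: canceling closes Done() and fixes Err() to Canceled.

package example

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	// Derive a cancelable child from the Background context defined above.
	ctx, cancel := context.WithCancel(context.Background())

	cancel() // closes ctx.Done() and records Canceled as ctx.Err()

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context canceled
}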
-// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. 
- child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
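A sketch combining WithTimeout and WithValue from this hunk; the key type and request-ID value are illustrative, following the unexported-key convention recommended in the package doc above.

package example

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// key is an unexported key type, as the package doc above recommends.
type key int

const requestIDKey key = 0

func main() {
	// WithTimeout is just WithDeadline(parent, time.Now().Add(timeout)).
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	ctx = context.WithValue(ctx, requestIDKey, "req-42")

	<-ctx.Done()
	fmt.Println(ctx.Err())               // context deadline exceeded
	fmt.Println(ctx.Value(requestIDKey)) // req-42
}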
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b1..0000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index d02f24f..0000000 --- a/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The oauth2 Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 0d51417..0000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means its no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. - - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") - } - diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go deleted file mode 100644 index 8962c49..0000000 --- a/vendor/golang.org/x/oauth2/client_appengine.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// App Engine hooks. 
- -package oauth2 - -import ( - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" - "google.golang.org/appengine/urlfetch" -) - -func init() { - internal.RegisterContextClientFunc(contextClientAppEngine) -} - -func contextClientAppEngine(ctx context.Context) (*http.Client, error) { - return urlfetch.Client(ctx), nil -} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index fbe1028..0000000 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "bufio" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "strings" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": map[string]string{}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -func CondVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index 39caf6c..0000000 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" -) - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. 
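parseRSAKey below is a hypothetical stand-in (not the library's API) that mirrors what the deleted internal ParseKey does, using only the standard crypto/x509 and encoding/pem packages.

package example

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// parseRSAKey strips an optional PEM container, tries PKCS#8, then falls
// back to PKCS#1, matching the behaviour of the removed helper.
func parseRSAKey(key []byte) (*rsa.PrivateKey, error) {
	if block, _ := pem.Decode(key); block != nil {
		key = block.Bytes
	}
	parsed, err := x509.ParsePKCS8PrivateKey(key)
	if err != nil {
		return x509.ParsePKCS1PrivateKey(key)
	}
	rsaKey, ok := parsed.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("private key is not an RSA key")
	}
	return rsaKey, nil
}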
-// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - *e = expirationTime(i) - return nil -} - -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://api.dropbox.com/", - "https://api.instagram.com/", - "https://api.netatmo.net/", - "https://api.odnoklassniki.ru/", - "https://api.pushbullet.com/", - "https://api.soundcloud.com/", - "https://api.twitch.tv/", - "https://app.box.com/", - "https://connect.stripe.com/", - "https://login.microsoftonline.com/", - "https://login.salesforce.com/", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://oauth.vk.com/", - "https://slack.com/", - "https://test-sandbox.auth.corp.google.com", - "https://test.salesforce.com/", - "https://user.gini.net/", - "https://www.douban.com/", - "https://www.googleapis.com/", - "https://www.linkedin.com/", - "https://www.strava.com/oauth/", -} - -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. 
-// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. - return true -} - -func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { - hc, err := ContextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", ClientID) - bustedAuth := !providerAuthHeaderWorks(TokenURL) - if bustedAuth && ClientSecret != "" { - v.Set("client_secret", ClientSecret) - } - req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { - req.SetBasicAuth(ClientID, ClientSecret) - } - r, err := hc.Do(req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. - e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index f1f173e..0000000 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. 
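An illustrative sketch of the JSON payload the deleted RetrieveToken/tokenJSON code parses; tokenResponse is a hypothetical type, and the real code also handles form-encoded responses and the non-standard "expires" field.

package example

import (
	"encoding/json"
	"fmt"
)

// tokenResponse mirrors the JSON fields the removed tokenJSON type reads
// from a provider's token endpoint (expires_in handling is simplified).
type tokenResponse struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int64  `json:"expires_in"`
}

func main() {
	body := []byte(`{"access_token":"abc","token_type":"Bearer","expires_in":3600}`)

	var tr tokenResponse
	if err := json.Unmarshal(body, &tr); err != nil {
		panic(err)
	}
	fmt.Println(tr.AccessToken, tr.TokenType, tr.ExpiresIn) // abc Bearer 3600
}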
-package internal - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -// ContextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. -type ContextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []ContextClientFunc - -func RegisterContextClientFunc(fn ContextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func ContextClient(ctx context.Context) (*http.Client, error) { - if ctx != nil { - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - } - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - return http.DefaultClient, nil -} - -func ContextTransport(ctx context.Context) http.RoundTripper { - hc, err := ContextClient(ctx) - // This is a rare error case (somebody using nil on App Engine). - if err != nil { - return ErrorTransport{err} - } - return hc.Transport -} - -// ErrorTransport returns the specified error on RoundTrip. -// This RoundTripper should be used in rare error cases where -// error handling can be postponed to response handling time. -type ErrorTransport struct{ Err error } - -func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.Err -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index a682896..0000000 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. -// It can additionally grant authorization with Bearer JWT. -package oauth2 // import "golang.org/x/oauth2" - -import ( - "bytes" - "errors" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider registers an OAuth2 server -// identified by the tokenURL prefix as an OAuth2 implementation -// which doesn't support the HTTP Basic authentication -// scheme to authenticate with the authorization server. -// Once a server is registered, credentials (client_id and client_secret) -// will be passed as query parameters rather than being present -// in the Authorization header. -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - internal.RegisterBrokenAuthHeaderProvider(tokenURL) -} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. 
-type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint contains the OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string -} - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. 
-func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - "state": internal.CondVal(state), - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. -func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. -type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. 
-func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. -type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - c, err := internal.ContextClient(ctx) - if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} - } - return c - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextTransport(ctx), - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. 
- return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 7a3167f..0000000 --- a/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. -func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. 
-func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. -// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) - if err != nil { - return nil, err - } - return tokenFromInternal(tk), nil -} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index 92ac7e2..0000000 --- a/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. 
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/vendor.json b/vendor/vendor.json deleted file mode 100644 index 8ea185d..0000000 --- a/vendor/vendor.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "path": "github.com/aws/aws-sdk-go/aws", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - 
"revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "996f601d8198d4fdc49bb5d93c73f27a6bdfe900", - "revisionTime": "2016-03-09T12:56:27-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "996f601d8198d4fdc49bb5d93c73f27a6bdfe900", - "revisionTime": "2016-03-09T12:56:27-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "996f601d8198d4fdc49bb5d93c73f27a6bdfe900", - "revisionTime": "2016-03-09T12:56:27-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/signer/v4", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/service/cloudFront", - "revision": "996f601d8198d4fdc49bb5d93c73f27a6bdfe900", - "revisionTime": "2016-03-09T12:56:27-08:00" - }, - { - "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/davecgh/go-spew/spew", - "revision": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d", - "revisionTime": "2015-11-05T15:09:06-06:00" - }, - { - "path": "github.com/drone/drone-go/drone", - "revision": "ab38cefd63183bbb1252fef0b2b8b410e4370fe1", - "revisionTime": "2016-03-31T17:54:51-07:00" - }, - { - "path": "github.com/drone/drone-go/plugin", - "revision": "ab38cefd63183bbb1252fef0b2b8b410e4370fe1", - "revisionTime": "2016-03-31T17:54:51-07:00" - }, - { - "origin": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", - "path": "github.com/go-ini/ini", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - "revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "origin": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", - "path": "github.com/jmespath/go-jmespath", - "revision": "f22f230de2ce5d5bd15c415fb72d30a77931ac56", - 
"revisionTime": "2016-01-21T13:38:05-08:00" - }, - { - "path": "github.com/pmezard/go-difflib/difflib", - "revision": "792786c7400a136282c1664665ae0a8db921c6c2", - "revisionTime": "2016-01-10T11:55:54+01:00" - }, - { - "path": "github.com/ryanuber/go-glob", - "revision": "0067a9abd927e50aed5190662702f81231413ae0", - "revisionTime": "2014-06-17T10:15:57-07:00" - }, - { - "path": "github.com/stretchr/testify/assert", - "revision": "6fe211e493929a8aac0469b93f28b1d0688a9a3a", - "revisionTime": "2016-03-05T16:54:46Z" - }, - { - "path": "golang.org/x/net/context", - "revision": "6ccd6698c634f5d835c40c1c31848729e0cecda1", - "revisionTime": "2016-02-02T14:37:19-08:00" - }, - { - "path": "golang.org/x/oauth2", - "revision": "8a57ed94ffd43444c0879fe75701732a38afc985", - "revisionTime": "2015-12-29T21:02:54-07:00" - }, - { - "path": "golang.org/x/oauth2/internal", - "revision": "8a57ed94ffd43444c0879fe75701732a38afc985", - "revisionTime": "2015-12-29T21:02:54-07:00" - } - ] -}