Mirror of https://github.com/thegeeklab/wp-s3-action.git (synced 2024-11-10 04:40:38 +00:00)

Commit b0dadb7aa6 (parent 353adc52b0)

Updated to current build process (#39)

* Dropped vendoring directory
* Updated to current build process
.appveyor.yml (new file, 68 lines)
@ -0,0 +1,68 @@
version: '{build}'

image: 'Visual Studio 2017'

platform: 'x64'

clone_folder: 'c:\gopath\src\github.com\drone-plugins\drone-s3-sync'

max_jobs: 1

environment:
  GOPATH: c:\gopath
  DOCKER_USERNAME:
    secure: '4YzzahbEiMZQJpOCOd1LAw=='
  DOCKER_PASSWORD:
    secure: 'VqO/G3Zfslu6zSLdwHKO+Q=='

install:
  - ps: |
      docker version
      go version
  - ps: |
      $env:Path = "c:\gopath\bin;$env:Path"

build_script:
  - ps: |
      go get -u github.com/golang/dep/cmd/dep
      dep ensure

      if ( $env:APPVEYOR_REPO_TAG -eq 'false' ) {
        go build -ldflags "-X main.build=$env:APPVEYOR_BUILD_VERSION" -a -o release/drone-s3-sync.exe
      } else {
        $version = $env:APPVEYOR_REPO_TAG_NAME.substring(1)
        go build -ldflags "-X main.version=$version -X main.build=$env:APPVEYOR_BUILD_VERSION" -a -o release/drone-s3-sync.exe
      }

      docker pull microsoft/nanoserver:10.0.14393.1593
      docker build -f Dockerfile.windows -t plugins/s3-sync:windows-amd64 .

test_script:
  - ps: |
      docker run --rm plugins/s3-sync:windows-amd64 --version

deploy_script:
  - ps: |
      $ErrorActionPreference = 'Stop';

      if ( $env:APPVEYOR_PULL_REQUEST_NUMBER ) {
        Write-Host Nothing to deploy.
      } else {
        docker login --username $env:DOCKER_USERNAME --password $env:DOCKER_PASSWORD

        if ( $env:APPVEYOR_REPO_TAG -eq 'true' ) {
          $major,$minor,$patch = $env:APPVEYOR_REPO_TAG_NAME.substring(1).split('.')

          docker push plugins/s3-sync:windows-amd64

          docker tag plugins/s3-sync:windows-amd64 plugins/s3-sync:$major.$minor.$patch-windows-amd64
          docker push plugins/s3-sync:$major.$minor.$patch-windows-amd64

          docker tag plugins/s3-sync:windows-amd64 plugins/s3-sync:$major.$minor-windows-amd64
          docker push plugins/s3-sync:$major.$minor-windows-amd64

          docker tag plugins/s3-sync:windows-amd64 plugins/s3-sync:$major-windows-amd64
          docker push plugins/s3-sync:$major-windows-amd64
        } else {
          if ( $env:APPVEYOR_REPO_BRANCH -eq 'master' ) {
            docker push plugins/s3-sync:windows-amd64
          }
        }
      }
.drone.yml (136 changed lines)
@ -3,15 +3,22 @@ workspace:
   path: src/github.com/drone-plugins/drone-s3-sync
 
 pipeline:
-  test:
-    image: golang:1.9
+  deps:
+    image: golang:1.10
     pull: true
     commands:
-      - go vet
-      - go test -cover -coverprofile=coverage.out
+      - go get -u github.com/golang/dep/cmd/dep
+      - dep ensure
+
+  test:
+    image: golang:1.10
+    pull: true
+    commands:
+      - go vet ./...
+      - go test -cover ./...
 
   build_linux_amd64:
-    image: golang:1.9
+    image: golang:1.10
     pull: true
     group: build
     environment:
@ -19,10 +26,31 @@ pipeline:
       - GOARCH=amd64
       - CGO_ENABLED=0
     commands:
-      - go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/amd64/drone-s3-sync
+      - |
+        if test "${DRONE_TAG}" = ""; then
+          go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/amd64/drone-s3-sync
+        else
+          go build -v -ldflags "-X main.version=${DRONE_TAG##v} -X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/amd64/drone-s3-sync
+        fi
+
+  build_linux_i386:
+    image: golang:1.10
+    pull: true
+    group: build
+    environment:
+      - GOOS=linux
+      - GOARCH=386
+      - CGO_ENABLED=0
+    commands:
+      - |
+        if test "${DRONE_TAG}" = ""; then
+          go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/i386/drone-s3-sync
+        else
+          go build -v -ldflags "-X main.version=${DRONE_TAG##v} -X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/i386/drone-s3-sync
+        fi
 
   build_linux_arm64:
-    image: golang:1.9
+    image: golang:1.10
     pull: true
     group: build
     environment:
@ -30,10 +58,15 @@ pipeline:
       - GOARCH=arm64
       - CGO_ENABLED=0
     commands:
-      - go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/arm64/drone-s3-sync
+      - |
+        if test "${DRONE_TAG}" = ""; then
+          go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/arm64/drone-s3-sync
+        else
+          go build -v -ldflags "-X main.version=${DRONE_TAG##v} -X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/arm64/drone-s3-sync
+        fi
 
   build_linux_arm:
-    image: golang:1.9
+    image: golang:1.10
     pull: true
     group: build
     environment:
@ -42,67 +75,74 @@ pipeline:
       - CGO_ENABLED=0
       - GOARM=7
     commands:
-      - go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/arm/drone-s3-sync
-
-  # build_windows_amd64:
-  #   image: golang:1.9-nanoserver
-  #   pull: true
-  #   group: build
-  #   environment:
-  #     - GOOS=windows
-  #     - GOARCH=amd64
-  #     - CGO_ENABLED=0
-  #   commands:
-  #     - go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/windows/amd64/drone-s3-sync
+      - |
+        if test "${DRONE_TAG}" = ""; then
+          go build -v -ldflags "-X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/arm/drone-s3-sync
+        else
+          go build -v -ldflags "-X main.version=${DRONE_TAG##v} -X main.build=${DRONE_BUILD_NUMBER}" -a -o release/linux/arm/drone-s3-sync
+        fi
 
   publish_linux_amd64:
-    image: plugins/docker:17.05
+    image: plugins/docker:17.12
     pull: true
-    repo: plugins/s3-sync
-    tags: [ latest, 1.1.0, 1.1, 1 ]
     secrets: [ docker_username, docker_password ]
+    group: docker
+    repo: plugins/s3-sync
+    auto_tag: true
+    auto_tag_suffix: linux-amd64
+    dockerfile: Dockerfile
     when:
-      branch: master
-      event: push
+      event: [ push, tag ]
+
+  publish_linux_i386:
+    image: plugins/docker:17.12
+    pull: true
+    secrets: [ docker_username, docker_password ]
+    group: docker
+    repo: plugins/s3-sync
+    auto_tag: true
+    auto_tag_suffix: linux-i386
+    dockerfile: Dockerfile.i386
+    when:
+      event: [ push, tag ]
 
   publish_linux_arm64:
-    image: plugins/docker:17.05
+    image: plugins/docker:17.12
     pull: true
-    repo: plugins/s3-sync
-    tags: [ linux-arm64 ]
     secrets: [ docker_username, docker_password ]
+    group: docker
+    repo: plugins/s3-sync
+    auto_tag: true
+    auto_tag_suffix: linux-arm64
     dockerfile: Dockerfile.arm64
     when:
-      branch: master
-      event: push
+      event: [ push, tag ]
 
   publish_linux_arm:
-    image: plugins/docker:17.05
+    image: plugins/docker:17.12
     pull: true
-    repo: plugins/s3-sync
-    tags: [ linux-arm ]
     secrets: [ docker_username, docker_password ]
+    group: docker
+    repo: plugins/s3-sync
+    auto_tag: true
+    auto_tag_suffix: linux-arm
    dockerfile: Dockerfile.arm
     when:
-      branch: master
-      event: push
+      event: [ push, tag ]
 
-  # publish_windows_amd64:
-  #   image: plugins/docker:17.05
-  #   pull: true
-  #   repo: plugins/s3-sync
-  #   tags: [ windows-amd64 ]
-  #   secrets: [ docker_username, docker_password ]
-  #   dockerfile: Dockerfile.windows
-  #   when:
-  #     branch: master
-  #     event: push
+  manifests:
+    image: plugins/manifest:1
+    pull: true
+    secrets: [ docker_username, docker_password ]
+    spec: manifest.tmpl
+    auto_tag: true
+    ignore_missing: true
+    when:
+      event: [ push, tag ]
 
   microbadger:
     image: plugins/webhook:1
     pull: true
     secrets: [ webhook_url ]
     when:
-      branch: master
-      event: push
       status: [ success ]
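Both CI configurations above stamp the binary through -ldflags ("-X main.version=..." and "-X main.build=...") and smoke-test the published image with "drone-s3-sync --version". The plugin's main.go is not part of this diff, so the following is only a minimal sketch, assuming package-level version and build strings in package main, of how such link-time values are typically exposed; the variable names are taken from the -X targets above, everything else is illustrative.

package main

import (
    "flag"
    "fmt"
)

// Overridden at link time, e.g.:
//   go build -ldflags "-X main.version=1.2.3 -X main.build=42"
var (
    version string
    build   string
)

func main() {
    // A hypothetical --version flag so the CI smoke test has something to print.
    showVersion := flag.Bool("version", false, "print version and build, then exit")
    flag.Parse()

    if *showVersion {
        fmt.Printf("drone-s3-sync version=%s build=%s\n", version, build)
        return
    }
    // The real plugin logic (the S3 sync itself) is outside the scope of this sketch.
}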
.github/issue_template.md (new empty file)
.github/pull_request_template.md (new empty file)
.gitignore (5 changed lines)
@ -22,8 +22,9 @@ _testmain.go
 *.exe
 *.test
 *.prof
-.env
 
-release/*
+release/
+vendor/
+
 coverage.out
 drone-s3-sync
Dockerfile (12 changed lines)
@ -1,11 +1,9 @@
 FROM plugins/base:multiarch
-MAINTAINER Drone.IO Community <drone-dev@googlegroups.com>
 
-LABEL org.label-schema.version=latest
-LABEL org.label-schema.vcs-url="https://github.com/drone-plugins/drone-s3-sync.git"
-LABEL org.label-schema.name="Drone S3 Sync"
-LABEL org.label-schema.vendor="Drone.IO Community"
-LABEL org.label-schema.schema-version="1.0"
+LABEL maintainer="Drone.IO Community <drone-dev@googlegroups.com>" \
+  org.label-schema.name="Drone S3 Sync" \
+  org.label-schema.vendor="Drone.IO Community" \
+  org.label-schema.schema-version="1.0"
 
 ADD release/linux/amd64/drone-s3-sync /bin/
-ENTRYPOINT [ "/bin/drone-s3-sync" ]
+ENTRYPOINT ["/bin/drone-s3-sync"]
Dockerfile.arm
@ -1,11 +1,9 @@
 FROM plugins/base:multiarch
-MAINTAINER Drone.IO Community <drone-dev@googlegroups.com>
 
-LABEL org.label-schema.version=latest
-LABEL org.label-schema.vcs-url="https://github.com/drone-plugins/drone-s3-sync.git"
-LABEL org.label-schema.name="Drone S3 Sync"
-LABEL org.label-schema.vendor="Drone.IO Community"
-LABEL org.label-schema.schema-version="1.0"
+LABEL maintainer="Drone.IO Community <drone-dev@googlegroups.com>" \
+  org.label-schema.name="Drone S3 Sync" \
+  org.label-schema.vendor="Drone.IO Community" \
+  org.label-schema.schema-version="1.0"
 
 ADD release/linux/arm/drone-s3-sync /bin/
-ENTRYPOINT [ "/bin/drone-s3-sync" ]
+ENTRYPOINT ["/bin/drone-s3-sync"]
Dockerfile.arm64
@ -1,11 +1,9 @@
 FROM plugins/base:multiarch
-MAINTAINER Drone.IO Community <drone-dev@googlegroups.com>
 
-LABEL org.label-schema.version=latest
-LABEL org.label-schema.vcs-url="https://github.com/drone-plugins/drone-s3-sync.git"
-LABEL org.label-schema.name="Drone S3 Sync"
-LABEL org.label-schema.vendor="Drone.IO Community"
-LABEL org.label-schema.schema-version="1.0"
+LABEL maintainer="Drone.IO Community <drone-dev@googlegroups.com>" \
+  org.label-schema.name="Drone S3 Sync" \
+  org.label-schema.vendor="Drone.IO Community" \
+  org.label-schema.schema-version="1.0"
 
 ADD release/linux/arm64/drone-s3-sync /bin/
-ENTRYPOINT [ "/bin/drone-s3-sync" ]
+ENTRYPOINT ["/bin/drone-s3-sync"]
Dockerfile.i386 (new file, 9 lines)
@ -0,0 +1,9 @@
FROM plugins/base:multiarch

LABEL maintainer="Drone.IO Community <drone-dev@googlegroups.com>" \
  org.label-schema.name="Drone S3 Sync" \
  org.label-schema.vendor="Drone.IO Community" \
  org.label-schema.schema-version="1.0"

ADD release/linux/i386/drone-s3-sync /bin/
ENTRYPOINT ["/bin/drone-s3-sync"]
Dockerfile.windows
@ -1,11 +1,12 @@
-FROM microsoft/nanoserver:latest
-MAINTAINER Drone.IO Community <drone-dev@googlegroups.com>
+# escape=`
+FROM microsoft/nanoserver:10.0.14393.1593
 
-LABEL org.label-schema.version=latest
-LABEL org.label-schema.vcs-url="https://github.com/drone-plugins/drone-s3-sync.git"
-LABEL org.label-schema.name="Drone S3 Sync"
-LABEL org.label-schema.vendor="Drone.IO Community"
-LABEL org.label-schema.schema-version="1.0"
+LABEL maintainer="Drone.IO Community <drone-dev@googlegroups.com>" `
+  org.label-schema.name="Drone S3 Sync" `
+  org.label-schema.vendor="Drone.IO Community" `
+  org.label-schema.schema-version="1.0"
 
-ADD release/windows/amd64/drone-s3-sync /bin/
-ENTRYPOINT [ "/bin/drone-s3-sync" ]
+SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
+
+ADD release\drone-s3-sync.exe c:\drone-s3-sync.exe
+ENTRYPOINT [ "c:\\drone-s3-sync.exe" ]
Gopkg.lock (generated, new file, 94 lines)
@ -0,0 +1,94 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "github.com/Sirupsen/logrus"
  packages = ["."]
  revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
  version = "v1.0.5"

[[projects]]
  name = "github.com/aws/aws-sdk-go"
  packages = [
    "aws",
    "aws/awserr",
    "aws/awsutil",
    "aws/client",
    "aws/client/metadata",
    "aws/corehandlers",
    "aws/credentials",
    "aws/credentials/ec2rolecreds",
    "aws/credentials/endpointcreds",
    "aws/credentials/stscreds",
    "aws/defaults",
    "aws/ec2metadata",
    "aws/endpoints",
    "aws/request",
    "aws/session",
    "aws/signer/v4",
    "internal/sdkio",
    "internal/sdkrand",
    "internal/shareddefaults",
    "private/protocol",
    "private/protocol/query",
    "private/protocol/query/queryutil",
    "private/protocol/rest",
    "private/protocol/restxml",
    "private/protocol/xml/xmlutil",
    "service/cloudfront",
    "service/s3",
    "service/sts"
  ]
  revision = "f0872da8a448f8cb10f4f0c95c2100e4f7356448"
  version = "v1.13.16"

[[projects]]
  name = "github.com/go-ini/ini"
  packages = ["."]
  revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0"
  version = "v1.33.0"

[[projects]]
  name = "github.com/jmespath/go-jmespath"
  packages = ["."]
  revision = "0b12d6b5"

[[projects]]
  name = "github.com/joho/godotenv"
  packages = ["."]
  revision = "a79fa1e548e2c689c241d10173efd51e5d689d5b"
  version = "v1.2.0"

[[projects]]
  name = "github.com/ryanuber/go-glob"
  packages = ["."]
  revision = "572520ed46dbddaed19ea3d9541bdd0494163693"
  version = "v0.1"

[[projects]]
  name = "github.com/urfave/cli"
  packages = ["."]
  revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1"
  version = "v1.20.0"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  revision = "374053ea96cb300f8671b8d3b07edeeb06e203b4"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = [
    "unix",
    "windows"
  ]
  revision = "2f1e207ee39ff70f3433e49c6eb52677a515e3b5"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "0ae6c8475682e1b48bad52d2486af272d0903917f02fcfcf214f81c95cf30db3"
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml (new file, 23 lines)
@ -0,0 +1,23 @@
[[constraint]]
  name = "github.com/Sirupsen/logrus"
  version = "1.0.5"

[[constraint]]
  name = "github.com/aws/aws-sdk-go"
  version = "1.13.16"

[[constraint]]
  name = "github.com/joho/godotenv"
  version = "1.2.0"

[[constraint]]
  name = "github.com/ryanuber/go-glob"
  version = "0.1.0"

[[constraint]]
  name = "github.com/urfave/cli"
  version = "1.20.0"

[prune]
  go-tests = true
  unused-packages = true
manifest.tmpl (new file, 33 lines)
@ -0,0 +1,33 @@
image: plugins/s3-sync:{{#if build.tag}}{{trimPrefix build.tag "v"}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
  - {{this}}
{{/each}}
{{/if}}
manifests:
  -
    image: plugins/s3-sync:{{#if build.tag}}{{trimPrefix build.tag "v"}}-{{/if}}linux-amd64
    platform:
      architecture: amd64
      os: linux
  -
    image: plugins/s3-sync:{{#if build.tag}}{{trimPrefix build.tag "v"}}-{{/if}}linux-i386
    platform:
      architecture: 386
      os: linux
  -
    image: plugins/s3-sync:{{#if build.tag}}{{trimPrefix build.tag "v"}}-{{/if}}linux-arm64
    platform:
      architecture: arm64
      os: linux
  -
    image: plugins/s3-sync:{{#if build.tag}}{{trimPrefix build.tag "v"}}-{{/if}}linux-arm
    platform:
      architecture: arm
      os: linux
  -
    image: plugins/s3-sync:{{#if build.tag}}{{trimPrefix build.tag "v"}}-{{/if}}windows-amd64
    platform:
      architecture: amd64
      os: windows
vendor/github.com/Sirupsen/logrus/CHANGELOG.md (generated, vendored; 47 lines deleted)
@ -1,47 +0,0 @@
# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements


# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
vendor/github.com/Sirupsen/logrus/LICENSE (generated, vendored; 21 lines deleted)
@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
vendor/github.com/Sirupsen/logrus/README.md (generated, vendored; 357 lines deleted)
@ -1,357 +0,0 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**

Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):

![Colored](http://i.imgur.com/PY7qMwd.png)

With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:

```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}

{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}

{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```

With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:

```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```

#### Example

The simplest way to use Logrus is simply the package-level exported logger:

```go
package main

import (
  log "github.com/Sirupsen/logrus"
)

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
  }).Info("A walrus appears")
}
```

Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:

```go
package main

import (
  "os"
  log "github.com/Sirupsen/logrus"
  "github.com/Sirupsen/logrus/hooks/airbrake"
)

func init() {
  // Log as JSON instead of the default ASCII formatter.
  log.SetFormatter(&log.JSONFormatter{})

  // Use the Airbrake hook to report errors that have Error severity or above to
  // an exception tracker. You can create custom hooks, see the Hooks section.
  log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))

  // Output to stderr instead of stdout, could also be a file.
  log.SetOutput(os.Stderr)

  // Only log the warning severity or above.
  log.SetLevel(log.WarnLevel)
}

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
    "size": 10,
  }).Info("A group of walrus emerges from the ocean")

  log.WithFields(log.Fields{
    "omg": true,
    "number": 122,
  }).Warn("The group's number increased tremendously!")

  log.WithFields(log.Fields{
    "omg": true,
    "number": 100,
  }).Fatal("The ice breaks!")

  // A common pattern is to re-use fields between logging statements by re-using
  // the logrus.Entry returned from WithFields()
  contextLogger := log.WithFields(log.Fields{
    "common": "this is a common field",
    "other": "I also should be logged always",
  })

  contextLogger.Info("I'll be logged with common and other field")
  contextLogger.Info("Me too")
}
```

For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:

```go
package main

import (
  "github.com/Sirupsen/logrus"
)

// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()

func main() {
  // The API for setting attributes is a little different than the package level
  // exported logger. See Godoc.
  log.Out = os.Stderr

  log.WithFields(logrus.Fields{
    "animal": "walrus",
    "size": 10,
  }).Info("A group of walrus emerges from the ocean")
}
```

#### Fields

Logrus encourages careful, structured logging though logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:

```go
log.WithFields(log.Fields{
  "event": event,
  "topic": topic,
  "key": key,
}).Fatal("Failed to send event")
```

We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.

In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.

#### Hooks

You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.

Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:

```go
import (
  log "github.com/Sirupsen/logrus"
  "github.com/Sirupsen/logrus/hooks/airbrake"
  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
  "log/syslog"
)

func init() {
  log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))

  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
  if err != nil {
    log.Error("Unable to connect to local syslog daemon")
  } else {
    log.AddHook(hook)
  }
}
```

| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |

#### Level logging

Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.

```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```

You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:

```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```

It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.

#### Entries

Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:

1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
   the `AddFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.

#### Environments

Logrus has no notion of environment.

If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:

```go
import (
  log "github.com/Sirupsen/logrus"
)

init() {
  // do something here to set environment depending on an environment variable
  // or command-line flag
  if Environment == "production" {
    log.SetFormatter(&log.JSONFormatter{})
  } else {
    // The TextFormatter is default, you don't actually have to do this.
    log.SetFormatter(&log.TextFormatter{})
  }
}
```

This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.

#### Formatters

The built-in logging formatters are:

* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY set the
    `DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).

```go
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"})
```

Third party logging formatters:

* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.

You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):

```go
type MyJSONFormatter struct {
}

log.SetFormatter(new(MyJSONFormatter))

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // Note this doesn't include Time, Level and Message which are available on
  // the Entry. Consult `godoc` on information about those fields or read the
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
  }
  return append(serialized, '\n'), nil
}
```

#### Logger as an `io.Writer`

Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.

```go
w := logger.Writer()
defer w.Close()

srv := http.Server{
  // create a stdlib log.Logger that writes to
  // logrus.Logger.
  ErrorLog: log.New(w, "", 0),
}
```

Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.


[godoc]: https://godoc.org/github.com/Sirupsen/logrus
vendor/github.com/Sirupsen/logrus/doc.go (generated, vendored; 26 lines deleted)
@ -1,26 +0,0 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.


The simplest way to use Logrus is simply the package-level exported logger:

  package main

  import (
    log "github.com/Sirupsen/logrus"
  )

  func main() {
    log.WithFields(log.Fields{
      "animal": "walrus",
      "number": 1,
      "size":   10,
    }).Info("A walrus appears")
  }

Output:
  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus
vendor/github.com/Sirupsen/logrus/entry.go (generated, vendored; 264 lines deleted)
@ -1,264 +0,0 @@
package logrus

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "time"
)

// Defines the key when adding errors using WithError.
var ErrorKey = "error"

// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
    Logger *Logger

    // Contains all the fields set by the user.
    Data Fields

    // Time at which the log entry was created
    Time time.Time

    // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
    Level Level

    // Message passed to Debug, Info, Warn, Error, Fatal or Panic
    Message string
}

func NewEntry(logger *Logger) *Entry {
    return &Entry{
        Logger: logger,
        // Default is three fields, give a little extra room
        Data: make(Fields, 5),
    }
}

// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
    serialized, err := entry.Logger.Formatter.Format(entry)
    return bytes.NewBuffer(serialized), err
}

// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
    reader, err := entry.Reader()
    if err != nil {
        return "", err
    }

    return reader.String(), err
}

// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
    return entry.WithField(ErrorKey, err)
}

// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
    return entry.WithFields(Fields{key: value})
}

// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
    data := Fields{}
    for k, v := range entry.Data {
        data[k] = v
    }
    for k, v := range fields {
        data[k] = v
    }
    return &Entry{Logger: entry.Logger, Data: data}
}

// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
    entry.Time = time.Now()
    entry.Level = level
    entry.Message = msg

    if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
        entry.Logger.mu.Lock()
        fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
        entry.Logger.mu.Unlock()
    }

    reader, err := entry.Reader()
    if err != nil {
        entry.Logger.mu.Lock()
        fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
        entry.Logger.mu.Unlock()
    }

    entry.Logger.mu.Lock()
    defer entry.Logger.mu.Unlock()

    _, err = io.Copy(entry.Logger.Out, reader)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
    }

    // To avoid Entry#log() returning a value that only would make sense for
    // panic() to use in Entry#Panic(), we avoid the allocation by checking
    // directly here.
    if level <= PanicLevel {
        panic(&entry)
    }
}

func (entry *Entry) Debug(args ...interface{}) {
    if entry.Logger.Level >= DebugLevel {
        entry.log(DebugLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Print(args ...interface{}) {
    entry.Info(args...)
}

func (entry *Entry) Info(args ...interface{}) {
    if entry.Logger.Level >= InfoLevel {
        entry.log(InfoLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Warn(args ...interface{}) {
    if entry.Logger.Level >= WarnLevel {
        entry.log(WarnLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Warning(args ...interface{}) {
    entry.Warn(args...)
}

func (entry *Entry) Error(args ...interface{}) {
    if entry.Logger.Level >= ErrorLevel {
        entry.log(ErrorLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Fatal(args ...interface{}) {
    if entry.Logger.Level >= FatalLevel {
        entry.log(FatalLevel, fmt.Sprint(args...))
    }
    os.Exit(1)
}

func (entry *Entry) Panic(args ...interface{}) {
    if entry.Logger.Level >= PanicLevel {
        entry.log(PanicLevel, fmt.Sprint(args...))
    }
    panic(fmt.Sprint(args...))
}

// Entry Printf family functions

func (entry *Entry) Debugf(format string, args ...interface{}) {
    if entry.Logger.Level >= DebugLevel {
        entry.Debug(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Infof(format string, args ...interface{}) {
    if entry.Logger.Level >= InfoLevel {
        entry.Info(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Printf(format string, args ...interface{}) {
    entry.Infof(format, args...)
}

func (entry *Entry) Warnf(format string, args ...interface{}) {
    if entry.Logger.Level >= WarnLevel {
        entry.Warn(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Warningf(format string, args ...interface{}) {
    entry.Warnf(format, args...)
}

func (entry *Entry) Errorf(format string, args ...interface{}) {
    if entry.Logger.Level >= ErrorLevel {
        entry.Error(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Fatalf(format string, args ...interface{}) {
    if entry.Logger.Level >= FatalLevel {
        entry.Fatal(fmt.Sprintf(format, args...))
    }
    os.Exit(1)
}

func (entry *Entry) Panicf(format string, args ...interface{}) {
    if entry.Logger.Level >= PanicLevel {
        entry.Panic(fmt.Sprintf(format, args...))
    }
}

// Entry Println family functions

func (entry *Entry) Debugln(args ...interface{}) {
    if entry.Logger.Level >= DebugLevel {
        entry.Debug(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Infoln(args ...interface{}) {
    if entry.Logger.Level >= InfoLevel {
        entry.Info(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Println(args ...interface{}) {
    entry.Infoln(args...)
}

func (entry *Entry) Warnln(args ...interface{}) {
    if entry.Logger.Level >= WarnLevel {
        entry.Warn(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Warningln(args ...interface{}) {
    entry.Warnln(args...)
}

func (entry *Entry) Errorln(args ...interface{}) {
    if entry.Logger.Level >= ErrorLevel {
        entry.Error(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Fatalln(args ...interface{}) {
    if entry.Logger.Level >= FatalLevel {
        entry.Fatal(entry.sprintlnn(args...))
    }
    os.Exit(1)
}

func (entry *Entry) Panicln(args ...interface{}) {
    if entry.Logger.Level >= PanicLevel {
        entry.Panic(entry.sprintlnn(args...))
    }
}

// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
    msg := fmt.Sprintln(args...)
    return msg[:len(msg)-1]
}
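The doc comment on Entry above notes that entries "can be reused and passed around as much as you wish to avoid field duplication." A minimal usage sketch of that pattern, using only the vendored logrus API shown in this file; the field names and values are made up for illustration and are not taken from the plugin:

package main

import (
    log "github.com/Sirupsen/logrus"
)

func main() {
    // Build an entry once with the common fields...
    ctx := log.WithFields(log.Fields{
        "plugin": "drone-s3-sync", // illustrative values only
        "bucket": "example-bucket",
    })

    // ...then reuse it; every call below inherits "plugin" and "bucket".
    ctx.Info("starting sync")
    ctx.WithField("files", 3).Info("upload complete")
}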
vendor/github.com/Sirupsen/logrus/exported.go (generated, vendored; 193 lines deleted)
@ -1,193 +0,0 @@
package logrus

import (
    "io"
)

var (
    // std is the name of the standard logger in stdlib `log`
    std = New()
)

func StandardLogger() *Logger {
    return std
}

// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
    std.mu.Lock()
    defer std.mu.Unlock()
    std.Out = out
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
    std.mu.Lock()
    defer std.mu.Unlock()
    std.Formatter = formatter
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
    std.mu.Lock()
    defer std.mu.Unlock()
    std.Level = level
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
    std.mu.Lock()
    defer std.mu.Unlock()
    return std.Level
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
    std.mu.Lock()
    defer std.mu.Unlock()
    std.Hooks.Add(hook)
}

// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
    return std.WithField(ErrorKey, err)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
    return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
    return std.WithFields(fields)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
    std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
    std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
    std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
    std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
    std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
    std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
    std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
    std.Fatal(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
    std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
    std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
    std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
    std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
    std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
    std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
    std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
    std.Fatalf(format, args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
    std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
    std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
    std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
    std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
    std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
    std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
    std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
    std.Fatalln(args...)
}
48
vendor/github.com/Sirupsen/logrus/formatter.go
generated
vendored
@ -1,48 +0,0 @@
package logrus

import "time"

const DefaultTimestampFormat = time.RFC3339

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
    Format(*Entry) ([]byte, error)
}

// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
//  logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
    _, ok := data["time"]
    if ok {
        data["fields.time"] = data["time"]
    }

    _, ok = data["msg"]
    if ok {
        data["fields.msg"] = data["msg"]
    }

    _, ok = data["level"]
    if ok {
        data["fields.level"] = data["level"]
    }
}
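The Formatter interface above is the extension point for custom log output. As a rough sketch of what an implementation could look like (PlainFormatter is an invented name, not part of logrus):

package main

import (
    "fmt"

    "github.com/Sirupsen/logrus"
)

// PlainFormatter is a hypothetical formatter that renders only the level and message.
type PlainFormatter struct{}

// Format satisfies logrus.Formatter by returning the serialized entry.
func (f *PlainFormatter) Format(entry *logrus.Entry) ([]byte, error) {
    return []byte(fmt.Sprintf("%s: %s\n", entry.Level.String(), entry.Message)), nil
}

func main() {
    log := logrus.New()
    log.Formatter = &PlainFormatter{}
    log.Info("custom formatter sketch")
}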
34
vendor/github.com/Sirupsen/logrus/hooks.go
generated
vendored
@ -1,34 +0,0 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
    Levels() []Level
    Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
    for _, level := range hook.Levels() {
        hooks[level] = append(hooks[level], hook)
    }
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
    for _, hook := range hooks[level] {
        if err := hook.Fire(entry); err != nil {
            return err
        }
    }

    return nil
}
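A Hook implementation only needs the two methods above. A minimal sketch, assuming an invented ErrorCounterHook type:

package main

import "github.com/Sirupsen/logrus"

// ErrorCounterHook is a hypothetical hook that counts error-level and above entries.
type ErrorCounterHook struct {
    Count int
}

// Levels restricts the hook to error, fatal and panic entries.
func (h *ErrorCounterHook) Levels() []logrus.Level {
    return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire is invoked by logrus for every entry at one of the returned levels.
func (h *ErrorCounterHook) Fire(entry *logrus.Entry) error {
    h.Count++
    return nil
}

func main() {
    log := logrus.New()
    hook := &ErrorCounterHook{}
    log.Hooks.Add(hook)
    log.Error("something went wrong")
}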
41
vendor/github.com/Sirupsen/logrus/json_formatter.go
generated
vendored
@ -1,41 +0,0 @@
package logrus

import (
    "encoding/json"
    "fmt"
)

type JSONFormatter struct {
    // TimestampFormat sets the format used for marshaling timestamps.
    TimestampFormat string
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
    data := make(Fields, len(entry.Data)+3)
    for k, v := range entry.Data {
        switch v := v.(type) {
        case error:
            // Otherwise errors are ignored by `encoding/json`
            // https://github.com/Sirupsen/logrus/issues/137
            data[k] = v.Error()
        default:
            data[k] = v
        }
    }
    prefixFieldClashes(data)

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
        timestampFormat = DefaultTimestampFormat
    }

    data["time"] = entry.Time.Format(timestampFormat)
    data["msg"] = entry.Message
    data["level"] = entry.Level.String()

    serialized, err := json.Marshal(data)
    if err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    }
    return append(serialized, '\n'), nil
}
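A possible usage sketch for the formatter above, switching a logger to JSON output with a custom timestamp format (the field values are placeholders):

package main

import (
    "time"

    "github.com/Sirupsen/logrus"
)

func main() {
    log := logrus.New()
    // Emit JSON instead of the default text format, with an RFC1123 timestamp.
    log.Formatter = &logrus.JSONFormatter{TimestampFormat: time.RFC1123}
    log.WithFields(logrus.Fields{"bucket": "example-bucket"}).Info("upload complete")
}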
206
vendor/github.com/Sirupsen/logrus/logger.go
generated
vendored
@ -1,206 +0,0 @@
package logrus

import (
    "io"
    "os"
    "sync"
)

type Logger struct {
    // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
    // file, or leave it default which is `os.Stderr`. You can also set this to
    // something more adventorous, such as logging to Kafka.
    Out io.Writer
    // Hooks for the logger instance. These allow firing events based on logging
    // levels and log entries. For example, to send errors to an error tracking
    // service, log to StatsD or dump the core on fatal errors.
    Hooks LevelHooks
    // All log entries pass through the formatter before logged to Out. The
    // included formatters are `TextFormatter` and `JSONFormatter` for which
    // TextFormatter is the default. In development (when a TTY is attached) it
    // logs with colors, but to a file it wouldn't. You can easily implement your
    // own that implements the `Formatter` interface, see the `README` or included
    // formatters for examples.
    Formatter Formatter
    // The logging level the logger should log at. This is typically (and defaults
    // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
    // logged. `logrus.Debug` is useful in
    Level Level
    // Used to sync writing to the log.
    mu sync.Mutex
}

// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//    var log = &Logger{
//      Out: os.Stderr,
//      Formatter: new(JSONFormatter),
//      Hooks: make(LevelHooks),
//      Level: logrus.DebugLevel,
//    }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
    return &Logger{
        Out:       os.Stderr,
        Formatter: new(TextFormatter),
        Hooks:     make(LevelHooks),
        Level:     InfoLevel,
    }
}

// Adds a field to the log entry, note that you it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
    return NewEntry(logger).WithField(key, value)
}

// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
    return NewEntry(logger).WithFields(fields)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debugf(format, args...)
    }
}

func (logger *Logger) Infof(format string, args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Infof(format, args...)
    }
}

func (logger *Logger) Printf(format string, args ...interface{}) {
    NewEntry(logger).Printf(format, args...)
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnf(format, args...)
    }
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnf(format, args...)
    }
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Errorf(format, args...)
    }
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatalf(format, args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panicf(format, args...)
    }
}

func (logger *Logger) Debug(args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debug(args...)
    }
}

func (logger *Logger) Info(args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Info(args...)
    }
}

func (logger *Logger) Print(args ...interface{}) {
    NewEntry(logger).Info(args...)
}

func (logger *Logger) Warn(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warn(args...)
    }
}

func (logger *Logger) Warning(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warn(args...)
    }
}

func (logger *Logger) Error(args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Error(args...)
    }
}

func (logger *Logger) Fatal(args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatal(args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panic(args...)
    }
}

func (logger *Logger) Debugln(args ...interface{}) {
    if logger.Level >= DebugLevel {
        NewEntry(logger).Debugln(args...)
    }
}

func (logger *Logger) Infoln(args ...interface{}) {
    if logger.Level >= InfoLevel {
        NewEntry(logger).Infoln(args...)
    }
}

func (logger *Logger) Println(args ...interface{}) {
    NewEntry(logger).Println(args...)
}

func (logger *Logger) Warnln(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnln(args...)
    }
}

func (logger *Logger) Warningln(args ...interface{}) {
    if logger.Level >= WarnLevel {
        NewEntry(logger).Warnln(args...)
    }
}

func (logger *Logger) Errorln(args ...interface{}) {
    if logger.Level >= ErrorLevel {
        NewEntry(logger).Errorln(args...)
    }
}

func (logger *Logger) Fatalln(args ...interface{}) {
    if logger.Level >= FatalLevel {
        NewEntry(logger).Fatalln(args...)
    }
    os.Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
    if logger.Level >= PanicLevel {
        NewEntry(logger).Panicln(args...)
    }
}
98
vendor/github.com/Sirupsen/logrus/logrus.go
generated
vendored
@ -1,98 +0,0 @@
package logrus

import (
    "fmt"
    "log"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint8

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
    switch level {
    case DebugLevel:
        return "debug"
    case InfoLevel:
        return "info"
    case WarnLevel:
        return "warning"
    case ErrorLevel:
        return "error"
    case FatalLevel:
        return "fatal"
    case PanicLevel:
        return "panic"
    }

    return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
    switch lvl {
    case "panic":
        return PanicLevel, nil
    case "fatal":
        return FatalLevel, nil
    case "error":
        return ErrorLevel, nil
    case "warn", "warning":
        return WarnLevel, nil
    case "info":
        return InfoLevel, nil
    case "debug":
        return DebugLevel, nil
    }

    var l Level
    return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
    // PanicLevel level, highest level of severity. Logs and then calls panic with the
    // message passed to Debug, Info, ...
    PanicLevel Level = iota
    // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
    // logging level is set to Panic.
    FatalLevel
    // ErrorLevel level. Logs. Used for errors that should definitely be noted.
    // Commonly used for hooks to send errors to an error tracking service.
    ErrorLevel
    // WarnLevel level. Non-critical entries that deserve eyes.
    WarnLevel
    // InfoLevel level. General operational entries about what's going on inside the
    // application.
    InfoLevel
    // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
    DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
    _ StdLogger = &log.Logger{}
    _ StdLogger = &Entry{}
    _ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
    Print(...interface{})
    Printf(string, ...interface{})
    Println(...interface{})

    Fatal(...interface{})
    Fatalf(string, ...interface{})
    Fatalln(...interface{})

    Panic(...interface{})
    Panicf(string, ...interface{})
    Panicln(...interface{})
}
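The StdLogger interface above exists so that library code can accept either a stdlib *log.Logger or a logrus logger. A small illustrative sketch, where uploadAll is an invented helper:

package main

import (
    "log"
    "os"

    "github.com/Sirupsen/logrus"
)

// uploadAll is a hypothetical helper that only depends on the StdLogger subset.
func uploadAll(files []string, logger logrus.StdLogger) {
    for _, f := range files {
        logger.Printf("uploading %s", f)
    }
}

func main() {
    // Both *log.Logger and *logrus.Logger satisfy logrus.StdLogger.
    uploadAll([]string{"a.txt"}, log.New(os.Stderr, "", log.LstdFlags))
    uploadAll([]string{"b.txt"}, logrus.New())
}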
9
vendor/github.com/Sirupsen/logrus/terminal_bsd.go
generated
vendored
@ -1,9 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
12
vendor/github.com/Sirupsen/logrus/terminal_linux.go
generated
vendored
@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
21
vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
generated
vendored
@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly

package logrus

import (
    "syscall"
    "unsafe"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stdout
    var termios Termios
    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
    return err == 0
}
27
vendor/github.com/Sirupsen/logrus/terminal_windows.go
generated
vendored
@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package logrus

import (
    "syscall"
    "unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
    procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stdout
    var st uint32
    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
    return r != 0 && e == 0
}
159
vendor/github.com/Sirupsen/logrus/text_formatter.go
generated
vendored
@ -1,159 +0,0 @@
package logrus

import (
    "bytes"
    "fmt"
    "runtime"
    "sort"
    "strings"
    "time"
)

const (
    nocolor = 0
    red     = 31
    green   = 32
    yellow  = 33
    blue    = 34
    gray    = 37
)

var (
    baseTimestamp time.Time
    isTerminal    bool
)

func init() {
    baseTimestamp = time.Now()
    isTerminal = IsTerminal()
}

func miniTS() int {
    return int(time.Since(baseTimestamp) / time.Second)
}

type TextFormatter struct {
    // Set to true to bypass checking for a TTY before outputting colors.
    ForceColors bool

    // Force disabling colors.
    DisableColors bool

    // Disable timestamp logging. useful when output is redirected to logging
    // system that already adds timestamps.
    DisableTimestamp bool

    // Enable logging the full timestamp when a TTY is attached instead of just
    // the time passed since beginning of execution.
    FullTimestamp bool

    // TimestampFormat to use for display when a full timestamp is printed
    TimestampFormat string

    // The fields are sorted by default for a consistent output. For applications
    // that log extremely frequently and don't use the JSON formatter this may not
    // be desired.
    DisableSorting bool
}

func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
    var keys []string = make([]string, 0, len(entry.Data))
    for k := range entry.Data {
        keys = append(keys, k)
    }

    if !f.DisableSorting {
        sort.Strings(keys)
    }

    b := &bytes.Buffer{}

    prefixFieldClashes(entry.Data)

    isColorTerminal := isTerminal && (runtime.GOOS != "windows")
    isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
        timestampFormat = DefaultTimestampFormat
    }
    if isColored {
        f.printColored(b, entry, keys, timestampFormat)
    } else {
        if !f.DisableTimestamp {
            f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
        }
        f.appendKeyValue(b, "level", entry.Level.String())
        f.appendKeyValue(b, "msg", entry.Message)
        for _, key := range keys {
            f.appendKeyValue(b, key, entry.Data[key])
        }
    }

    b.WriteByte('\n')
    return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
    var levelColor int
    switch entry.Level {
    case DebugLevel:
        levelColor = gray
    case WarnLevel:
        levelColor = yellow
    case ErrorLevel, FatalLevel, PanicLevel:
        levelColor = red
    default:
        levelColor = blue
    }

    levelText := strings.ToUpper(entry.Level.String())[0:4]

    if !f.FullTimestamp {
        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
    } else {
        fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
    }
    for _, k := range keys {
        v := entry.Data[k]
        fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
    }
}

func needsQuoting(text string) bool {
    for _, ch := range text {
        if !((ch >= 'a' && ch <= 'z') ||
            (ch >= 'A' && ch <= 'Z') ||
            (ch >= '0' && ch <= '9') ||
            ch == '-' || ch == '.') {
            return false
        }
    }
    return true
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {

    b.WriteString(key)
    b.WriteByte('=')

    switch value := value.(type) {
    case string:
        if needsQuoting(value) {
            b.WriteString(value)
        } else {
            fmt.Fprintf(b, "%q", value)
        }
    case error:
        errmsg := value.Error()
        if needsQuoting(errmsg) {
            b.WriteString(errmsg)
        } else {
            fmt.Fprintf(b, "%q", value)
        }
    default:
        fmt.Fprint(b, value)
    }

    b.WriteByte(' ')
}
31
vendor/github.com/Sirupsen/logrus/writer.go
generated
vendored
@ -1,31 +0,0 @@
package logrus

import (
    "bufio"
    "io"
    "runtime"
)

func (logger *Logger) Writer() *io.PipeWriter {
    reader, writer := io.Pipe()

    go logger.writerScanner(reader)
    runtime.SetFinalizer(writer, writerFinalizer)

    return writer
}

func (logger *Logger) writerScanner(reader *io.PipeReader) {
    scanner := bufio.NewScanner(reader)
    for scanner.Scan() {
        logger.Print(scanner.Text())
    }
    if err := scanner.Err(); err != nil {
        logger.Errorf("Error while reading from Writer: %s", err)
    }
    reader.Close()
}

func writerFinalizer(writer *io.PipeWriter) {
    writer.Close()
}
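A typical use of the Writer method above is to funnel output from the standard library's log package through logrus; a minimal sketch:

package main

import (
    "log"

    "github.com/Sirupsen/logrus"
)

func main() {
    logger := logrus.New()

    // Everything written to w is scanned line by line and re-logged via logger.Print.
    w := logger.Writer()
    defer w.Close()

    log.SetOutput(w)
    log.Println("this line is re-emitted through logrus")
}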
202
vendor/github.com/aws/aws-sdk-go/LICENSE.txt
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
3
vendor/github.com/aws/aws-sdk-go/NOTICE.txt
generated
vendored
@ -1,3 +0,0 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
145
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
generated
vendored
@ -1,145 +0,0 @@
|
|||||||
// Package awserr represents API error interface accessors for the SDK.
|
|
||||||
package awserr
|
|
||||||
|
|
||||||
// An Error wraps lower level errors with code, message and an original error.
|
|
||||||
// The underlying concrete error type may also satisfy other interfaces which
|
|
||||||
// can be to used to obtain more specific information about the error.
|
|
||||||
//
|
|
||||||
// Calling Error() or String() will always include the full information about
|
|
||||||
// an error based on its underlying type.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// output, err := s3manage.Upload(svc, input, opts)
|
|
||||||
// if err != nil {
|
|
||||||
// if awsErr, ok := err.(awserr.Error); ok {
|
|
||||||
// // Get error details
|
|
||||||
// log.Println("Error:", awsErr.Code(), awsErr.Message())
|
|
||||||
//
|
|
||||||
// // Prints out full error message, including original error if there was one.
|
|
||||||
// log.Println("Error:", awsErr.Error())
|
|
||||||
//
|
|
||||||
// // Get original error
|
|
||||||
// if origErr := awsErr.OrigErr(); origErr != nil {
|
|
||||||
// // operate on original error.
|
|
||||||
// }
|
|
||||||
// } else {
|
|
||||||
// fmt.Println(err.Error())
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type Error interface {
|
|
||||||
// Satisfy the generic error interface.
|
|
||||||
error
|
|
||||||
|
|
||||||
// Returns the short phrase depicting the classification of the error.
|
|
||||||
Code() string
|
|
||||||
|
|
||||||
// Returns the error details message.
|
|
||||||
Message() string
|
|
||||||
|
|
||||||
// Returns the original error if one was set. Nil is returned if not set.
|
|
||||||
OrigErr() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// BatchError is a batch of errors which also wraps lower level errors with
|
|
||||||
// code, message, and original errors. Calling Error() will include all errors
|
|
||||||
// that occurred in the batch.
|
|
||||||
//
|
|
||||||
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
|
|
||||||
// compatibility.
|
|
||||||
type BatchError interface {
|
|
||||||
// Satisfy the generic error interface.
|
|
||||||
error
|
|
||||||
|
|
||||||
// Returns the short phrase depicting the classification of the error.
|
|
||||||
Code() string
|
|
||||||
|
|
||||||
// Returns the error details message.
|
|
||||||
Message() string
|
|
||||||
|
|
||||||
// Returns the original error if one was set. Nil is returned if not set.
|
|
||||||
OrigErrs() []error
|
|
||||||
}
|
|
||||||
|
|
||||||
// BatchedErrors is a batch of errors which also wraps lower level errors with
|
|
||||||
// code, message, and original errors. Calling Error() will include all errors
|
|
||||||
// that occurred in the batch.
|
|
||||||
//
|
|
||||||
// Replaces BatchError
|
|
||||||
type BatchedErrors interface {
|
|
||||||
// Satisfy the base Error interface.
|
|
||||||
Error
|
|
||||||
|
|
||||||
// Returns the original error if one was set. Nil is returned if not set.
|
|
||||||
OrigErrs() []error
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns an Error object described by the code, message, and origErr.
|
|
||||||
//
|
|
||||||
// If origErr satisfies the Error interface it will not be wrapped within a new
|
|
||||||
// Error object and will instead be returned.
|
|
||||||
func New(code, message string, origErr error) Error {
|
|
||||||
var errs []error
|
|
||||||
if origErr != nil {
|
|
||||||
errs = append(errs, origErr)
|
|
||||||
}
|
|
||||||
return newBaseError(code, message, errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBatchError returns an BatchedErrors with a collection of errors as an
|
|
||||||
// array of errors.
|
|
||||||
func NewBatchError(code, message string, errs []error) BatchedErrors {
|
|
||||||
return newBaseError(code, message, errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A RequestFailure is an interface to extract request failure information from
|
|
||||||
// an Error such as the request ID of the failed request returned by a service.
|
|
||||||
// RequestFailures may not always have a requestID value if the request failed
|
|
||||||
// prior to reaching the service such as a connection error.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// output, err := s3manage.Upload(svc, input, opts)
|
|
||||||
// if err != nil {
|
|
||||||
// if reqerr, ok := err.(RequestFailure); ok {
|
|
||||||
// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
|
|
||||||
// } else {
|
|
||||||
// log.Println("Error:", err.Error())
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Combined with awserr.Error:
|
|
||||||
//
|
|
||||||
// output, err := s3manage.Upload(svc, input, opts)
|
|
||||||
// if err != nil {
|
|
||||||
// if awsErr, ok := err.(awserr.Error); ok {
|
|
||||||
// // Generic AWS Error with Code, Message, and original error (if any)
|
|
||||||
// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
|
|
||||||
//
|
|
||||||
// if reqErr, ok := err.(awserr.RequestFailure); ok {
|
|
||||||
// // A service error occurred
|
|
||||||
// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
|
|
||||||
// }
|
|
||||||
// } else {
|
|
||||||
// fmt.Println(err.Error())
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
type RequestFailure interface {
|
|
||||||
Error
|
|
||||||
|
|
||||||
// The status code of the HTTP response.
|
|
||||||
StatusCode() int
|
|
||||||
|
|
||||||
// The request ID returned by the service for a request failure. This will
|
|
||||||
// be empty if no request ID is available such as the request failed due
|
|
||||||
// to a connection error.
|
|
||||||
RequestID() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequestFailure returns a new request error wrapper for the given Error
|
|
||||||
// provided.
|
|
||||||
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
|
|
||||||
return newRequestError(err, statusCode, reqID)
|
|
||||||
}
|
|
194
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
generated
vendored
@ -1,194 +0,0 @@
|
|||||||
package awserr
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// SprintError returns a string of the formatted error code.
|
|
||||||
//
|
|
||||||
// Both extra and origErr are optional. If they are included their lines
|
|
||||||
// will be added, but if they are not included their lines will be ignored.
|
|
||||||
func SprintError(code, message, extra string, origErr error) string {
|
|
||||||
msg := fmt.Sprintf("%s: %s", code, message)
|
|
||||||
if extra != "" {
|
|
||||||
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
|
|
||||||
}
|
|
||||||
if origErr != nil {
|
|
||||||
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// A baseError wraps the code and message which defines an error. It also
|
|
||||||
// can be used to wrap an original error object.
|
|
||||||
//
|
|
||||||
// Should be used as the root for errors satisfying the awserr.Error. Also
|
|
||||||
// for any error which does not fit into a specific error wrapper type.
|
|
||||||
type baseError struct {
|
|
||||||
// Classification of error
|
|
||||||
code string
|
|
||||||
|
|
||||||
// Detailed information about error
|
|
||||||
message string
|
|
||||||
|
|
||||||
// Optional original error this error is based off of. Allows building
|
|
||||||
// chained errors.
|
|
||||||
errs []error
|
|
||||||
}
|
|
||||||
|
|
||||||
// newBaseError returns an error object for the code, message, and errors.
|
|
||||||
//
|
|
||||||
// code is a short no whitespace phrase depicting the classification of
|
|
||||||
// the error that is being created.
|
|
||||||
//
|
|
||||||
// message is the free flow string containing detailed information about the
|
|
||||||
// error.
|
|
||||||
//
|
|
||||||
// origErrs is the error objects which will be nested under the new errors to
|
|
||||||
// be returned.
|
|
||||||
func newBaseError(code, message string, origErrs []error) *baseError {
|
|
||||||
b := &baseError{
|
|
||||||
code: code,
|
|
||||||
message: message,
|
|
||||||
errs: origErrs,
|
|
||||||
}
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the string representation of the error.
|
|
||||||
//
|
|
||||||
// See ErrorWithExtra for formatting.
|
|
||||||
//
|
|
||||||
// Satisfies the error interface.
|
|
||||||
func (b baseError) Error() string {
|
|
||||||
size := len(b.errs)
|
|
||||||
if size > 0 {
|
|
||||||
return SprintError(b.code, b.message, "", errorList(b.errs))
|
|
||||||
}
|
|
||||||
|
|
||||||
return SprintError(b.code, b.message, "", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string representation of the error.
|
|
||||||
// Alias for Error to satisfy the stringer interface.
|
|
||||||
func (b baseError) String() string {
|
|
||||||
return b.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code returns the short phrase depicting the classification of the error.
|
|
||||||
func (b baseError) Code() string {
|
|
||||||
return b.code
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message returns the error details message.
|
|
||||||
func (b baseError) Message() string {
|
|
||||||
return b.message
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr returns the original error if one was set. Nil is returned if no
|
|
||||||
// error was set. This only returns the first element in the list. If the full
|
|
||||||
// list is needed, use BatchedErrors.
|
|
||||||
func (b baseError) OrigErr() error {
|
|
||||||
switch len(b.errs) {
|
|
||||||
case 0:
|
|
||||||
return nil
|
|
||||||
case 1:
|
|
||||||
return b.errs[0]
|
|
||||||
default:
|
|
||||||
if err, ok := b.errs[0].(Error); ok {
|
|
||||||
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
|
|
||||||
}
|
|
||||||
return NewBatchError("BatchedErrors",
|
|
||||||
"multiple errors occurred", b.errs)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
|
||||||
// returned if no error was set.
|
|
||||||
func (b baseError) OrigErrs() []error {
|
|
||||||
return b.errs
|
|
||||||
}
|
|
||||||
|
|
||||||
// So that the Error interface type can be included as an anonymous field
|
|
||||||
// in the requestError struct and not conflict with the error.Error() method.
|
|
||||||
type awsError Error
|
|
||||||
|
|
||||||
// A requestError wraps a request or service error.
|
|
||||||
//
|
|
||||||
// Composed of baseError for code, message, and original error.
|
|
||||||
type requestError struct {
|
|
||||||
awsError
|
|
||||||
statusCode int
|
|
||||||
requestID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRequestError returns a wrapped error with additional information for
|
|
||||||
// request status code, and service requestID.
|
|
||||||
//
|
|
||||||
// Should be used to wrap all request which involve service requests. Even if
|
|
||||||
// the request failed without a service response, but had an HTTP status code
|
|
||||||
// that may be meaningful.
|
|
||||||
//
|
|
||||||
// Also wraps original errors via the baseError.
|
|
||||||
func newRequestError(err Error, statusCode int, requestID string) *requestError {
|
|
||||||
return &requestError{
|
|
||||||
awsError: err,
|
|
||||||
statusCode: statusCode,
|
|
||||||
requestID: requestID,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the string representation of the error.
|
|
||||||
// Satisfies the error interface.
|
|
||||||
func (r requestError) Error() string {
|
|
||||||
extra := fmt.Sprintf("status code: %d, request id: %s",
|
|
||||||
r.statusCode, r.requestID)
|
|
||||||
return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the string representation of the error.
|
|
||||||
// Alias for Error to satisfy the stringer interface.
|
|
||||||
func (r requestError) String() string {
|
|
||||||
return r.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// StatusCode returns the wrapped status code for the error
|
|
||||||
func (r requestError) StatusCode() int {
|
|
||||||
return r.statusCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestID returns the wrapped requestID
|
|
||||||
func (r requestError) RequestID() string {
|
|
||||||
return r.requestID
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErrs returns the original errors if one was set. An empty slice is
|
|
||||||
// returned if no error was set.
|
|
||||||
func (r requestError) OrigErrs() []error {
|
|
||||||
if b, ok := r.awsError.(BatchedErrors); ok {
|
|
||||||
return b.OrigErrs()
|
|
||||||
}
|
|
||||||
return []error{r.OrigErr()}
|
|
||||||
}
|
|
||||||
|
|
||||||
// An error list that satisfies the golang interface
|
|
||||||
type errorList []error
|
|
||||||
|
|
||||||
// Error returns the string representation of the error.
|
|
||||||
//
|
|
||||||
// Satisfies the error interface.
|
|
||||||
func (e errorList) Error() string {
|
|
||||||
msg := ""
|
|
||||||
// How do we want to handle the array size being zero
|
|
||||||
if size := len(e); size > 0 {
|
|
||||||
for i := 0; i < size; i++ {
|
|
||||||
msg += fmt.Sprintf("%s", e[i].Error())
|
|
||||||
// We check the next index to see if it is within the slice.
|
|
||||||
// If it is, then we append a newline. We do this, because unit tests
|
|
||||||
// could be broken with the additional '\n'
|
|
||||||
if i+1 < size {
|
|
||||||
msg += "\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
108
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
generated
vendored
@ -1,108 +0,0 @@
|
|||||||
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Copy deeply copies a src structure to dst. Useful for copying request and
|
|
||||||
// response structures.
|
|
||||||
//
|
|
||||||
// Can copy between structs of different type, but will only copy fields which
|
|
||||||
// are assignable, and exist in both structs. Fields which are not assignable,
|
|
||||||
// or do not exist in both structs are ignored.
|
|
||||||
func Copy(dst, src interface{}) {
|
|
||||||
dstval := reflect.ValueOf(dst)
|
|
||||||
if !dstval.IsValid() {
|
|
||||||
panic("Copy dst cannot be nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
rcopy(dstval, reflect.ValueOf(src), true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyOf returns a copy of src while also allocating the memory for dst.
|
|
||||||
// src must be a pointer type or this operation will fail.
|
|
||||||
func CopyOf(src interface{}) (dst interface{}) {
|
|
||||||
dsti := reflect.New(reflect.TypeOf(src).Elem())
|
|
||||||
dst = dsti.Interface()
|
|
||||||
rcopy(dsti, reflect.ValueOf(src), true)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// rcopy performs a recursive copy of values from the source to destination.
|
|
||||||
//
|
|
||||||
// root is used to skip certain aspects of the copy which are not valid
|
|
||||||
// for the root node of a object.
|
|
||||||
func rcopy(dst, src reflect.Value, root bool) {
|
|
||||||
if !src.IsValid() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch src.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
if _, ok := src.Interface().(io.Reader); ok {
|
|
||||||
if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
|
|
||||||
dst.Elem().Set(src)
|
|
||||||
} else if dst.CanSet() {
|
|
||||||
dst.Set(src)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
e := src.Type().Elem()
|
|
||||||
if dst.CanSet() && !src.IsNil() {
|
|
||||||
if _, ok := src.Interface().(*time.Time); !ok {
|
|
||||||
dst.Set(reflect.New(e))
|
|
||||||
} else {
|
|
||||||
tempValue := reflect.New(e)
|
|
||||||
tempValue.Elem().Set(src.Elem())
|
|
||||||
// Sets time.Time's unexported values
|
|
||||||
dst.Set(tempValue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if src.Elem().IsValid() {
|
|
||||||
// Keep the current root state since the depth hasn't changed
|
|
||||||
rcopy(dst.Elem(), src.Elem(), root)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Struct:
|
|
||||||
t := dst.Type()
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
name := t.Field(i).Name
|
|
||||||
srcVal := src.FieldByName(name)
|
|
||||||
dstVal := dst.FieldByName(name)
|
|
||||||
if srcVal.IsValid() && dstVal.CanSet() {
|
|
||||||
rcopy(dstVal, srcVal, false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Slice:
|
|
||||||
if src.IsNil() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
|
|
||||||
dst.Set(s)
|
|
||||||
for i := 0; i < src.Len(); i++ {
|
|
||||||
rcopy(dst.Index(i), src.Index(i), false)
|
|
||||||
}
|
|
||||||
case reflect.Map:
|
|
||||||
if src.IsNil() {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
s := reflect.MakeMap(src.Type())
|
|
||||||
dst.Set(s)
|
|
||||||
for _, k := range src.MapKeys() {
|
|
||||||
v := src.MapIndex(k)
|
|
||||||
v2 := reflect.New(v.Type()).Elem()
|
|
||||||
rcopy(v2, v, false)
|
|
||||||
dst.SetMapIndex(k, v2)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// Assign the value if possible. If its not assignable, the value would
|
|
||||||
// need to be converted and the impact of that may be unexpected, or is
|
|
||||||
// not compatible with the dst type.
|
|
||||||
if src.Type().AssignableTo(dst.Type()) {
|
|
||||||
dst.Set(src)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
27 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go (generated, vendored)
@@ -1,27 +0,0 @@
package awsutil

import (
    "reflect"
)

// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
    ra := reflect.Indirect(reflect.ValueOf(a))
    rb := reflect.Indirect(reflect.ValueOf(b))

    if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
        // If the elements are both nil, and of the same type the are equal
        // If they are of different types they are not equal
        return reflect.TypeOf(a) == reflect.TypeOf(b)
    } else if raValid != rbValid {
        // Both values must be valid to be equal
        return false
    }

    return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
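A short sketch of how the removed DeepEqual differs from the standard reflect.DeepEqual; the literal values are illustrative:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
    s := "hello"

    // The inputs are dereferenced before comparison, so a *string and a
    // plain string with the same contents compare equal.
    fmt.Println(awsutil.DeepEqual(&s, "hello")) // true
    fmt.Println(awsutil.DeepEqual(&s, "world")) // false
}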
222 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go (generated, vendored)
@@ -1,222 +0,0 @@
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/jmespath/go-jmespath"
|
|
||||||
)
|
|
||||||
|
|
||||||
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
|
|
||||||
|
|
||||||
// rValuesAtPath returns a slice of values found in value v. The values
|
|
||||||
// in v are explored recursively so all nested values are collected.
|
|
||||||
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
|
|
||||||
pathparts := strings.Split(path, "||")
|
|
||||||
if len(pathparts) > 1 {
|
|
||||||
for _, pathpart := range pathparts {
|
|
||||||
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
|
|
||||||
if len(vals) > 0 {
|
|
||||||
return vals
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
|
|
||||||
components := strings.Split(path, ".")
|
|
||||||
for len(values) > 0 && len(components) > 0 {
|
|
||||||
var index *int64
|
|
||||||
var indexStar bool
|
|
||||||
c := strings.TrimSpace(components[0])
|
|
||||||
if c == "" { // no actual component, illegal syntax
|
|
||||||
return nil
|
|
||||||
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
|
|
||||||
// TODO normalize case for user
|
|
||||||
return nil // don't support unexported fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse this component
|
|
||||||
if m := indexRe.FindStringSubmatch(c); m != nil {
|
|
||||||
c = m[1]
|
|
||||||
if m[2] == "" {
|
|
||||||
index = nil
|
|
||||||
indexStar = true
|
|
||||||
} else {
|
|
||||||
i, _ := strconv.ParseInt(m[2], 10, 32)
|
|
||||||
index = &i
|
|
||||||
indexStar = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nextvals := []reflect.Value{}
|
|
||||||
for _, value := range values {
|
|
||||||
// pull component name out of struct member
|
|
||||||
if value.Kind() != reflect.Struct {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if c == "*" { // pull all members
|
|
||||||
for i := 0; i < value.NumField(); i++ {
|
|
||||||
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
|
|
||||||
nextvals = append(nextvals, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
value = value.FieldByNameFunc(func(name string) bool {
|
|
||||||
if c == name {
|
|
||||||
return true
|
|
||||||
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
|
|
||||||
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
|
|
||||||
if !value.IsNil() {
|
|
||||||
value.Set(reflect.Zero(value.Type()))
|
|
||||||
}
|
|
||||||
return []reflect.Value{value}
|
|
||||||
}
|
|
||||||
|
|
||||||
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
|
|
||||||
// TODO if the value is the terminus it should not be created
|
|
||||||
// if the value to be set to its position is nil.
|
|
||||||
value.Set(reflect.New(value.Type().Elem()))
|
|
||||||
value = value.Elem()
|
|
||||||
} else {
|
|
||||||
value = reflect.Indirect(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
|
||||||
if !createPath && value.IsNil() {
|
|
||||||
value = reflect.ValueOf(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.IsValid() {
|
|
||||||
nextvals = append(nextvals, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
values = nextvals
|
|
||||||
|
|
||||||
if indexStar || index != nil {
|
|
||||||
nextvals = []reflect.Value{}
|
|
||||||
for _, valItem := range values {
|
|
||||||
value := reflect.Indirect(valItem)
|
|
||||||
if value.Kind() != reflect.Slice {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if indexStar { // grab all indices
|
|
||||||
for i := 0; i < value.Len(); i++ {
|
|
||||||
idx := reflect.Indirect(value.Index(i))
|
|
||||||
if idx.IsValid() {
|
|
||||||
nextvals = append(nextvals, idx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// pull out index
|
|
||||||
i := int(*index)
|
|
||||||
if i >= value.Len() { // check out of bounds
|
|
||||||
if createPath {
|
|
||||||
// TODO resize slice
|
|
||||||
} else {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if i < 0 { // support negative indexing
|
|
||||||
i = value.Len() + i
|
|
||||||
}
|
|
||||||
value = reflect.Indirect(value.Index(i))
|
|
||||||
|
|
||||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
|
||||||
if !createPath && value.IsNil() {
|
|
||||||
value = reflect.ValueOf(nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if value.IsValid() {
|
|
||||||
nextvals = append(nextvals, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
values = nextvals
|
|
||||||
}
|
|
||||||
|
|
||||||
components = components[1:]
|
|
||||||
}
|
|
||||||
return values
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValuesAtPath returns a list of values at the case insensitive lexical
|
|
||||||
// path inside of a structure.
|
|
||||||
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
|
|
||||||
result, err := jmespath.Search(path, i)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
v := reflect.ValueOf(result)
|
|
||||||
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if s, ok := result.([]interface{}); ok {
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
if v.Kind() == reflect.Map && v.Len() == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if v.Kind() == reflect.Slice {
|
|
||||||
out := make([]interface{}, v.Len())
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
out[i] = v.Index(i).Interface()
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return []interface{}{result}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetValueAtPath sets a value at the case insensitive lexical path inside
|
|
||||||
// of a structure.
|
|
||||||
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
|
||||||
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
|
|
||||||
for _, rval := range rvals {
|
|
||||||
if rval.Kind() == reflect.Ptr && rval.IsNil() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
setValue(rval, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func setValue(dstVal reflect.Value, src interface{}) {
|
|
||||||
if dstVal.Kind() == reflect.Ptr {
|
|
||||||
dstVal = reflect.Indirect(dstVal)
|
|
||||||
}
|
|
||||||
srcVal := reflect.ValueOf(src)
|
|
||||||
|
|
||||||
if !srcVal.IsValid() { // src is literal nil
|
|
||||||
if dstVal.CanAddr() {
|
|
||||||
// Convert to pointer so that pointer's value can be nil'ed
|
|
||||||
// dstVal = dstVal.Addr()
|
|
||||||
}
|
|
||||||
dstVal.Set(reflect.Zero(dstVal.Type()))
|
|
||||||
|
|
||||||
} else if srcVal.Kind() == reflect.Ptr {
|
|
||||||
if srcVal.IsNil() {
|
|
||||||
srcVal = reflect.Zero(dstVal.Type())
|
|
||||||
} else {
|
|
||||||
srcVal = reflect.ValueOf(src).Elem()
|
|
||||||
}
|
|
||||||
dstVal.Set(srcVal)
|
|
||||||
} else {
|
|
||||||
dstVal.Set(srcVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
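A hedged sketch of the path helpers removed above: SetValueAtPath writes through a case-insensitive lexical path, allocating nil intermediate pointers along the way, and ValuesAtPath evaluates a JMESPath expression against the value. The Outer and Inner types are hypothetical:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awsutil"
)

// Inner and Outer are hypothetical types used only to illustrate the helpers.
type Inner struct {
    Name *string
}

type Outer struct {
    Inner *Inner
}

func main() {
    o := &Outer{}

    // The nil Inner pointer is allocated before the leaf value is set.
    awsutil.SetValueAtPath(o, "Inner.Name", "example")

    // ValuesAtPath returns the values found at the path, if any.
    vals, err := awsutil.ValuesAtPath(o, "Inner.Name")
    fmt.Println(*o.Inner.Name, vals, err)
}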
113 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go (generated, vendored)
@@ -1,113 +0,0 @@
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Prettify returns the string representation of a value.
|
|
||||||
func Prettify(i interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
prettify(reflect.ValueOf(i), 0, &buf)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// prettify will recursively walk value v to build a textual
|
|
||||||
// representation of the value.
|
|
||||||
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
|
|
||||||
for v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
strtype := v.Type().String()
|
|
||||||
if strtype == "time.Time" {
|
|
||||||
fmt.Fprintf(buf, "%s", v.Interface())
|
|
||||||
break
|
|
||||||
} else if strings.HasPrefix(strtype, "io.") {
|
|
||||||
buf.WriteString("<buffer>")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
names := []string{}
|
|
||||||
for i := 0; i < v.Type().NumField(); i++ {
|
|
||||||
name := v.Type().Field(i).Name
|
|
||||||
f := v.Field(i)
|
|
||||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
|
||||||
continue // ignore unexported fields
|
|
||||||
}
|
|
||||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
|
|
||||||
continue // ignore unset fields
|
|
||||||
}
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, n := range names {
|
|
||||||
val := v.FieldByName(n)
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(n + ": ")
|
|
||||||
prettify(val, indent+2, buf)
|
|
||||||
|
|
||||||
if i < len(names)-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
case reflect.Slice:
|
|
||||||
strtype := v.Type().String()
|
|
||||||
if strtype == "[]uint8" {
|
|
||||||
fmt.Fprintf(buf, "<binary> len %d", v.Len())
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
nl, id, id2 := "", "", ""
|
|
||||||
if v.Len() > 3 {
|
|
||||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
|
||||||
}
|
|
||||||
buf.WriteString("[" + nl)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
buf.WriteString(id2)
|
|
||||||
prettify(v.Index(i), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString("," + nl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString(nl + id + "]")
|
|
||||||
case reflect.Map:
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
for i, k := range v.MapKeys() {
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(k.String() + ": ")
|
|
||||||
prettify(v.MapIndex(k), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
default:
|
|
||||||
if !v.IsValid() {
|
|
||||||
fmt.Fprint(buf, "<invalid value>")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
format := "%v"
|
|
||||||
switch v.Interface().(type) {
|
|
||||||
case string:
|
|
||||||
format = "%q"
|
|
||||||
case io.ReadSeeker, io.Reader:
|
|
||||||
format = "buffer(%p)"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(buf, format, v.Interface())
|
|
||||||
}
|
|
||||||
}
|
|
89 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go (generated, vendored)
@@ -1,89 +0,0 @@
package awsutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StringValue returns the string representation of a value.
|
|
||||||
func StringValue(i interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
stringValue(reflect.ValueOf(i), 0, &buf)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
|
|
||||||
for v.Kind() == reflect.Ptr {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
names := []string{}
|
|
||||||
for i := 0; i < v.Type().NumField(); i++ {
|
|
||||||
name := v.Type().Field(i).Name
|
|
||||||
f := v.Field(i)
|
|
||||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
|
||||||
continue // ignore unexported fields
|
|
||||||
}
|
|
||||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
|
|
||||||
continue // ignore unset fields
|
|
||||||
}
|
|
||||||
names = append(names, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, n := range names {
|
|
||||||
val := v.FieldByName(n)
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(n + ": ")
|
|
||||||
stringValue(val, indent+2, buf)
|
|
||||||
|
|
||||||
if i < len(names)-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
case reflect.Slice:
|
|
||||||
nl, id, id2 := "", "", ""
|
|
||||||
if v.Len() > 3 {
|
|
||||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
|
||||||
}
|
|
||||||
buf.WriteString("[" + nl)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
buf.WriteString(id2)
|
|
||||||
stringValue(v.Index(i), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString("," + nl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString(nl + id + "]")
|
|
||||||
case reflect.Map:
|
|
||||||
buf.WriteString("{\n")
|
|
||||||
|
|
||||||
for i, k := range v.MapKeys() {
|
|
||||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
|
||||||
buf.WriteString(k.String() + ": ")
|
|
||||||
stringValue(v.MapIndex(k), indent+2, buf)
|
|
||||||
|
|
||||||
if i < v.Len()-1 {
|
|
||||||
buf.WriteString(",\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
|
||||||
default:
|
|
||||||
format := "%v"
|
|
||||||
switch v.Interface().(type) {
|
|
||||||
case string:
|
|
||||||
format = "%q"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(buf, format, v.Interface())
|
|
||||||
}
|
|
||||||
}
|
|
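Prettify and StringValue, both removed above, take an arbitrary value and render its exported, non-nil fields as a string. A minimal sketch with a hypothetical Item type:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awsutil"
)

// Item is a hypothetical type used only for illustration.
type Item struct {
    Name  *string
    Count *int
}

func main() {
    name, count := "example", 3
    v := &Item{Name: &name, Count: &count}

    // Prettify quotes strings and renders time and binary fields specially.
    fmt.Println(awsutil.Prettify(v))

    // StringValue is similar but keeps plain %v formatting for leaf values.
    fmt.Println(awsutil.StringValue(v))
}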
90 vendor/github.com/aws/aws-sdk-go/aws/client/client.go (generated, vendored)
@@ -1,90 +0,0 @@
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Config provides configuration to a service client instance.
|
|
||||||
type Config struct {
|
|
||||||
Config *aws.Config
|
|
||||||
Handlers request.Handlers
|
|
||||||
Endpoint string
|
|
||||||
SigningRegion string
|
|
||||||
SigningName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigProvider provides a generic way for a service client to receive
|
|
||||||
// the ClientConfig without circular dependencies.
|
|
||||||
type ConfigProvider interface {
|
|
||||||
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
|
|
||||||
// resolve the endpoint automatically. The service client's endpoint must be
|
|
||||||
// provided via the aws.Config.Endpoint field.
|
|
||||||
type ConfigNoResolveEndpointProvider interface {
|
|
||||||
ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Client implements the base client request and response handling
|
|
||||||
// used by all service clients.
|
|
||||||
type Client struct {
|
|
||||||
request.Retryer
|
|
||||||
metadata.ClientInfo
|
|
||||||
|
|
||||||
Config aws.Config
|
|
||||||
Handlers request.Handlers
|
|
||||||
}
|
|
||||||
|
|
||||||
// New will return a pointer to a new initialized service client.
|
|
||||||
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
|
|
||||||
svc := &Client{
|
|
||||||
Config: cfg,
|
|
||||||
ClientInfo: info,
|
|
||||||
Handlers: handlers.Copy(),
|
|
||||||
}
|
|
||||||
|
|
||||||
switch retryer, ok := cfg.Retryer.(request.Retryer); {
|
|
||||||
case ok:
|
|
||||||
svc.Retryer = retryer
|
|
||||||
case cfg.Retryer != nil && cfg.Logger != nil:
|
|
||||||
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
|
|
||||||
cfg.Logger.Log(s)
|
|
||||||
fallthrough
|
|
||||||
default:
|
|
||||||
maxRetries := aws.IntValue(cfg.MaxRetries)
|
|
||||||
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
|
|
||||||
maxRetries = 3
|
|
||||||
}
|
|
||||||
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
|
|
||||||
}
|
|
||||||
|
|
||||||
svc.AddDebugHandlers()
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(svc)
|
|
||||||
}
|
|
||||||
|
|
||||||
return svc
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequest returns a new Request pointer for the service API
|
|
||||||
// operation and parameters.
|
|
||||||
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
|
|
||||||
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddDebugHandlers injects debug logging handlers into the service to log request
|
|
||||||
// debug information.
|
|
||||||
func (c *Client) AddDebugHandlers() {
|
|
||||||
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
|
|
||||||
c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
|
|
||||||
}
|
|
96 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go (generated, vendored)
@@ -1,96 +0,0 @@
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultRetryer implements basic retry logic using exponential backoff for
|
|
||||||
// most services. If you want to implement custom retry logic, implement the
|
|
||||||
// request.Retryer interface or create a structure type that composes this
|
|
||||||
// struct and override the specific methods. For example, to override only
|
|
||||||
// the MaxRetries method:
|
|
||||||
//
|
|
||||||
// type retryer struct {
|
|
||||||
// client.DefaultRetryer
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// // This implementation always has 100 max retries
|
|
||||||
// func (d retryer) MaxRetries() int { return 100 }
|
|
||||||
type DefaultRetryer struct {
|
|
||||||
NumMaxRetries int
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxRetries returns the number of maximum returns the service will use to make
|
|
||||||
// an individual API request.
|
|
||||||
func (d DefaultRetryer) MaxRetries() int {
|
|
||||||
return d.NumMaxRetries
|
|
||||||
}
|
|
||||||
|
|
||||||
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
|
|
||||||
|
|
||||||
// RetryRules returns the delay duration before retrying this request again
|
|
||||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
|
||||||
// Set the upper limit of delay in retrying at ~five minutes
|
|
||||||
minTime := 30
|
|
||||||
throttle := d.shouldThrottle(r)
|
|
||||||
if throttle {
|
|
||||||
minTime = 500
|
|
||||||
}
|
|
||||||
|
|
||||||
retryCount := r.RetryCount
|
|
||||||
if retryCount > 13 {
|
|
||||||
retryCount = 13
|
|
||||||
} else if throttle && retryCount > 8 {
|
|
||||||
retryCount = 8
|
|
||||||
}
|
|
||||||
|
|
||||||
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
|
|
||||||
return time.Duration(delay) * time.Millisecond
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldRetry returns true if the request should be retried.
|
|
||||||
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
|
||||||
// If one of the other handlers already set the retry state
|
|
||||||
// we don't want to override it based on the service's state
|
|
||||||
if r.Retryable != nil {
|
|
||||||
return *r.Retryable
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.HTTPResponse.StatusCode >= 500 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldThrottle returns true if the request should be throttled.
|
|
||||||
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
|
||||||
if r.HTTPResponse.StatusCode == 502 ||
|
|
||||||
r.HTTPResponse.StatusCode == 503 ||
|
|
||||||
r.HTTPResponse.StatusCode == 504 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return r.IsErrorThrottle()
|
|
||||||
}
|
|
||||||
|
|
||||||
// lockedSource is a thread-safe implementation of rand.Source
|
|
||||||
type lockedSource struct {
|
|
||||||
lk sync.Mutex
|
|
||||||
src rand.Source
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lockedSource) Int63() (n int64) {
|
|
||||||
r.lk.Lock()
|
|
||||||
n = r.src.Int63()
|
|
||||||
r.lk.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *lockedSource) Seed(seed int64) {
|
|
||||||
r.lk.Lock()
|
|
||||||
r.src.Seed(seed)
|
|
||||||
r.lk.Unlock()
|
|
||||||
}
|
|
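The composition pattern described in the DefaultRetryer doc comment above, written out as a runnable sketch; the retry counts are arbitrary:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/client"
)

// retryer composes client.DefaultRetryer and overrides only MaxRetries,
// keeping the default backoff and throttling behavior.
type retryer struct {
    client.DefaultRetryer
}

func (d retryer) MaxRetries() int { return 100 }

func main() {
    r := retryer{DefaultRetryer: client.DefaultRetryer{NumMaxRetries: 10}}
    fmt.Println(r.MaxRetries()) // 100; the override wins
}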
108 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go (generated, vendored)
@@ -1,108 +0,0 @@
package client
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http/httputil"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
const logReqMsg = `DEBUG: Request %s/%s Details:
|
|
||||||
---[ REQUEST POST-SIGN ]-----------------------------
|
|
||||||
%s
|
|
||||||
-----------------------------------------------------`
|
|
||||||
|
|
||||||
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
|
|
||||||
---[ REQUEST DUMP ERROR ]-----------------------------
|
|
||||||
%s
|
|
||||||
------------------------------------------------------`
|
|
||||||
|
|
||||||
type logWriter struct {
|
|
||||||
// Logger is what we will use to log the payload of a response.
|
|
||||||
Logger aws.Logger
|
|
||||||
// buf stores the contents of what has been read
|
|
||||||
buf *bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *logWriter) Write(b []byte) (int, error) {
|
|
||||||
return logger.buf.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
type teeReaderCloser struct {
|
|
||||||
// io.Reader will be a tee reader that is used during logging.
|
|
||||||
// This structure will read from a body and write the contents to a logger.
|
|
||||||
io.Reader
|
|
||||||
// Source is used just to close when we are done reading.
|
|
||||||
Source io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (reader *teeReaderCloser) Close() error {
|
|
||||||
return reader.Source.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func logRequest(r *request.Request) {
|
|
||||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
|
||||||
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
|
||||||
if err != nil {
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if logBody {
|
|
||||||
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
|
|
||||||
// Body as a NoOpCloser and will not be reset after read by the HTTP
|
|
||||||
// client reader.
|
|
||||||
r.ResetBody()
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
|
|
||||||
}
|
|
||||||
|
|
||||||
const logRespMsg = `DEBUG: Response %s/%s Details:
|
|
||||||
---[ RESPONSE ]--------------------------------------
|
|
||||||
%s
|
|
||||||
-----------------------------------------------------`
|
|
||||||
|
|
||||||
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
|
|
||||||
---[ RESPONSE DUMP ERROR ]-----------------------------
|
|
||||||
%s
|
|
||||||
-----------------------------------------------------`
|
|
||||||
|
|
||||||
func logResponse(r *request.Request) {
|
|
||||||
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
|
|
||||||
r.HTTPResponse.Body = &teeReaderCloser{
|
|
||||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
|
||||||
Source: r.HTTPResponse.Body,
|
|
||||||
}
|
|
||||||
|
|
||||||
handlerFn := func(req *request.Request) {
|
|
||||||
body, err := httputil.DumpResponse(req.HTTPResponse, false)
|
|
||||||
if err != nil {
|
|
||||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := ioutil.ReadAll(lw.buf)
|
|
||||||
if err != nil {
|
|
||||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
|
|
||||||
if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
|
|
||||||
lw.Logger.Log(string(b))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const handlerName = "awsdk.client.LogResponse.ResponseBody"
|
|
||||||
|
|
||||||
r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
|
|
||||||
Name: handlerName, Fn: handlerFn,
|
|
||||||
})
|
|
||||||
r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
|
|
||||||
Name: handlerName, Fn: handlerFn,
|
|
||||||
})
|
|
||||||
}
|
|
12 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go (generated, vendored)
@@ -1,12 +0,0 @@
package metadata

// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
    ServiceName   string
    APIVersion    string
    Endpoint      string
    SigningName   string
    SigningRegion string
    JSONVersion   string
    TargetPrefix  string
}
470 vendor/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored)
@@ -1,470 +0,0 @@
package aws
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UseServiceDefaultRetries instructs the config to use the service's own
|
|
||||||
// default number of retries. This will be the default action if
|
|
||||||
// Config.MaxRetries is nil also.
|
|
||||||
const UseServiceDefaultRetries = -1
|
|
||||||
|
|
||||||
// RequestRetryer is an alias for a type that implements the request.Retryer
|
|
||||||
// interface.
|
|
||||||
type RequestRetryer interface{}
|
|
||||||
|
|
||||||
// A Config provides service configuration for service clients. By default,
|
|
||||||
// all clients will use the defaults.DefaultConfig tructure.
|
|
||||||
//
|
|
||||||
// // Create Session with MaxRetry configuration to be shared by multiple
|
|
||||||
// // service clients.
|
|
||||||
// sess := session.Must(session.NewSession(&aws.Config{
|
|
||||||
// MaxRetries: aws.Int(3),
|
|
||||||
// }))
|
|
||||||
//
|
|
||||||
// // Create S3 service client with a specific Region.
|
|
||||||
// svc := s3.New(sess, &aws.Config{
|
|
||||||
// Region: aws.String("us-west-2"),
|
|
||||||
// })
|
|
||||||
type Config struct {
|
|
||||||
// Enables verbose error printing of all credential chain errors.
|
|
||||||
// Should be used when wanting to see all errors while attempting to
|
|
||||||
// retrieve credentials.
|
|
||||||
CredentialsChainVerboseErrors *bool
|
|
||||||
|
|
||||||
// The credentials object to use when signing requests. Defaults to a
|
|
||||||
// chain of credential providers to search for credentials in environment
|
|
||||||
// variables, shared credential file, and EC2 Instance Roles.
|
|
||||||
Credentials *credentials.Credentials
|
|
||||||
|
|
||||||
// An optional endpoint URL (hostname only or fully qualified URI)
|
|
||||||
// that overrides the default generated endpoint for a client. Set this
|
|
||||||
// to `""` to use the default generated endpoint.
|
|
||||||
//
|
|
||||||
// @note You must still provide a `Region` value when specifying an
|
|
||||||
// endpoint for a client.
|
|
||||||
Endpoint *string
|
|
||||||
|
|
||||||
// The resolver to use for looking up endpoints for AWS service clients
|
|
||||||
// to use based on region.
|
|
||||||
EndpointResolver endpoints.Resolver
|
|
||||||
|
|
||||||
// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
|
|
||||||
// ShouldRetry regardless of whether or not if request.Retryable is set.
|
|
||||||
// This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
|
|
||||||
// is not set, then ShouldRetry will only be called if request.Retryable is nil.
|
|
||||||
// Proper handling of the request.Retryable field is important when setting this field.
|
|
||||||
EnforceShouldRetryCheck *bool
|
|
||||||
|
|
||||||
// The region to send requests to. This parameter is required and must
|
|
||||||
// be configured globally or on a per-client basis unless otherwise
|
|
||||||
// noted. A full list of regions is found in the "Regions and Endpoints"
|
|
||||||
// document.
|
|
||||||
//
|
|
||||||
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
|
|
||||||
// AWS Regions and Endpoints
|
|
||||||
Region *string
|
|
||||||
|
|
||||||
// Set this to `true` to disable SSL when sending requests. Defaults
|
|
||||||
// to `false`.
|
|
||||||
DisableSSL *bool
|
|
||||||
|
|
||||||
// The HTTP client to use when sending requests. Defaults to
|
|
||||||
// `http.DefaultClient`.
|
|
||||||
HTTPClient *http.Client
|
|
||||||
|
|
||||||
// An integer value representing the logging level. The default log level
|
|
||||||
// is zero (LogOff), which represents no logging. To enable logging set
|
|
||||||
// to a LogLevel Value.
|
|
||||||
LogLevel *LogLevelType
|
|
||||||
|
|
||||||
// The logger writer interface to write logging messages to. Defaults to
|
|
||||||
// standard out.
|
|
||||||
Logger Logger
|
|
||||||
|
|
||||||
// The maximum number of times that a request will be retried for failures.
|
|
||||||
// Defaults to -1, which defers the max retry setting to the service
|
|
||||||
// specific configuration.
|
|
||||||
MaxRetries *int
|
|
||||||
|
|
||||||
// Retryer guides how HTTP requests should be retried in case of
|
|
||||||
// recoverable failures.
|
|
||||||
//
|
|
||||||
// When nil or the value does not implement the request.Retryer interface,
|
|
||||||
// the client.DefaultRetryer will be used.
|
|
||||||
//
|
|
||||||
// When both Retryer and MaxRetries are non-nil, the former is used and
|
|
||||||
// the latter ignored.
|
|
||||||
//
|
|
||||||
// To set the Retryer field in a type-safe manner and with chaining, use
|
|
||||||
// the request.WithRetryer helper function:
|
|
||||||
//
|
|
||||||
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
|
|
||||||
//
|
|
||||||
Retryer RequestRetryer
|
|
||||||
|
|
||||||
// Disables semantic parameter validation, which validates input for
|
|
||||||
// missing required fields and/or other semantic request input errors.
|
|
||||||
DisableParamValidation *bool
|
|
||||||
|
|
||||||
// Disables the computation of request and response checksums, e.g.,
|
|
||||||
// CRC32 checksums in Amazon DynamoDB.
|
|
||||||
DisableComputeChecksums *bool
|
|
||||||
|
|
||||||
// Set this to `true` to force the request to use path-style addressing,
|
|
||||||
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
|
|
||||||
// will use virtual hosted bucket addressing when possible
|
|
||||||
// (`http://BUCKET.s3.amazonaws.com/KEY`).
|
|
||||||
//
|
|
||||||
// @note This configuration option is specific to the Amazon S3 service.
|
|
||||||
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
|
|
||||||
// Amazon S3: Virtual Hosting of Buckets
|
|
||||||
S3ForcePathStyle *bool
|
|
||||||
|
|
||||||
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
|
|
||||||
// header to PUT requests over 2MB of content. 100-Continue instructs the
|
|
||||||
// HTTP client not to send the body until the service responds with a
|
|
||||||
// `continue` status. This is useful to prevent sending the request body
|
|
||||||
// until after the request is authenticated, and validated.
|
|
||||||
//
|
|
||||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
|
||||||
//
|
|
||||||
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
|
|
||||||
// `ExpectContinueTimeout` for information on adjusting the continue wait
|
|
||||||
// timeout. https://golang.org/pkg/net/http/#Transport
|
|
||||||
//
|
|
||||||
// You should use this flag to disble 100-Continue if you experience issues
|
|
||||||
// with proxies or third party S3 compatible services.
|
|
||||||
S3Disable100Continue *bool
|
|
||||||
|
|
||||||
// Set this to `true` to enable S3 Accelerate feature. For all operations
|
|
||||||
// compatible with S3 Accelerate will use the accelerate endpoint for
|
|
||||||
// requests. Requests not compatible will fall back to normal S3 requests.
|
|
||||||
//
|
|
||||||
// The bucket must be enable for accelerate to be used with S3 client with
|
|
||||||
// accelerate enabled. If the bucket is not enabled for accelerate an error
|
|
||||||
// will be returned. The bucket name must be DNS compatible to also work
|
|
||||||
// with accelerate.
|
|
||||||
S3UseAccelerate *bool
|
|
||||||
|
|
||||||
// Set this to `true` to disable the EC2Metadata client from overriding the
|
|
||||||
// default http.Client's Timeout. This is helpful if you do not want the
|
|
||||||
// EC2Metadata client to create a new http.Client. This options is only
|
|
||||||
// meaningful if you're not already using a custom HTTP client with the
|
|
||||||
// SDK. Enabled by default.
|
|
||||||
//
|
|
||||||
// Must be set and provided to the session.NewSession() in order to disable
|
|
||||||
// the EC2Metadata overriding the timeout for default credentials chain.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// sess := session.Must(session.NewSession(aws.NewConfig()
|
|
||||||
// .WithEC2MetadataDiableTimeoutOverride(true)))
|
|
||||||
//
|
|
||||||
// svc := s3.New(sess)
|
|
||||||
//
|
|
||||||
EC2MetadataDisableTimeoutOverride *bool
|
|
||||||
|
|
||||||
// Instructs the endpiont to be generated for a service client to
|
|
||||||
// be the dual stack endpoint. The dual stack endpoint will support
|
|
||||||
// both IPv4 and IPv6 addressing.
|
|
||||||
//
|
|
||||||
// Setting this for a service which does not support dual stack will fail
|
|
||||||
// to make requets. It is not recommended to set this value on the session
|
|
||||||
// as it will apply to all service clients created with the session. Even
|
|
||||||
// services which don't support dual stack endpoints.
|
|
||||||
//
|
|
||||||
// If the Endpoint config value is also provided the UseDualStack flag
|
|
||||||
// will be ignored.
|
|
||||||
//
|
|
||||||
// Only supported with.
|
|
||||||
//
|
|
||||||
// sess := session.Must(session.NewSession())
|
|
||||||
//
|
|
||||||
// svc := s3.New(sess, &aws.Config{
|
|
||||||
// UseDualStack: aws.Bool(true),
|
|
||||||
// })
|
|
||||||
UseDualStack *bool
|
|
||||||
|
|
||||||
// SleepDelay is an override for the func the SDK will call when sleeping
|
|
||||||
// during the lifecycle of a request. Specifically this will be used for
|
|
||||||
// request delays. This value should only be used for testing. To adjust
|
|
||||||
// the delay of a request see the aws/client.DefaultRetryer and
|
|
||||||
// aws/request.Retryer.
|
|
||||||
//
|
|
||||||
// SleepDelay will prevent any Context from being used for canceling retry
|
|
||||||
// delay of an API operation. It is recommended to not use SleepDelay at all
|
|
||||||
// and specify a Retryer instead.
|
|
||||||
SleepDelay func(time.Duration)
|
|
||||||
|
|
||||||
// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
|
|
||||||
// Will default to false. This would only be used for empty directory names in s3 requests.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// sess := session.Must(session.NewSession(&aws.Config{
|
|
||||||
// DisableRestProtocolURICleaning: aws.Bool(true),
|
|
||||||
// }))
|
|
||||||
//
|
|
||||||
// svc := s3.New(sess)
|
|
||||||
// out, err := svc.GetObject(&s3.GetObjectInput {
|
|
||||||
// Bucket: aws.String("bucketname"),
|
|
||||||
// Key: aws.String("//foo//bar//moo"),
|
|
||||||
// })
|
|
||||||
DisableRestProtocolURICleaning *bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig returns a new Config pointer that can be chained with builder
|
|
||||||
// methods to set multiple configuration values inline without using pointers.
|
|
||||||
//
|
|
||||||
// // Create Session with MaxRetry configuration to be shared by multiple
|
|
||||||
// // service clients.
|
|
||||||
// sess := session.Must(session.NewSession(aws.NewConfig().
|
|
||||||
// WithMaxRetries(3),
|
|
||||||
// ))
|
|
||||||
//
|
|
||||||
// // Create S3 service client with a specific Region.
|
|
||||||
// svc := s3.New(sess, aws.NewConfig().
|
|
||||||
// WithRegion("us-west-2"),
|
|
||||||
// )
|
|
||||||
func NewConfig() *Config {
|
|
||||||
return &Config{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
|
|
||||||
// a Config pointer.
|
|
||||||
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
|
|
||||||
c.CredentialsChainVerboseErrors = &verboseErrs
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCredentials sets a config Credentials value returning a Config pointer
|
|
||||||
// for chaining.
|
|
||||||
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
|
|
||||||
c.Credentials = creds
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
|
||||||
// chaining.
|
|
||||||
func (c *Config) WithEndpoint(endpoint string) *Config {
|
|
||||||
c.Endpoint = &endpoint
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithEndpointResolver sets a config EndpointResolver value returning a
|
|
||||||
// Config pointer for chaining.
|
|
||||||
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
|
|
||||||
c.EndpointResolver = resolver
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRegion sets a config Region value returning a Config pointer for
|
|
||||||
// chaining.
|
|
||||||
func (c *Config) WithRegion(region string) *Config {
|
|
||||||
c.Region = ®ion
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
|
||||||
// for chaining.
|
|
||||||
func (c *Config) WithDisableSSL(disable bool) *Config {
|
|
||||||
c.DisableSSL = &disable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
|
|
||||||
// for chaining.
|
|
||||||
func (c *Config) WithHTTPClient(client *http.Client) *Config {
|
|
||||||
c.HTTPClient = client
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
|
||||||
// for chaining.
|
|
||||||
func (c *Config) WithMaxRetries(max int) *Config {
|
|
||||||
c.MaxRetries = &max
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDisableParamValidation sets a config DisableParamValidation value
|
|
||||||
// returning a Config pointer for chaining.
|
|
||||||
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
|
||||||
c.DisableParamValidation = &disable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
|
||||||
// returning a Config pointer for chaining.
|
|
||||||
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
|
||||||
c.DisableComputeChecksums = &disable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
|
||||||
// chaining.
|
|
||||||
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
|
||||||
c.LogLevel = &level
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLogger sets a config Logger value returning a Config pointer for
|
|
||||||
// chaining.
|
|
||||||
func (c *Config) WithLogger(logger Logger) *Config {
|
|
||||||
c.Logger = logger
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
|
||||||
// pointer for chaining.
|
|
||||||
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
|
||||||
c.S3ForcePathStyle = &force
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
|
|
||||||
// a Config pointer for chaining.
|
|
||||||
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
|
||||||
c.S3Disable100Continue = &disable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
|
|
||||||
// pointer for chaining.
|
|
||||||
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
|
||||||
c.S3UseAccelerate = &enable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithUseDualStack sets a config UseDualStack value returning a Config
|
|
||||||
// pointer for chaining.
|
|
||||||
func (c *Config) WithUseDualStack(enable bool) *Config {
|
|
||||||
c.UseDualStack = &enable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
|
||||||
// returning a Config pointer for chaining.
|
|
||||||
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
|
||||||
c.EC2MetadataDisableTimeoutOverride = &enable
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithSleepDelay overrides the function used to sleep while waiting for the
|
|
||||||
// next retry. Defaults to time.Sleep.
|
|
||||||
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
|
|
||||||
c.SleepDelay = fn
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeIn merges the passed in configs into the existing config object.
|
|
||||||
func (c *Config) MergeIn(cfgs ...*Config) {
|
|
||||||
for _, other := range cfgs {
|
|
||||||
mergeInConfig(c, other)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeInConfig(dst *Config, other *Config) {
|
|
||||||
if other == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.CredentialsChainVerboseErrors != nil {
|
|
||||||
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.Credentials != nil {
|
|
||||||
dst.Credentials = other.Credentials
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.Endpoint != nil {
|
|
||||||
dst.Endpoint = other.Endpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.EndpointResolver != nil {
|
|
||||||
dst.EndpointResolver = other.EndpointResolver
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.Region != nil {
|
|
||||||
dst.Region = other.Region
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.DisableSSL != nil {
|
|
||||||
dst.DisableSSL = other.DisableSSL
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.HTTPClient != nil {
|
|
||||||
dst.HTTPClient = other.HTTPClient
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.LogLevel != nil {
|
|
||||||
dst.LogLevel = other.LogLevel
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.Logger != nil {
|
|
||||||
dst.Logger = other.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.MaxRetries != nil {
|
|
||||||
dst.MaxRetries = other.MaxRetries
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.Retryer != nil {
|
|
||||||
dst.Retryer = other.Retryer
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.DisableParamValidation != nil {
|
|
||||||
dst.DisableParamValidation = other.DisableParamValidation
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.DisableComputeChecksums != nil {
|
|
||||||
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.S3ForcePathStyle != nil {
|
|
||||||
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.S3Disable100Continue != nil {
|
|
||||||
dst.S3Disable100Continue = other.S3Disable100Continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.S3UseAccelerate != nil {
|
|
||||||
dst.S3UseAccelerate = other.S3UseAccelerate
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.UseDualStack != nil {
|
|
||||||
dst.UseDualStack = other.UseDualStack
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.EC2MetadataDisableTimeoutOverride != nil {
|
|
||||||
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.SleepDelay != nil {
|
|
||||||
dst.SleepDelay = other.SleepDelay
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.DisableRestProtocolURICleaning != nil {
|
|
||||||
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
|
|
||||||
}
|
|
||||||
|
|
||||||
if other.EnforceShouldRetryCheck != nil {
|
|
||||||
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy will return a shallow copy of the Config object. If any additional
|
|
||||||
// configurations are provided they will be merged into the new config returned.
|
|
||||||
func (c *Config) Copy(cfgs ...*Config) *Config {
|
|
||||||
dst := &Config{}
|
|
||||||
dst.MergeIn(c)
|
|
||||||
|
|
||||||
for _, cfg := range cfgs {
|
|
||||||
dst.MergeIn(cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return dst
|
|
||||||
}
|
|
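A minimal sketch of the chainable Config builder and the Copy/MergeIn semantics defined in the file removed above; the region and retry values are arbitrary:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // Build a base config with the chainable With* setters.
    base := aws.NewConfig().
        WithRegion("us-west-2").
        WithMaxRetries(3)

    // Copy returns a fresh Config; additional configs are merged into it,
    // with non-nil fields of later configs taking precedence.
    override := aws.NewConfig().WithRegion("eu-central-1")
    merged := base.Copy(override)

    fmt.Println(*merged.Region, *merged.MaxRetries) // eu-central-1 3
}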
71 vendor/github.com/aws/aws-sdk-go/aws/context.go (generated, vendored)
@@ -1,71 +0,0 @@
package aws
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Context is an copy of the Go v1.7 stdlib's context.Context interface.
|
|
||||||
// It is represented as a SDK interface to enable you to use the "WithContext"
|
|
||||||
// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
|
|
||||||
//
|
|
||||||
// See https://golang.org/pkg/context on how to use contexts.
|
|
||||||
type Context interface {
|
|
||||||
// Deadline returns the time when work done on behalf of this context
|
|
||||||
// should be canceled. Deadline returns ok==false when no deadline is
|
|
||||||
// set. Successive calls to Deadline return the same results.
|
|
||||||
Deadline() (deadline time.Time, ok bool)
|
|
||||||
|
|
||||||
// Done returns a channel that's closed when work done on behalf of this
|
|
||||||
// context should be canceled. Done may return nil if this context can
|
|
||||||
// never be canceled. Successive calls to Done return the same value.
|
|
||||||
Done() <-chan struct{}
|
|
||||||
|
|
||||||
// Err returns a non-nil error value after Done is closed. Err returns
|
|
||||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
|
||||||
// context's deadline passed. No other values for Err are defined.
|
|
||||||
// After Done is closed, successive calls to Err return the same value.
|
|
||||||
Err() error
|
|
||||||
|
|
||||||
// Value returns the value associated with this context for key, or nil
|
|
||||||
// if no value is associated with key. Successive calls to Value with
|
|
||||||
// the same key returns the same result.
|
|
||||||
//
|
|
||||||
// Use context values only for request-scoped data that transits
|
|
||||||
// processes and API boundaries, not for passing optional parameters to
|
|
||||||
// functions.
|
|
||||||
Value(key interface{}) interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BackgroundContext returns a context that will never be canceled, has no
|
|
||||||
// values, and no deadline. This context is used by the SDK to provide
|
|
||||||
// backwards compatibility with non-context API operations and functionality.
|
|
||||||
//
|
|
||||||
// Go 1.6 and before:
|
|
||||||
// This context function is equivalent to context.Background in the Go stdlib.
|
|
||||||
//
|
|
||||||
// Go 1.7 and later:
|
|
||||||
// The context returned will be the value returned by context.Background()
|
|
||||||
//
|
|
||||||
// See https://golang.org/pkg/context for more information on Contexts.
|
|
||||||
func BackgroundContext() Context {
|
|
||||||
return backgroundCtx
|
|
||||||
}
|
|
||||||
|
|
||||||
// SleepWithContext will wait for the timer duration to expire, or the context
|
|
||||||
// is canceled. Which ever happens first. If the context is canceled the Context's
|
|
||||||
// error will be returned.
|
|
||||||
//
|
|
||||||
// Expects Context to always return a non-nil error if the Done channel is closed.
|
|
||||||
func SleepWithContext(ctx Context, dur time.Duration) error {
|
|
||||||
t := time.NewTimer(dur)
|
|
||||||
defer t.Stop()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-t.C:
|
|
||||||
break
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
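A short sketch of the Context helpers removed above: BackgroundContext supplies a never-canceled Context, and SleepWithContext returns early with the context's error if it is canceled first. The duration is arbitrary:

package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    ctx := aws.BackgroundContext()

    // Waits for the timer or the context, whichever finishes first.
    if err := aws.SleepWithContext(ctx, 10*time.Millisecond); err != nil {
        fmt.Println("sleep interrupted:", err)
    }
}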
41 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go (generated, vendored)
@@ -1,41 +0,0 @@
// +build !go1.7

package aws

import "time"

// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
// provide a 1.6 and 1.5 safe version of context that is compatible with Go
// 1.7's Context.
//
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int

func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
    return
}

func (*emptyCtx) Done() <-chan struct{} {
    return nil
}

func (*emptyCtx) Err() error {
    return nil
}

func (*emptyCtx) Value(key interface{}) interface{} {
    return nil
}

func (e *emptyCtx) String() string {
    switch e {
    case backgroundCtx:
        return "aws.BackgroundContext"
    }
    return "unknown empty Context"
}

var (
    backgroundCtx = new(emptyCtx)
)
9 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go (generated, vendored)
@@ -1,9 +0,0 @@
// +build go1.7

package aws

import "context"

var (
    backgroundCtx = context.Background()
)
387 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go (generated, vendored)
@@ -1,387 +0,0 @@
package aws
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// String returns a pointer to the string value passed in.
|
|
||||||
func String(v string) *string {
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringValue returns the value of the string pointer passed in or
|
|
||||||
// "" if the pointer is nil.
|
|
func StringValue(v *string) string {
    if v != nil {
        return *v
    }
    return ""
}

// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
    dst := make([]*string, len(src))
    for i := 0; i < len(src); i++ {
        dst[i] = &(src[i])
    }
    return dst
}

// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
    dst := make([]string, len(src))
    for i := 0; i < len(src); i++ {
        if src[i] != nil {
            dst[i] = *(src[i])
        }
    }
    return dst
}

// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
    dst := make(map[string]*string)
    for k, val := range src {
        v := val
        dst[k] = &v
    }
    return dst
}

// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
    dst := make(map[string]string)
    for k, val := range src {
        if val != nil {
            dst[k] = *val
        }
    }
    return dst
}

// Bool returns a pointer to the bool value passed in.
func Bool(v bool) *bool {
    return &v
}

// BoolValue returns the value of the bool pointer passed in or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
    if v != nil {
        return *v
    }
    return false
}

// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
    dst := make([]*bool, len(src))
    for i := 0; i < len(src); i++ {
        dst[i] = &(src[i])
    }
    return dst
}

// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
    dst := make([]bool, len(src))
    for i := 0; i < len(src); i++ {
        if src[i] != nil {
            dst[i] = *(src[i])
        }
    }
    return dst
}

// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
    dst := make(map[string]*bool)
    for k, val := range src {
        v := val
        dst[k] = &v
    }
    return dst
}

// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
    dst := make(map[string]bool)
    for k, val := range src {
        if val != nil {
            dst[k] = *val
        }
    }
    return dst
}

// Int returns a pointer to the int value passed in.
func Int(v int) *int {
    return &v
}

// IntValue returns the value of the int pointer passed in or
// 0 if the pointer is nil.
func IntValue(v *int) int {
    if v != nil {
        return *v
    }
    return 0
}

// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
    dst := make([]*int, len(src))
    for i := 0; i < len(src); i++ {
        dst[i] = &(src[i])
    }
    return dst
}

// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
    dst := make([]int, len(src))
    for i := 0; i < len(src); i++ {
        if src[i] != nil {
            dst[i] = *(src[i])
        }
    }
    return dst
}

// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
    dst := make(map[string]*int)
    for k, val := range src {
        v := val
        dst[k] = &v
    }
    return dst
}

// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
    dst := make(map[string]int)
    for k, val := range src {
        if val != nil {
            dst[k] = *val
        }
    }
    return dst
}

// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
    return &v
}

// Int64Value returns the value of the int64 pointer passed in or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
    if v != nil {
        return *v
    }
    return 0
}

// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
    dst := make([]*int64, len(src))
    for i := 0; i < len(src); i++ {
        dst[i] = &(src[i])
    }
    return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
    dst := make([]int64, len(src))
    for i := 0; i < len(src); i++ {
        if src[i] != nil {
            dst[i] = *(src[i])
        }
    }
    return dst
}

// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
    dst := make(map[string]*int64)
    for k, val := range src {
        v := val
        dst[k] = &v
    }
    return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
    dst := make(map[string]int64)
    for k, val := range src {
        if val != nil {
            dst[k] = *val
        }
    }
    return dst
}

// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
    return &v
}

// Float64Value returns the value of the float64 pointer passed in or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
    if v != nil {
        return *v
    }
    return 0
}

// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
    dst := make([]*float64, len(src))
    for i := 0; i < len(src); i++ {
        dst[i] = &(src[i])
    }
    return dst
}

// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
    dst := make([]float64, len(src))
    for i := 0; i < len(src); i++ {
        if src[i] != nil {
            dst[i] = *(src[i])
        }
    }
    return dst
}

// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
    dst := make(map[string]*float64)
    for k, val := range src {
        v := val
        dst[k] = &v
    }
    return dst
}

// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
    dst := make(map[string]float64)
    for k, val := range src {
        if val != nil {
            dst[k] = *val
        }
    }
    return dst
}

// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time {
    return &v
}

// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
    if v != nil {
        return *v
    }
    return time.Time{}
}

// SecondsTimeValue converts an int64 pointer to a time.Time value
// representing seconds since Epoch or time.Time{} if the pointer is nil.
func SecondsTimeValue(v *int64) time.Time {
    if v != nil {
        return time.Unix((*v / 1000), 0)
    }
    return time.Time{}
}

// MillisecondsTimeValue converts an int64 pointer to a time.Time value
// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil.
func MillisecondsTimeValue(v *int64) time.Time {
    if v != nil {
        return time.Unix(0, (*v * 1000000))
    }
    return time.Time{}
}

// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64.
// Which includes calling TimeUnixMilli on a zero Time is undefined.
//
// This utility is useful for service API's such as CloudWatch Logs which require
// their unix time values to be in milliseconds.
//
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
func TimeUnixMilli(t time.Time) int64 {
    return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}

// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
    dst := make([]*time.Time, len(src))
    for i := 0; i < len(src); i++ {
        dst[i] = &(src[i])
    }
    return dst
}

// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
    dst := make([]time.Time, len(src))
    for i := 0; i < len(src); i++ {
        if src[i] != nil {
            dst[i] = *(src[i])
        }
    }
    return dst
}

// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
    dst := make(map[string]*time.Time)
    for k, val := range src {
        v := val
        dst[k] = &v
    }
    return dst
}

// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
    dst := make(map[string]time.Time)
    for k, val := range src {
        if val != nil {
            dst[k] = *val
        }
    }
    return dst
}
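These conversion helpers are what plugin code such as drone-s3-sync leans on when filling the pointer-heavy AWS SDK input structs. A minimal usage sketch (the bucket name and ACL below are made-up examples, not values from this repository):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Input structs use *string fields, so plain values are wrapped with aws.String.
    input := &s3.PutObjectInput{
        Bucket: aws.String("example-bucket"), // hypothetical bucket name
        Key:    aws.String("index.html"),
        ACL:    aws.String("public-read"),
    }

    // Reading goes the other way: aws.StringValue safely dereferences and
    // returns "" when the pointer is nil.
    fmt.Println(aws.StringValue(input.Bucket), aws.StringValue(input.ACL))
}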
242 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go generated vendored
@@ -1,242 +0,0 @@
package corehandlers

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/url"
    "regexp"
    "runtime"
    "strconv"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
)

// Interface for matching types which also have a Len method.
type lener interface {
    Len() int
}

// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
//
// The Content-Length will only be added to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
    var length int64

    if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
        length, _ = strconv.ParseInt(slength, 10, 64)
    } else {
        switch body := r.Body.(type) {
        case nil:
            length = 0
        case lener:
            length = int64(body.Len())
        case io.Seeker:
            r.BodyStart, _ = body.Seek(0, 1)
            end, _ := body.Seek(0, 2)
            body.Seek(r.BodyStart, 0) // make sure to seek back to original location
            length = end - r.BodyStart
        default:
            panic("Cannot get length of body, must provide `ContentLength`")
        }
    }

    if length > 0 {
        r.HTTPRequest.ContentLength = length
        r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
    } else {
        r.HTTPRequest.ContentLength = 0
        r.HTTPRequest.Header.Del("Content-Length")
    }
}}

// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
    Name: "core.SDKVersionUserAgentHandler",
    Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
        runtime.Version(), runtime.GOOS, runtime.GOARCH),
}

var reStatusCode = regexp.MustCompile(`^(\d{3})`)

// ValidateReqSigHandler is a request handler to ensure that the request's
// signature doesn't expire before it is sent. This can happen when a request
// is built and signed significantly before it is sent. Or significant delays
// occur when retrying requests that would cause the signature to expire.
var ValidateReqSigHandler = request.NamedHandler{
    Name: "core.ValidateReqSigHandler",
    Fn: func(r *request.Request) {
        // Unsigned requests are not signed
        if r.Config.Credentials == credentials.AnonymousCredentials {
            return
        }

        signedTime := r.Time
        if !r.LastSignedAt.IsZero() {
            signedTime = r.LastSignedAt
        }

        // 10 minutes to allow for some clock skew/delays in transmission.
        // Would be improved with aws/aws-sdk-go#423
        if signedTime.Add(10 * time.Minute).After(time.Now()) {
            return
        }

        fmt.Println("request expired, resigning")
        r.Sign()
    },
}

// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{
    Name: "core.SendHandler",
    Fn: func(r *request.Request) {
        sender := sendFollowRedirects
        if r.DisableFollowRedirects {
            sender = sendWithoutFollowRedirects
        }

        if request.NoBody == r.HTTPRequest.Body {
            // Strip off the request body if the NoBody reader was used as a
            // place holder for a request body. This prevents the SDK from
            // making requests with a request body when it would be invalid
            // to do so.
            //
            // Use a shallow copy of the http.Request to ensure the race condition
            // of transport on Body will not trigger
            reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
            reqCopy.Body = nil
            r.HTTPRequest = &reqCopy
            defer func() {
                r.HTTPRequest = reqOrig
            }()
        }

        var err error
        r.HTTPResponse, err = sender(r)
        if err != nil {
            handleSendError(r, err)
        }
    },
}

func sendFollowRedirects(r *request.Request) (*http.Response, error) {
    return r.Config.HTTPClient.Do(r.HTTPRequest)
}

func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
    transport := r.Config.HTTPClient.Transport
    if transport == nil {
        transport = http.DefaultTransport
    }

    return transport.RoundTrip(r.HTTPRequest)
}

func handleSendError(r *request.Request, err error) {
    // Prevent leaking if an HTTPResponse was returned. Clean up
    // the body.
    if r.HTTPResponse != nil {
        r.HTTPResponse.Body.Close()
    }
    // Capture the case where url.Error is returned for error processing
    // response. e.g. 301 without location header comes back as string
    // error and r.HTTPResponse is nil. Other URL redirect errors will
    // comeback in a similar method.
    if e, ok := err.(*url.Error); ok && e.Err != nil {
        if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
            code, _ := strconv.ParseInt(s[1], 10, 64)
            r.HTTPResponse = &http.Response{
                StatusCode: int(code),
                Status:     http.StatusText(int(code)),
                Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
            }
            return
        }
    }
    if r.HTTPResponse == nil {
        // Add a dummy request response object to ensure the HTTPResponse
        // value is consistent.
        r.HTTPResponse = &http.Response{
            StatusCode: int(0),
            Status:     http.StatusText(int(0)),
            Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
        }
    }
    // Catch all other request errors.
    r.Error = awserr.New("RequestError", "send request failed", err)
    r.Retryable = aws.Bool(true) // network errors are retryable

    // Override the error with a context canceled error, if that was canceled.
    ctx := r.Context()
    select {
    case <-ctx.Done():
        r.Error = awserr.New(request.CanceledErrorCode,
            "request context canceled", ctx.Err())
        r.Retryable = aws.Bool(false)
    default:
    }
}

// ValidateResponseHandler is a request handler to validate service response.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
    if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
        // this may be replaced by an UnmarshalError handler
        r.Error = awserr.New("UnknownError", "unknown error", nil)
    }
}}

// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
    // If one of the other handlers already set the retry state
    // we don't want to override it based on the service's state
    if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
        r.Retryable = aws.Bool(r.ShouldRetry(r))
    }

    if r.WillRetry() {
        r.RetryDelay = r.RetryRules(r)

        if sleepFn := r.Config.SleepDelay; sleepFn != nil {
            // Support SleepDelay for backwards compatibility and testing
            sleepFn(r.RetryDelay)
        } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
            r.Error = awserr.New(request.CanceledErrorCode,
                "request context canceled", err)
            r.Retryable = aws.Bool(false)
            return
        }

        // when the expired token exception occurs the credentials
        // need to be expired locally so that the next request to
        // get credentials will trigger a credentials refresh.
        if r.IsErrorExpired() {
            r.Config.Credentials.Expire()
        }

        r.RetryCount++
        r.Error = nil
    }
}}

// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
// region is not valid.
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
    if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
        r.Error = aws.ErrMissingRegion
    } else if r.ClientInfo.Endpoint == "" {
        r.Error = aws.ErrMissingEndpoint
    }
}}
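The handler lists above are the SDK's extension points; callers can push their own request.NamedHandler onto a client exactly the way the core handlers are registered. A rough sketch, assuming a default session (the handler name and log line are illustrative only):

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    // Runs alongside core.SendHandler for every request made with this client.
    svc.Handlers.Send.PushBackNamed(request.NamedHandler{
        Name: "example.LogRequestHandler", // hypothetical name
        Fn: func(r *request.Request) {
            log.Printf("%s %s", r.HTTPRequest.Method, r.HTTPRequest.URL)
        },
    })
}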
17 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go generated vendored
@@ -1,17 +0,0 @@
package corehandlers

import "github.com/aws/aws-sdk-go/aws/request"

// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
    if !r.ParamsFilled() {
        return
    }

    if v, ok := r.Params.(request.Validator); ok {
        if err := v.Validate(); err != nil {
            r.Error = err
        }
    }
}}
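ValidateParametersHandler relies on the generated input types implementing request.Validator, which also makes the same check available directly. A small sketch, assuming the generated s3 input types (the empty input is deliberately invalid):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Bucket and Key are required, so Validate reports both as missing.
    input := &s3.PutObjectInput{}
    if err := input.Validate(); err != nil {
        fmt.Println(err)
    }
}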
102 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go generated vendored
@@ -1,102 +0,0 @@
package credentials

import (
    "github.com/aws/aws-sdk-go/aws/awserr"
)

var (
    // ErrNoValidProvidersFoundInChain Is returned when there are no valid
    // providers in the ChainProvider.
    //
    // This has been deprecated. For verbose error messaging set
    // aws.Config.CredentialsChainVerboseErrors to true
    //
    // @readonly
    ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
        `no valid providers in chain. Deprecated.
    For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
        nil)
)

// A ChainProvider will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The ChainProvider provides a way of chaining multiple providers together
// which will pick the first available using priority order of the Providers
// in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again.
//
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
// In this example EnvProvider will first check if any credentials are available
// via the environment variables. If there are none ChainProvider will check
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain
//
//     creds := credentials.NewChainCredentials(
//         []credentials.Provider{
//             &credentials.EnvProvider{},
//             &ec2rolecreds.EC2RoleProvider{
//                 Client: ec2metadata.New(sess),
//             },
//         })
//
//     // Usage of ChainCredentials with aws.Config
//     svc := ec2.New(session.Must(session.NewSession(&aws.Config{
//         Credentials: creds,
//     })))
//
type ChainProvider struct {
    Providers     []Provider
    curr          Provider
    VerboseErrors bool
}

// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
    return NewCredentials(&ChainProvider{
        Providers: append([]Provider{}, providers...),
    })
}

// Retrieve returns the credentials value or error if no provider returned
// without error.
//
// If a provider is found it will be cached and any calls to IsExpired()
// will return the expired state of the cached provider.
func (c *ChainProvider) Retrieve() (Value, error) {
    var errs []error
    for _, p := range c.Providers {
        creds, err := p.Retrieve()
        if err == nil {
            c.curr = p
            return creds, nil
        }
        errs = append(errs, err)
    }
    c.curr = nil

    var err error
    err = ErrNoValidProvidersFoundInChain
    if c.VerboseErrors {
        err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
    }
    return Value{}, err
}

// IsExpired will returned the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *ChainProvider) IsExpired() bool {
    if c.curr != nil {
        return c.curr.IsExpired()
    }

    return true
}
246 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go generated vendored
@@ -1,246 +0,0 @@
// Package credentials provides credential retrieval and management
//
// The Credentials is the primary method of getting access to and managing
// credentials Values. Using dependency injection retrieval of the credential
// values is handled by a object which satisfies the Provider interface.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
// point Credentials will call Provider's Retrieve() to get new credential Value.
//
// The Provider is responsible for determining when credentials Value have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
//     creds := credentials.NewEnvCredentials()
//
//     // Retrieve the credentials value
//     credValue, err := creds.Get()
//     if err != nil {
//         // handle error
//     }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
//     creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
//     creds.Expire()
//     credsValue, err := creds.Get()
//     // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer setup with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// NewCredentials method.
//
//     type MyProvider struct{}
//     func (m *MyProvider) Retrieve() (Value, error) {...}
//     func (m *MyProvider) IsExpired() bool {...}
//
//     creds := credentials.NewCredentials(&MyProvider{})
//     credValue, err := creds.Get()
//
package credentials

import (
    "sync"
    "time"
)

// AnonymousCredentials is an empty Credential object that can be used as
// dummy placeholder credentials for requests that do not need signed.
//
// This Credentials can be used to configure a service to not sign requests
// when making service API calls. For example, when accessing public
// s3 buckets.
//
//     svc := s3.New(session.Must(session.NewSession(&aws.Config{
//         Credentials: credentials.AnonymousCredentials,
//     })))
//     // Access public S3 buckets.
//
// @readonly
var AnonymousCredentials = NewStaticCredentials("", "", "")

// A Value is the AWS credentials value for individual credential fields.
type Value struct {
    // AWS Access key ID
    AccessKeyID string

    // AWS Secret Access Key
    SecretAccessKey string

    // AWS Session Token
    SessionToken string

    // Provider used to get credentials
    ProviderName string
}

// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.
//
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
    // Retrieve returns nil if it successfully retrieved the value.
    // Error is returned if the value were not obtainable, or empty.
    Retrieve() (Value, error)

    // IsExpired returns if the credentials are no longer valid, and need
    // to be retrieved.
    IsExpired() bool
}

// An ErrorProvider is a stub credentials provider that always returns an error
// this is used by the SDK when construction a known provider is not possible
// due to an error.
type ErrorProvider struct {
    // The error to be returned from Retrieve
    Err error

    // The provider name to set on the Retrieved returned Value
    ProviderName string
}

// Retrieve will always return the error that the ErrorProvider was created with.
func (p ErrorProvider) Retrieve() (Value, error) {
    return Value{ProviderName: p.ProviderName}, p.Err
}

// IsExpired will always return not expired.
func (p ErrorProvider) IsExpired() bool {
    return false
}

// A Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
//     type EC2RoleProvider struct {
//         Expiry
//         ...
//     }
type Expiry struct {
    // The date/time when to expire on
    expiration time.Time

    // If set will be used by IsExpired to determine the current time.
    // Defaults to time.Now if CurrentTime is not set. Available for testing
    // to be able to mock out the current time.
    CurrentTime func() time.Time
}

// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
    e.expiration = expiration
    if window > 0 {
        e.expiration = e.expiration.Add(-window)
    }
}

// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
    if e.CurrentTime == nil {
        e.CurrentTime = time.Now
    }
    return e.expiration.Before(e.CurrentTime())
}

// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
    creds        Value
    forceRefresh bool
    m            sync.Mutex

    provider Provider
}

// NewCredentials returns a pointer to a new Credentials with the provider set.
func NewCredentials(provider Provider) *Credentials {
    return &Credentials{
        provider:     provider,
        forceRefresh: true,
    }
}

// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
    c.m.Lock()
    defer c.m.Unlock()

    if c.isExpired() {
        creds, err := c.provider.Retrieve()
        if err != nil {
            return Value{}, err
        }
        c.creds = creds
        c.forceRefresh = false
    }

    return c.creds, nil
}

// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
    c.m.Lock()
    defer c.m.Unlock()

    c.forceRefresh = true
}

// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
    c.m.Lock()
    defer c.m.Unlock()

    return c.isExpired()
}

// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
    return c.forceRefresh || c.provider.IsExpired()
}
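The "Custom Provider" sketch in the package comment can be filled out into a complete provider; a minimal static example (the hard-coded keys are placeholders, not real credentials):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

// staticExampleProvider is a hypothetical provider that always returns the same value.
type staticExampleProvider struct{}

func (p *staticExampleProvider) Retrieve() (credentials.Value, error) {
    return credentials.Value{
        AccessKeyID:     "AKIDEXAMPLE",   // placeholder
        SecretAccessKey: "secretEXAMPLE", // placeholder
        ProviderName:    "staticExampleProvider",
    }, nil
}

// IsExpired reports false, so the first retrieved value stays cached.
func (p *staticExampleProvider) IsExpired() bool { return false }

func main() {
    creds := credentials.NewCredentials(&staticExampleProvider{})
    v, err := creds.Get()
    fmt.Println(v.ProviderName, err)
}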
178 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go generated vendored
@@ -1,178 +0,0 @@
package ec2rolecreds

import (
    "bufio"
    "encoding/json"
    "fmt"
    "path"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
)

// ProviderName provides a name of EC2Role provider
const ProviderName = "EC2RoleProvider"

// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
//
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
// or ExpiryWindow
//
//     p := &ec2rolecreds.EC2RoleProvider{
//         // Pass in a custom timeout to be used when requesting
//         // IAM EC2 Role credentials.
//         Client: ec2metadata.New(sess, aws.Config{
//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
//         }),
//
//         // Do not use early expiry of credentials. If a non zero value is
//         // specified the credentials will be expired early
//         ExpiryWindow: 0,
//     }
type EC2RoleProvider struct {
    credentials.Expiry

    // Required EC2Metadata client to use when connecting to EC2 metadata service.
    Client *ec2metadata.EC2Metadata

    // ExpiryWindow will allow the credentials to trigger refreshing prior to
    // the credentials actually expiring. This is beneficial so race conditions
    // with expiring credentials do not cause request to fail unexpectedly
    // due to ExpiredTokenException exceptions.
    //
    // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
    // 10 seconds before the credentials are actually expired.
    //
    // If ExpiryWindow is 0 or less it will be ignored.
    ExpiryWindow time.Duration
}

// NewCredentials returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
// The ConfigProvider is satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
    p := &EC2RoleProvider{
        Client: ec2metadata.New(c),
    }

    for _, option := range options {
        option(p)
    }

    return credentials.NewCredentials(p)
}

// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
// metadata service.
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
    p := &EC2RoleProvider{
        Client: client,
    }

    for _, option := range options {
        option(p)
    }

    return credentials.NewCredentials(p)
}

// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or unable to extract
// the desired credentials.
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
    credsList, err := requestCredList(m.Client)
    if err != nil {
        return credentials.Value{ProviderName: ProviderName}, err
    }

    if len(credsList) == 0 {
        return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
    }
    credsName := credsList[0]

    roleCreds, err := requestCred(m.Client, credsName)
    if err != nil {
        return credentials.Value{ProviderName: ProviderName}, err
    }

    m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)

    return credentials.Value{
        AccessKeyID:     roleCreds.AccessKeyID,
        SecretAccessKey: roleCreds.SecretAccessKey,
        SessionToken:    roleCreds.Token,
        ProviderName:    ProviderName,
    }, nil
}

// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
    // Success State
    Expiration      time.Time
    AccessKeyID     string
    SecretAccessKey string
    Token           string

    // Error state
    Code    string
    Message string
}

const iamSecurityCredsPath = "/iam/security-credentials"

// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
    resp, err := client.GetMetadata(iamSecurityCredsPath)
    if err != nil {
        return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
    }

    credsList := []string{}
    s := bufio.NewScanner(strings.NewReader(resp))
    for s.Scan() {
        credsList = append(credsList, s.Text())
    }

    if err := s.Err(); err != nil {
        return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
    }

    return credsList, nil
}

// requestCred requests the credentials for a specific credentials from the EC2 service.
//
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
    resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
    if err != nil {
        return ec2RoleCredRespBody{},
            awserr.New("EC2RoleRequestError",
                fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
                err)
    }

    respCreds := ec2RoleCredRespBody{}
    if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
        return ec2RoleCredRespBody{},
            awserr.New("SerializationError",
                fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
                err)
    }

    if respCreds.Code != "Success" {
        // If an error code was returned something failed requesting the role.
        return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
    }

    return respCreds, nil
}
191 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go generated vendored
@@ -1,191 +0,0 @@
// Package endpointcreds provides support for retrieving credentials from an
// arbitrary HTTP endpoint.
//
// The credentials endpoint Provider can receive both static and refreshable
// credentials that will expire. Credentials are static when an "Expiration"
// value is not provided in the endpoint's response.
//
// Static credentials will never expire once they have been retrieved. The format
// of the static credentials response:
//     {
//         "AccessKeyId" : "MUA...",
//         "SecretAccessKey" : "/7PC5om....",
//     }
//
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
// value in the response. The format of the refreshable credentials response:
//     {
//         "AccessKeyId" : "MUA...",
//         "SecretAccessKey" : "/7PC5om....",
//         "Token" : "AQoDY....=",
//         "Expiration" : "2016-02-25T06:03:31Z"
//     }
//
// Errors should be returned in the following format and only returned with 400
// or 500 HTTP status codes.
//     {
//         "code": "ErrorCode",
//         "message": "Helpful error message."
//     }
package endpointcreds

import (
    "encoding/json"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/client/metadata"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
)

// ProviderName is the name of the credentials provider.
const ProviderName = `CredentialsEndpointProvider`

// Provider satisfies the credentials.Provider interface, and is a client to
// retrieve credentials from an arbitrary endpoint.
type Provider struct {
    staticCreds bool
    credentials.Expiry

    // Requires a AWS Client to make HTTP requests to the endpoint with.
    // the Endpoint the request will be made to is provided by the aws.Config's
    // Endpoint value.
    Client *client.Client

    // ExpiryWindow will allow the credentials to trigger refreshing prior to
    // the credentials actually expiring. This is beneficial so race conditions
    // with expiring credentials do not cause request to fail unexpectedly
    // due to ExpiredTokenException exceptions.
    //
    // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
    // 10 seconds before the credentials are actually expired.
    //
    // If ExpiryWindow is 0 or less it will be ignored.
    ExpiryWindow time.Duration
}

// NewProviderClient returns a credentials Provider for retrieving AWS credentials
// from arbitrary endpoint.
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
    p := &Provider{
        Client: client.New(
            cfg,
            metadata.ClientInfo{
                ServiceName: "CredentialsEndpoint",
                Endpoint:    endpoint,
            },
            handlers,
        ),
    }

    p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
    p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
    p.Client.Handlers.Validate.Clear()
    p.Client.Handlers.Validate.PushBack(validateEndpointHandler)

    for _, option := range options {
        option(p)
    }

    return p
}

// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
// from an arbitrary endpoint concurrently. The client will request the
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
    return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}

// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *Provider) IsExpired() bool {
    if p.staticCreds {
        return false
    }
    return p.Expiry.IsExpired()
}

// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. And error will be returned if the retrieval fails.
func (p *Provider) Retrieve() (credentials.Value, error) {
    resp, err := p.getCredentials()
    if err != nil {
        return credentials.Value{ProviderName: ProviderName},
            awserr.New("CredentialsEndpointError", "failed to load credentials", err)
    }

    if resp.Expiration != nil {
        p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
    } else {
        p.staticCreds = true
    }

    return credentials.Value{
        AccessKeyID:     resp.AccessKeyID,
        SecretAccessKey: resp.SecretAccessKey,
        SessionToken:    resp.Token,
        ProviderName:    ProviderName,
    }, nil
}

type getCredentialsOutput struct {
    Expiration      *time.Time
    AccessKeyID     string
    SecretAccessKey string
    Token           string
}

type errorOutput struct {
    Code    string `json:"code"`
    Message string `json:"message"`
}

func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
    op := &request.Operation{
        Name:       "GetCredentials",
        HTTPMethod: "GET",
    }

    out := &getCredentialsOutput{}
    req := p.Client.NewRequest(op, nil, out)
    req.HTTPRequest.Header.Set("Accept", "application/json")

    return out, req.Send()
}

func validateEndpointHandler(r *request.Request) {
    if len(r.ClientInfo.Endpoint) == 0 {
        r.Error = aws.ErrMissingEndpoint
    }
}

func unmarshalHandler(r *request.Request) {
    defer r.HTTPResponse.Body.Close()

    out := r.Data.(*getCredentialsOutput)
    if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
        r.Error = awserr.New("SerializationError",
            "failed to decode endpoint credentials",
            err,
        )
    }
}

func unmarshalError(r *request.Request) {
    defer r.HTTPResponse.Body.Close()

    var errOut errorOutput
    if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
        r.Error = awserr.New("SerializationError",
            "failed to decode endpoint credentials",
            err,
        )
    }

    // Response body format is not consistent between metadata endpoints.
    // Grab the error message as a string and include that as the source error
    r.Error = awserr.New(errOut.Code, errOut.Message, nil)
}
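Wiring this provider up only needs a config, the shared handler list, and the URL the credentials are served from. A rough sketch, assuming a local endpoint at a made-up address:

package main

import (
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess := session.Must(session.NewSession())

    // http://127.0.0.1:8080/creds is a placeholder; point it at whatever serves
    // the JSON documents described in the package comment above.
    creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, "http://127.0.0.1:8080/creds",
        func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute })

    _, _ = creds.Get()
}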
78 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go generated vendored
@@ -1,78 +0,0 @@
package credentials

import (
    "os"

    "github.com/aws/aws-sdk-go/aws/awserr"
)

// EnvProviderName provides a name of Env provider
const EnvProviderName = "EnvProvider"

var (
    // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
    // found in the process's environment.
    //
    // @readonly
    ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)

    // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
    // can't be found in the process's environment.
    //
    // @readonly
    ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)

// A EnvProvider retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
//
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
    retrieved bool
}

// NewEnvCredentials returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvCredentials() *Credentials {
    return NewCredentials(&EnvProvider{})
}

// Retrieve retrieves the keys from the environment.
func (e *EnvProvider) Retrieve() (Value, error) {
    e.retrieved = false

    id := os.Getenv("AWS_ACCESS_KEY_ID")
    if id == "" {
        id = os.Getenv("AWS_ACCESS_KEY")
    }

    secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
    if secret == "" {
        secret = os.Getenv("AWS_SECRET_KEY")
    }

    if id == "" {
        return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
    }

    if secret == "" {
        return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
    }

    e.retrieved = true
    return Value{
        AccessKeyID:     id,
        SecretAccessKey: secret,
        SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
        ProviderName:    EnvProviderName,
    }, nil
}

// IsExpired returns if the credentials have been retrieved.
func (e *EnvProvider) IsExpired() bool {
    return !e.retrieved
}
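For completeness, the environment provider used in isolation (most callers get it implicitly through the default credential chain):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    // Reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (and the legacy
    // AWS_ACCESS_KEY / AWS_SECRET_KEY fallbacks) from the environment.
    creds := credentials.NewEnvCredentials()
    if v, err := creds.Get(); err == nil {
        fmt.Println("loaded credentials from", v.ProviderName)
    }
}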
12 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini generated vendored
@@ -1,12 +0,0 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token

[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret

[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret
150 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go generated vendored
@@ -1,150 +0,0 @@
package credentials

import (
    "fmt"
    "os"

    "github.com/go-ini/ini"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/internal/shareddefaults"
)

// SharedCredsProviderName provides a name of SharedCreds provider
const SharedCredsProviderName = "SharedCredentialsProvider"

var (
    // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
    ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)

// A SharedCredentialsProvider retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
    // Path to the shared credentials file.
    //
    // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
    // env value is empty will default to current user's home directory.
    // Linux/OSX: "$HOME/.aws/credentials"
    // Windows:   "%USERPROFILE%\.aws\credentials"
    Filename string

    // AWS Profile to extract credentials from the shared credentials file. If empty
    // will default to environment variable "AWS_PROFILE" or "default" if
    // environment variable is also not set.
    Profile string

    // retrieved states if the credentials have been successfully retrieved.
    retrieved bool
}

// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewSharedCredentials(filename, profile string) *Credentials {
    return NewCredentials(&SharedCredentialsProvider{
        Filename: filename,
        Profile:  profile,
    })
}

// Retrieve reads and extracts the shared credentials from the current
// users home directory.
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
    p.retrieved = false

    filename, err := p.filename()
    if err != nil {
        return Value{ProviderName: SharedCredsProviderName}, err
    }

    creds, err := loadProfile(filename, p.profile())
    if err != nil {
        return Value{ProviderName: SharedCredsProviderName}, err
    }

    p.retrieved = true
    return creds, nil
}

// IsExpired returns if the shared credentials have expired.
func (p *SharedCredentialsProvider) IsExpired() bool {
    return !p.retrieved
}

// loadProfiles loads from the file pointed to by shared credentials filename for profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
    config, err := ini.Load(filename)
    if err != nil {
        return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
    }
    iniProfile, err := config.GetSection(profile)
    if err != nil {
        return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
    }

    id, err := iniProfile.GetKey("aws_access_key_id")
    if err != nil {
        return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
            fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
            err)
    }

    secret, err := iniProfile.GetKey("aws_secret_access_key")
    if err != nil {
        return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
            fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
            nil)
    }

    // Default to empty string if not found
    token := iniProfile.Key("aws_session_token")

    return Value{
        AccessKeyID:     id.String(),
        SecretAccessKey: secret.String(),
        SessionToken:    token.String(),
        ProviderName:    SharedCredsProviderName,
    }, nil
}

// filename returns the filename to use to read AWS shared credentials.
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
    if len(p.Filename) != 0 {
        return p.Filename, nil
    }

    if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
        return p.Filename, nil
    }

    if home := shareddefaults.UserHomeDir(); len(home) == 0 {
        // Backwards compatibility of home directly not found error being returned.
        // This error is too verbose, failure when opening the file would of been
        // a better error to return.
        return "", ErrSharedCredentialsHomeNotFound
    }

    p.Filename = shareddefaults.SharedCredentialsFilename()

    return p.Filename, nil
}

// profile returns the AWS shared credentials profile. If empty will read
// environment variable "AWS_PROFILE". If that is not set profile will
// return "default".
func (p *SharedCredentialsProvider) profile() string {
    if p.Profile == "" {
        p.Profile = os.Getenv("AWS_PROFILE")
|
|
||||||
}
|
|
||||||
if p.Profile == "" {
|
|
||||||
p.Profile = "default"
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.Profile
|
|
||||||
}
|
|
57 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go generated vendored
@@ -1,57 +0,0 @@
|
|||||||
package credentials
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StaticProviderName provides a name of Static provider
|
|
||||||
const StaticProviderName = "StaticProvider"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
|
|
||||||
//
|
|
||||||
// @readonly
|
|
||||||
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
|
||||||
)
|
|
||||||
|
|
||||||
// A StaticProvider is a set of credentials which are set programmatically,
|
|
||||||
// and will never expire.
|
|
||||||
type StaticProvider struct {
|
|
||||||
Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewStaticCredentials returns a pointer to a new Credentials object
|
|
||||||
// wrapping a static credentials value provider.
|
|
||||||
func NewStaticCredentials(id, secret, token string) *Credentials {
|
|
||||||
return NewCredentials(&StaticProvider{Value: Value{
|
|
||||||
AccessKeyID: id,
|
|
||||||
SecretAccessKey: secret,
|
|
||||||
SessionToken: token,
|
|
||||||
}})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
|
|
||||||
// wrapping the static credentials value provide. Same as NewStaticCredentials
|
|
||||||
// but takes the creds Value instead of individual fields
|
|
||||||
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
|
|
||||||
return NewCredentials(&StaticProvider{Value: creds})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve returns the credentials or error if the credentials are invalid.
|
|
||||||
func (s *StaticProvider) Retrieve() (Value, error) {
|
|
||||||
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
|
|
||||||
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(s.Value.ProviderName) == 0 {
|
|
||||||
s.Value.ProviderName = StaticProviderName
|
|
||||||
}
|
|
||||||
return s.Value, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsExpired returns if the credentials are expired.
|
|
||||||
//
|
|
||||||
// For StaticProvider, the credentials never expired.
|
|
||||||
func (s *StaticProvider) IsExpired() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
298 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go generated vendored
@@ -1,298 +0,0 @@
|
|||||||
/*
|
|
||||||
Package stscreds are credential Providers to retrieve STS AWS credentials.
|
|
||||||
|
|
||||||
STS provides multiple ways to retrieve credentials which can be used when making
|
|
||||||
future AWS service API operation calls.
|
|
||||||
|
|
||||||
The SDK will ensure that per instance of credentials.Credentials all requests
|
|
||||||
to refresh the credentials will be synchronized. But, the SDK is unable to
|
|
||||||
ensure synchronous usage of the AssumeRoleProvider if the value is shared
|
|
||||||
between multiple Credentials, Sessions or service clients.
|
|
||||||
|
|
||||||
Assume Role
|
|
||||||
|
|
||||||
To assume an IAM role using STS with the SDK you can create a new Credentials
|
|
||||||
with the SDKs's stscreds package.
|
|
||||||
|
|
||||||
// Initial credentials loaded from SDK's default credential chain. Such as
|
|
||||||
// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
|
|
||||||
// Role. These credentials will be used to to make the STS Assume Role API.
|
|
||||||
sess := session.Must(session.NewSession())
|
|
||||||
|
|
||||||
// Create the credentials from AssumeRoleProvider to assume the role
|
|
||||||
// referenced by the "myRoleARN" ARN.
|
|
||||||
creds := stscreds.NewCredentials(sess, "myRoleArn")
|
|
||||||
|
|
||||||
// Create service client value configured for credentials
|
|
||||||
// from assumed role.
|
|
||||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
|
||||||
|
|
||||||
Assume Role with static MFA Token
|
|
||||||
|
|
||||||
To assume an IAM role with a MFA token you can either specify a MFA token code
|
|
||||||
directly or provide a function to prompt the user each time the credentials
|
|
||||||
need to refresh the role's credentials. Specifying the TokenCode should be used
|
|
||||||
for short lived operations that will not need to be refreshed, and when you do
|
|
||||||
not want to have direct control over the user provides their MFA token.
|
|
||||||
|
|
||||||
With TokenCode the AssumeRoleProvider will be not be able to refresh the role's
|
|
||||||
credentials.
|
|
||||||
|
|
||||||
// Create the credentials from AssumeRoleProvider to assume the role
|
|
||||||
// referenced by the "myRoleARN" ARN using the MFA token code provided.
|
|
||||||
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
|
|
||||||
p.SerialNumber = aws.String("myTokenSerialNumber")
|
|
||||||
p.TokenCode = aws.String("00000000")
|
|
||||||
})
|
|
||||||
|
|
||||||
// Create service client value configured for credentials
|
|
||||||
// from assumed role.
|
|
||||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
|
||||||
|
|
||||||
Assume Role with MFA Token Provider
|
|
||||||
|
|
||||||
To assume an IAM role with MFA for longer running tasks where the credentials
|
|
||||||
may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
|
|
||||||
will allow the credential provider to prompt for new MFA token code when the
|
|
||||||
role's credentials need to be refreshed.
|
|
||||||
|
|
||||||
The StdinTokenProvider function is available to prompt on stdin to retrieve
|
|
||||||
the MFA token code from the user. You can also implement custom prompts by
|
|
||||||
satisfing the TokenProvider function signature.
|
|
||||||
|
|
||||||
Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
|
|
||||||
have undesirable results as the StdinTokenProvider will not be synchronized. A
|
|
||||||
single Credentials with an AssumeRoleProvider can be shared safely.
|
|
||||||
|
|
||||||
// Create the credentials from AssumeRoleProvider to assume the role
|
|
||||||
// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
|
|
||||||
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
|
|
||||||
p.SerialNumber = aws.String("myTokenSerialNumber")
|
|
||||||
p.TokenProvider = stscreds.StdinTokenProvider
|
|
||||||
})
|
|
||||||
|
|
||||||
// Create service client value configured for credentials
|
|
||||||
// from assumed role.
|
|
||||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
|
||||||
|
|
||||||
*/
|
|
||||||
package stscreds
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/service/sts"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
|
|
||||||
// An error is returned if reading from stdin fails.
|
|
||||||
//
|
|
||||||
// Use this function go read MFA tokens from stdin. The function makes no attempt
|
|
||||||
// to make atomic prompts from stdin across multiple gorouties.
|
|
||||||
//
|
|
||||||
// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
|
|
||||||
// have undesirable results as the StdinTokenProvider will not be synchronized. A
|
|
||||||
// single Credentials with an AssumeRoleProvider can be shared safely
|
|
||||||
//
|
|
||||||
// Will wait forever until something is provided on the stdin.
|
|
||||||
func StdinTokenProvider() (string, error) {
|
|
||||||
var v string
|
|
||||||
fmt.Printf("Assume Role MFA token code: ")
|
|
||||||
_, err := fmt.Scanln(&v)
|
|
||||||
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProviderName provides a name of AssumeRole provider
|
|
||||||
const ProviderName = "AssumeRoleProvider"
|
|
||||||
|
|
||||||
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
|
|
||||||
type AssumeRoler interface {
|
|
||||||
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultDuration is the default amount of time in minutes that the credentials
|
|
||||||
// will be valid for.
|
|
||||||
var DefaultDuration = time.Duration(15) * time.Minute
|
|
||||||
|
|
||||||
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
|
|
||||||
// keeps track of their expiration time.
|
|
||||||
//
|
|
||||||
// This credential provider will be used by the SDKs default credential change
|
|
||||||
// when shared configuration is enabled, and the shared config or shared credentials
|
|
||||||
// file configure assume role. See Session docs for how to do this.
|
|
||||||
//
|
|
||||||
// AssumeRoleProvider does not provide any synchronization and it is not safe
|
|
||||||
// to share this value across multiple Credentials, Sessions, or service clients
|
|
||||||
// without also sharing the same Credentials instance.
|
|
||||||
type AssumeRoleProvider struct {
|
|
||||||
credentials.Expiry
|
|
||||||
|
|
||||||
// STS client to make assume role request with.
|
|
||||||
Client AssumeRoler
|
|
||||||
|
|
||||||
// Role to be assumed.
|
|
||||||
RoleARN string
|
|
||||||
|
|
||||||
// Session name, if you wish to reuse the credentials elsewhere.
|
|
||||||
RoleSessionName string
|
|
||||||
|
|
||||||
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
|
|
||||||
Duration time.Duration
|
|
||||||
|
|
||||||
// Optional ExternalID to pass along, defaults to nil if not set.
|
|
||||||
ExternalID *string
|
|
||||||
|
|
||||||
// The policy plain text must be 2048 bytes or shorter. However, an internal
|
|
||||||
// conversion compresses it into a packed binary format with a separate limit.
|
|
||||||
// The PackedPolicySize response element indicates by percentage how close to
|
|
||||||
// the upper size limit the policy is, with 100% equaling the maximum allowed
|
|
||||||
// size.
|
|
||||||
Policy *string
|
|
||||||
|
|
||||||
// The identification number of the MFA device that is associated with the user
|
|
||||||
// who is making the AssumeRole call. Specify this value if the trust policy
|
|
||||||
// of the role being assumed includes a condition that requires MFA authentication.
|
|
||||||
// The value is either the serial number for a hardware device (such as GAHT12345678)
|
|
||||||
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
|
|
||||||
SerialNumber *string
|
|
||||||
|
|
||||||
// The value provided by the MFA device, if the trust policy of the role being
|
|
||||||
// assumed requires MFA (that is, if the policy includes a condition that tests
|
|
||||||
// for MFA). If the role being assumed requires MFA and if the TokenCode value
|
|
||||||
// is missing or expired, the AssumeRole call returns an "access denied" error.
|
|
||||||
//
|
|
||||||
// If SerialNumber is set and neither TokenCode nor TokenProvider are also
|
|
||||||
// set an error will be returned.
|
|
||||||
TokenCode *string
|
|
||||||
|
|
||||||
// Async method of providing MFA token code for assuming an IAM role with MFA.
|
|
||||||
// The value returned by the function will be used as the TokenCode in the Retrieve
|
|
||||||
// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
|
|
||||||
//
|
|
||||||
// This token provider will be called when ever the assumed role's
|
|
||||||
// credentials need to be refreshed when SerialNumber is also set and
|
|
||||||
// TokenCode is not set.
|
|
||||||
//
|
|
||||||
// If both TokenCode and TokenProvider is set, TokenProvider will be used and
|
|
||||||
// TokenCode is ignored.
|
|
||||||
TokenProvider func() (string, error)
|
|
||||||
|
|
||||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
|
||||||
// the credentials actually expiring. This is beneficial so race conditions
|
|
||||||
// with expiring credentials do not cause request to fail unexpectedly
|
|
||||||
// due to ExpiredTokenException exceptions.
|
|
||||||
//
|
|
||||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
|
||||||
// 10 seconds before the credentials are actually expired.
|
|
||||||
//
|
|
||||||
// If ExpiryWindow is 0 or less it will be ignored.
|
|
||||||
ExpiryWindow time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
|
||||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
|
||||||
// role will be named after a nanosecond timestamp of this operation.
|
|
||||||
//
|
|
||||||
// Takes a Config provider to create the STS client. The ConfigProvider is
|
|
||||||
// satisfied by the session.Session type.
|
|
||||||
//
|
|
||||||
// It is safe to share the returned Credentials with multiple Sessions and
|
|
||||||
// service clients. All access to the credentials and refreshing them
|
|
||||||
// will be synchronized.
|
|
||||||
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
|
||||||
p := &AssumeRoleProvider{
|
|
||||||
Client: sts.New(c),
|
|
||||||
RoleARN: roleARN,
|
|
||||||
Duration: DefaultDuration,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
return credentials.NewCredentials(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
|
|
||||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
|
||||||
// role will be named after a nanosecond timestamp of this operation.
|
|
||||||
//
|
|
||||||
// Takes an AssumeRoler which can be satisfied by the STS client.
|
|
||||||
//
|
|
||||||
// It is safe to share the returned Credentials with multiple Sessions and
|
|
||||||
// service clients. All access to the credentials and refreshing them
|
|
||||||
// will be synchronized.
|
|
||||||
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
|
||||||
p := &AssumeRoleProvider{
|
|
||||||
Client: svc,
|
|
||||||
RoleARN: roleARN,
|
|
||||||
Duration: DefaultDuration,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
return credentials.NewCredentials(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve generates a new set of temporary credentials using STS.
|
|
||||||
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
|
||||||
|
|
||||||
// Apply defaults where parameters are not set.
|
|
||||||
if p.RoleSessionName == "" {
|
|
||||||
// Try to work out a role name that will hopefully end up unique.
|
|
||||||
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
|
|
||||||
}
|
|
||||||
if p.Duration == 0 {
|
|
||||||
// Expire as often as AWS permits.
|
|
||||||
p.Duration = DefaultDuration
|
|
||||||
}
|
|
||||||
input := &sts.AssumeRoleInput{
|
|
||||||
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
|
|
||||||
RoleArn: aws.String(p.RoleARN),
|
|
||||||
RoleSessionName: aws.String(p.RoleSessionName),
|
|
||||||
ExternalId: p.ExternalID,
|
|
||||||
}
|
|
||||||
if p.Policy != nil {
|
|
||||||
input.Policy = p.Policy
|
|
||||||
}
|
|
||||||
if p.SerialNumber != nil {
|
|
||||||
if p.TokenCode != nil {
|
|
||||||
input.SerialNumber = p.SerialNumber
|
|
||||||
input.TokenCode = p.TokenCode
|
|
||||||
} else if p.TokenProvider != nil {
|
|
||||||
input.SerialNumber = p.SerialNumber
|
|
||||||
code, err := p.TokenProvider()
|
|
||||||
if err != nil {
|
|
||||||
return credentials.Value{ProviderName: ProviderName}, err
|
|
||||||
}
|
|
||||||
input.TokenCode = aws.String(code)
|
|
||||||
} else {
|
|
||||||
return credentials.Value{ProviderName: ProviderName},
|
|
||||||
awserr.New("AssumeRoleTokenNotAvailable",
|
|
||||||
"assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
roleOutput, err := p.Client.AssumeRole(input)
|
|
||||||
if err != nil {
|
|
||||||
return credentials.Value{ProviderName: ProviderName}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// We will proactively generate new credentials before they expire.
|
|
||||||
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
|
|
||||||
|
|
||||||
return credentials.Value{
|
|
||||||
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
|
|
||||||
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
|
|
||||||
SessionToken: *roleOutput.Credentials.SessionToken,
|
|
||||||
ProviderName: ProviderName,
|
|
||||||
}, nil
|
|
||||||
}
|
|
163 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go generated vendored
@@ -1,163 +0,0 @@
|
|||||||
// Package defaults is a collection of helpers to retrieve the SDK's default
|
|
||||||
// configuration and handlers.
|
|
||||||
//
|
|
||||||
// Generally this package shouldn't be used directly, but session.Session
|
|
||||||
// instead. This package is useful when you need to reset the defaults
|
|
||||||
// of a session or service client to the SDK defaults before setting
|
|
||||||
// additional parameters.
|
|
||||||
package defaults
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Defaults provides a collection of default values for SDK clients.
|
|
||||||
type Defaults struct {
|
|
||||||
Config *aws.Config
|
|
||||||
Handlers request.Handlers
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the SDK's default values with Config and handlers pre-configured.
|
|
||||||
func Get() Defaults {
|
|
||||||
cfg := Config()
|
|
||||||
handlers := Handlers()
|
|
||||||
cfg.Credentials = CredChain(cfg, handlers)
|
|
||||||
|
|
||||||
return Defaults{
|
|
||||||
Config: cfg,
|
|
||||||
Handlers: handlers,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config returns the default configuration without credentials.
|
|
||||||
// To retrieve a config with credentials also included use
|
|
||||||
// `defaults.Get().Config` instead.
|
|
||||||
//
|
|
||||||
// Generally you shouldn't need to use this method directly, but
|
|
||||||
// is available if you need to reset the configuration of an
|
|
||||||
// existing service client or session.
|
|
||||||
func Config() *aws.Config {
|
|
||||||
return aws.NewConfig().
|
|
||||||
WithCredentials(credentials.AnonymousCredentials).
|
|
||||||
WithRegion(os.Getenv("AWS_REGION")).
|
|
||||||
WithHTTPClient(http.DefaultClient).
|
|
||||||
WithMaxRetries(aws.UseServiceDefaultRetries).
|
|
||||||
WithLogger(aws.NewDefaultLogger()).
|
|
||||||
WithLogLevel(aws.LogOff).
|
|
||||||
WithEndpointResolver(endpoints.DefaultResolver())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handlers returns the default request handlers.
|
|
||||||
//
|
|
||||||
// Generally you shouldn't need to use this method directly, but
|
|
||||||
// is available if you need to reset the request handlers of an
|
|
||||||
// existing service client or session.
|
|
||||||
func Handlers() request.Handlers {
|
|
||||||
var handlers request.Handlers
|
|
||||||
|
|
||||||
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
|
|
||||||
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
|
|
||||||
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
|
|
||||||
handlers.Build.AfterEachFn = request.HandlerListStopOnError
|
|
||||||
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
|
||||||
handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
|
|
||||||
handlers.Send.PushBackNamed(corehandlers.SendHandler)
|
|
||||||
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
|
|
||||||
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
|
|
||||||
|
|
||||||
return handlers
|
|
||||||
}
|
|
||||||
|
|
||||||
// CredChain returns the default credential chain.
|
|
||||||
//
|
|
||||||
// Generally you shouldn't need to use this method directly, but
|
|
||||||
// is available if you need to reset the credentials of an
|
|
||||||
// existing service client or session's Config.
|
|
||||||
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
|
|
||||||
return credentials.NewCredentials(&credentials.ChainProvider{
|
|
||||||
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
|
|
||||||
Providers: []credentials.Provider{
|
|
||||||
&credentials.EnvProvider{},
|
|
||||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
|
||||||
RemoteCredProvider(*cfg, handlers),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
|
|
||||||
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RemoteCredProvider returns a credentials provider for the default remote
|
|
||||||
// endpoints such as EC2 or ECS Roles.
|
|
||||||
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
|
||||||
if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
|
|
||||||
return localHTTPCredProvider(cfg, handlers, u)
|
|
||||||
}
|
|
||||||
|
|
||||||
if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
|
|
||||||
u := fmt.Sprintf("http://169.254.170.2%s", uri)
|
|
||||||
return httpCredProvider(cfg, handlers, u)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ec2RoleProvider(cfg, handlers)
|
|
||||||
}
|
|
||||||
|
|
||||||
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
|
|
||||||
var errMsg string
|
|
||||||
|
|
||||||
parsed, err := url.Parse(u)
|
|
||||||
if err != nil {
|
|
||||||
errMsg = fmt.Sprintf("invalid URL, %v", err)
|
|
||||||
} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
|
|
||||||
errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errMsg) > 0 {
|
|
||||||
if cfg.Logger != nil {
|
|
||||||
cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
|
|
||||||
}
|
|
||||||
return credentials.ErrorProvider{
|
|
||||||
Err: awserr.New("CredentialsEndpointError", errMsg, err),
|
|
||||||
ProviderName: endpointcreds.ProviderName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return httpCredProvider(cfg, handlers, u)
|
|
||||||
}
|
|
||||||
|
|
||||||
func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
|
|
||||||
return endpointcreds.NewProviderClient(cfg, handlers, u,
|
|
||||||
func(p *endpointcreds.Provider) {
|
|
||||||
p.ExpiryWindow = 5 * time.Minute
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
|
||||||
resolver := cfg.EndpointResolver
|
|
||||||
if resolver == nil {
|
|
||||||
resolver = endpoints.DefaultResolver()
|
|
||||||
}
|
|
||||||
|
|
||||||
e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
|
|
||||||
return &ec2rolecreds.EC2RoleProvider{
|
|
||||||
Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
|
|
||||||
ExpiryWindow: 5 * time.Minute,
|
|
||||||
}
|
|
||||||
}
|
|
27 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go generated vendored
@@ -1,27 +0,0 @@
|
|||||||
package defaults
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SharedCredentialsFilename returns the SDK's default file path
|
|
||||||
// for the shared credentials file.
|
|
||||||
//
|
|
||||||
// Builds the shared config file path based on the OS's platform.
|
|
||||||
//
|
|
||||||
// - Linux/Unix: $HOME/.aws/credentials
|
|
||||||
// - Windows: %USERPROFILE%\.aws\credentials
|
|
||||||
func SharedCredentialsFilename() string {
|
|
||||||
return shareddefaults.SharedCredentialsFilename()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedConfigFilename returns the SDK's default file path for
|
|
||||||
// the shared config file.
|
|
||||||
//
|
|
||||||
// Builds the shared config file path based on the OS's platform.
|
|
||||||
//
|
|
||||||
// - Linux/Unix: $HOME/.aws/config
|
|
||||||
// - Windows: %USERPROFILE%\.aws\config
|
|
||||||
func SharedConfigFilename() string {
|
|
||||||
return shareddefaults.SharedConfigFilename()
|
|
||||||
}
|
|
56 vendor/github.com/aws/aws-sdk-go/aws/doc.go generated vendored
@@ -1,56 +0,0 @@
|
|||||||
// Package aws provides the core SDK's utilities and shared types. Use this package's
|
|
||||||
// utilities to simplify setting and reading API operations parameters.
|
|
||||||
//
|
|
||||||
// Value and Pointer Conversion Utilities
|
|
||||||
//
|
|
||||||
// This package includes a helper conversion utility for each scalar type the SDK's
|
|
||||||
// API use. These utilities make getting a pointer of the scalar, and dereferencing
|
|
||||||
// a pointer easier.
|
|
||||||
//
|
|
||||||
// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
|
|
||||||
// The Pointer to value will safely dereference the pointer and return its value.
|
|
||||||
// If the pointer was nil, the scalar's zero value will be returned.
|
|
||||||
//
|
|
||||||
// The value to pointer functions will be named after the scalar type. So get a
|
|
||||||
// *string from a string value use the "String" function. This makes it easy to
|
|
||||||
// to get pointer of a literal string value, because getting the address of a
|
|
||||||
// literal requires assigning the value to a variable first.
|
|
||||||
//
|
|
||||||
// var strPtr *string
|
|
||||||
//
|
|
||||||
// // Without the SDK's conversion functions
|
|
||||||
// str := "my string"
|
|
||||||
// strPtr = &str
|
|
||||||
//
|
|
||||||
// // With the SDK's conversion functions
|
|
||||||
// strPtr = aws.String("my string")
|
|
||||||
//
|
|
||||||
// // Convert *string to string value
|
|
||||||
// str = aws.StringValue(strPtr)
|
|
||||||
//
|
|
||||||
// In addition to scalars the aws package also includes conversion utilities for
|
|
||||||
// map and slice for commonly types used in API parameters. The map and slice
|
|
||||||
// conversion functions use similar naming pattern as the scalar conversion
|
|
||||||
// functions.
|
|
||||||
//
|
|
||||||
// var strPtrs []*string
|
|
||||||
// var strs []string = []string{"Go", "Gophers", "Go"}
|
|
||||||
//
|
|
||||||
// // Convert []string to []*string
|
|
||||||
// strPtrs = aws.StringSlice(strs)
|
|
||||||
//
|
|
||||||
// // Convert []*string to []string
|
|
||||||
// strs = aws.StringValueSlice(strPtrs)
|
|
||||||
//
|
|
||||||
// SDK Default HTTP Client
|
|
||||||
//
|
|
||||||
// The SDK will use the http.DefaultClient if a HTTP client is not provided to
|
|
||||||
// the SDK's Session, or service client constructor. This means that if the
|
|
||||||
// http.DefaultClient is modified by other components of your application the
|
|
||||||
// modifications will be picked up by the SDK as well.
|
|
||||||
//
|
|
||||||
// In some cases this might be intended, but it is a better practice to create
|
|
||||||
// a custom HTTP Client to share explicitly through your application. You can
|
|
||||||
// configure the SDK to use the custom HTTP Client by setting the HTTPClient
|
|
||||||
// value of the SDK's Config type when creating a Session or service client.
|
|
||||||
package aws
|
|
162 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go generated vendored
@@ -1,162 +0,0 @@
|
|||||||
package ec2metadata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetMetadata uses the path provided to request information from the EC2
|
|
||||||
// instance metdata service. The content will be returned as a string, or
|
|
||||||
// error if the request failed.
|
|
||||||
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
|
|
||||||
op := &request.Operation{
|
|
||||||
Name: "GetMetadata",
|
|
||||||
HTTPMethod: "GET",
|
|
||||||
HTTPPath: path.Join("/", "meta-data", p),
|
|
||||||
}
|
|
||||||
|
|
||||||
output := &metadataOutput{}
|
|
||||||
req := c.NewRequest(op, nil, output)
|
|
||||||
|
|
||||||
return output.Content, req.Send()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUserData returns the userdata that was configured for the service. If
|
|
||||||
// there is no user-data setup for the EC2 instance a "NotFoundError" error
|
|
||||||
// code will be returned.
|
|
||||||
func (c *EC2Metadata) GetUserData() (string, error) {
|
|
||||||
op := &request.Operation{
|
|
||||||
Name: "GetUserData",
|
|
||||||
HTTPMethod: "GET",
|
|
||||||
HTTPPath: path.Join("/", "user-data"),
|
|
||||||
}
|
|
||||||
|
|
||||||
output := &metadataOutput{}
|
|
||||||
req := c.NewRequest(op, nil, output)
|
|
||||||
req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
|
|
||||||
if r.HTTPResponse.StatusCode == http.StatusNotFound {
|
|
||||||
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return output.Content, req.Send()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDynamicData uses the path provided to request information from the EC2
|
|
||||||
// instance metadata service for dynamic data. The content will be returned
|
|
||||||
// as a string, or error if the request failed.
|
|
||||||
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
|
|
||||||
op := &request.Operation{
|
|
||||||
Name: "GetDynamicData",
|
|
||||||
HTTPMethod: "GET",
|
|
||||||
HTTPPath: path.Join("/", "dynamic", p),
|
|
||||||
}
|
|
||||||
|
|
||||||
output := &metadataOutput{}
|
|
||||||
req := c.NewRequest(op, nil, output)
|
|
||||||
|
|
||||||
return output.Content, req.Send()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInstanceIdentityDocument retrieves an identity document describing an
|
|
||||||
// instance. Error is returned if the request fails or is unable to parse
|
|
||||||
// the response.
|
|
||||||
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
|
|
||||||
resp, err := c.GetDynamicData("instance-identity/document")
|
|
||||||
if err != nil {
|
|
||||||
return EC2InstanceIdentityDocument{},
|
|
||||||
awserr.New("EC2MetadataRequestError",
|
|
||||||
"failed to get EC2 instance identity document", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
doc := EC2InstanceIdentityDocument{}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
|
|
||||||
return EC2InstanceIdentityDocument{},
|
|
||||||
awserr.New("SerializationError",
|
|
||||||
"failed to decode EC2 instance identity document", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return doc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IAMInfo retrieves IAM info from the metadata API
|
|
||||||
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
|
|
||||||
resp, err := c.GetMetadata("iam/info")
|
|
||||||
if err != nil {
|
|
||||||
return EC2IAMInfo{},
|
|
||||||
awserr.New("EC2MetadataRequestError",
|
|
||||||
"failed to get EC2 IAM info", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
info := EC2IAMInfo{}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
|
|
||||||
return EC2IAMInfo{},
|
|
||||||
awserr.New("SerializationError",
|
|
||||||
"failed to decode EC2 IAM info", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if info.Code != "Success" {
|
|
||||||
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
|
|
||||||
return EC2IAMInfo{},
|
|
||||||
awserr.New("EC2MetadataError", errMsg, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Region returns the region the instance is running in.
|
|
||||||
func (c *EC2Metadata) Region() (string, error) {
|
|
||||||
resp, err := c.GetMetadata("placement/availability-zone")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
|
|
||||||
return resp[:len(resp)-1], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available returns if the application has access to the EC2 Metadata service.
|
|
||||||
// Can be used to determine if application is running within an EC2 Instance and
|
|
||||||
// the metadata service is available.
|
|
||||||
func (c *EC2Metadata) Available() bool {
|
|
||||||
if _, err := c.GetMetadata("instance-id"); err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// An EC2IAMInfo provides the shape for unmarshaling
|
|
||||||
// an IAM info from the metadata API
|
|
||||||
type EC2IAMInfo struct {
|
|
||||||
Code string
|
|
||||||
LastUpdated time.Time
|
|
||||||
InstanceProfileArn string
|
|
||||||
InstanceProfileID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
|
|
||||||
// an instance identity document
|
|
||||||
type EC2InstanceIdentityDocument struct {
|
|
||||||
DevpayProductCodes []string `json:"devpayProductCodes"`
|
|
||||||
AvailabilityZone string `json:"availabilityZone"`
|
|
||||||
PrivateIP string `json:"privateIp"`
|
|
||||||
Version string `json:"version"`
|
|
||||||
Region string `json:"region"`
|
|
||||||
InstanceID string `json:"instanceId"`
|
|
||||||
BillingProducts []string `json:"billingProducts"`
|
|
||||||
InstanceType string `json:"instanceType"`
|
|
||||||
AccountID string `json:"accountId"`
|
|
||||||
PendingTime time.Time `json:"pendingTime"`
|
|
||||||
ImageID string `json:"imageId"`
|
|
||||||
KernelID string `json:"kernelId"`
|
|
||||||
RamdiskID string `json:"ramdiskId"`
|
|
||||||
Architecture string `json:"architecture"`
|
|
||||||
}
|
|
124 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go generated vendored
@@ -1,124 +0,0 @@
|
|||||||
// Package ec2metadata provides the client for making API calls to the
|
|
||||||
// EC2 Metadata service.
|
|
||||||
package ec2metadata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ServiceName is the name of the service.
|
|
||||||
const ServiceName = "ec2metadata"
|
|
||||||
|
|
||||||
// A EC2Metadata is an EC2 Metadata service Client.
|
|
||||||
type EC2Metadata struct {
|
|
||||||
*client.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the EC2Metadata client with a session.
|
|
||||||
// This client is safe to use across multiple goroutines.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// // Create a EC2Metadata client from just a session.
|
|
||||||
// svc := ec2metadata.New(mySession)
|
|
||||||
//
|
|
||||||
// // Create a EC2Metadata client with additional configuration
|
|
||||||
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
|
|
||||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
|
|
||||||
c := p.ClientConfig(ServiceName, cfgs...)
|
|
||||||
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient returns a new EC2Metadata client. Should be used to create
|
|
||||||
// a client when not using a session. Generally using just New with a session
|
|
||||||
// is preferred.
|
|
||||||
//
|
|
||||||
// If an unmodified HTTP client is provided from the stdlib default, or no client
|
|
||||||
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
|
|
||||||
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
|
|
||||||
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
|
|
||||||
if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
|
|
||||||
// If the http client is unmodified and this feature is not disabled
|
|
||||||
// set custom timeouts for EC2Metadata requests.
|
|
||||||
cfg.HTTPClient = &http.Client{
|
|
||||||
// use a shorter timeout than default because the metadata
|
|
||||||
// service is local if it is running, and to fail faster
|
|
||||||
// if not running on an ec2 instance.
|
|
||||||
Timeout: 5 * time.Second,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
svc := &EC2Metadata{
|
|
||||||
Client: client.New(
|
|
||||||
cfg,
|
|
||||||
metadata.ClientInfo{
|
|
||||||
ServiceName: ServiceName,
|
|
||||||
Endpoint: endpoint,
|
|
||||||
APIVersion: "latest",
|
|
||||||
},
|
|
||||||
handlers,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
|
|
||||||
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
|
|
||||||
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
|
|
||||||
svc.Handlers.Validate.Clear()
|
|
||||||
svc.Handlers.Validate.PushBack(validateEndpointHandler)
|
|
||||||
|
|
||||||
// Add additional options to the service config
|
|
||||||
for _, option := range opts {
|
|
||||||
option(svc.Client)
|
|
||||||
}
|
|
||||||
|
|
||||||
return svc
|
|
||||||
}
|
|
||||||
|
|
||||||
func httpClientZero(c *http.Client) bool {
|
|
||||||
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
type metadataOutput struct {
|
|
||||||
Content string
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalHandler(r *request.Request) {
|
|
||||||
defer r.HTTPResponse.Body.Close()
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
|
||||||
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if data, ok := r.Data.(*metadataOutput); ok {
|
|
||||||
data.Content = b.String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalError(r *request.Request) {
|
|
||||||
defer r.HTTPResponse.Body.Close()
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
|
||||||
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response body format is not consistent between metadata endpoints.
|
|
||||||
// Grab the error message as a string and include that as the source error
|
|
||||||
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateEndpointHandler(r *request.Request) {
|
|
||||||
if r.ClientInfo.Endpoint == "" {
|
|
||||||
r.Error = aws.ErrMissingEndpoint
|
|
||||||
}
|
|
||||||
}
|
|
133 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go generated vendored
@@ -1,133 +0,0 @@
|
|||||||
package endpoints
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
)
|
|
||||||
|
|
||||||
type modelDefinition map[string]json.RawMessage
|
|
||||||
|
|
||||||
// A DecodeModelOptions are the options for how the endpoints model definition
|
|
||||||
// are decoded.
|
|
||||||
type DecodeModelOptions struct {
|
|
||||||
SkipCustomizations bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set combines all of the option functions together.
|
|
||||||
func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeModel unmarshals a Regions and Endpoint model definition file into
|
|
||||||
// a endpoint Resolver. If the file format is not supported, or an error occurs
|
|
||||||
// when unmarshaling the model an error will be returned.
|
|
||||||
//
|
|
||||||
// Casting the return value of this func to a EnumPartitions will
|
|
||||||
// allow you to get a list of the partitions in the order the endpoints
|
|
||||||
// will be resolved in.
|
|
||||||
//
|
|
||||||
// resolver, err := endpoints.DecodeModel(reader)
|
|
||||||
//
|
|
||||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
|
||||||
// for _, p := range partitions {
|
|
||||||
// // ... inspect partitions
|
|
||||||
// }
|
|
||||||
func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
|
|
||||||
var opts DecodeModelOptions
|
|
||||||
opts.Set(optFns...)
|
|
||||||
|
|
||||||
// Get the version of the partition file to determine what
|
|
||||||
// unmarshaling model to use.
|
|
||||||
modelDef := modelDefinition{}
|
|
||||||
if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
|
|
||||||
return nil, newDecodeModelError("failed to decode endpoints model", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var version string
|
|
||||||
if b, ok := modelDef["version"]; ok {
|
|
||||||
version = string(b)
|
|
||||||
} else {
|
|
||||||
return nil, newDecodeModelError("endpoints version not found in model", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if version == "3" {
|
|
||||||
return decodeV3Endpoints(modelDef, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, newDecodeModelError(
|
|
||||||
fmt.Sprintf("endpoints version %s, not supported", version), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
|
|
||||||
b, ok := modelDef["partitions"]
|
|
||||||
if !ok {
|
|
||||||
return nil, newDecodeModelError("endpoints model missing partitions", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
ps := partitions{}
|
|
||||||
if err := json.Unmarshal(b, &ps); err != nil {
|
|
||||||
return nil, newDecodeModelError("failed to decode endpoints model", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.SkipCustomizations {
|
|
||||||
return ps, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Customization
|
|
||||||
for i := 0; i < len(ps); i++ {
|
|
||||||
p := &ps[i]
|
|
||||||
custAddEC2Metadata(p)
|
|
||||||
custAddS3DualStack(p)
|
|
||||||
custRmIotDataService(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ps, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func custAddS3DualStack(p *partition) {
|
|
||||||
if p.ID != "aws" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s, ok := p.Services["s3"]
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Defaults.HasDualStack = boxedTrue
|
|
||||||
s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
|
|
||||||
|
|
||||||
p.Services["s3"] = s
|
|
||||||
}
|
|
||||||
|
|
||||||
func custAddEC2Metadata(p *partition) {
|
|
||||||
p.Services["ec2metadata"] = service{
|
|
||||||
IsRegionalized: boxedFalse,
|
|
||||||
PartitionEndpoint: "aws-global",
|
|
||||||
Endpoints: endpoints{
|
|
||||||
"aws-global": endpoint{
|
|
||||||
Hostname: "169.254.169.254/latest",
|
|
||||||
Protocols: []string{"http"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func custRmIotDataService(p *partition) {
|
|
||||||
delete(p.Services, "data.iot")
|
|
||||||
}
|
|
||||||
|
|
||||||
type decodeModelError struct {
|
|
||||||
awsError
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDecodeModelError(msg string, err error) decodeModelError {
|
|
||||||
return decodeModelError{
|
|
||||||
awsError: awserr.New("DecodeEndpointsModelError", msg, err),
|
|
||||||
}
|
|
||||||
}
|
|
2424 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored
File diff suppressed because it is too large
66 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go generated vendored
@@ -1,66 +0,0 @@
|
|||||||
// Package endpoints provides the types and functionality for defining regions
|
|
||||||
// and endpoints, as well as querying those definitions.
|
|
||||||
//
|
|
||||||
// The SDK's Regions and Endpoints metadata is code generated into the endpoints
|
|
||||||
// package, and is accessible via the DefaultResolver function. This function
|
|
||||||
// returns a endpoint Resolver will search the metadata and build an associated
|
|
||||||
// endpoint if one is found. The default resolver will search all partitions
|
|
||||||
// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and
|
|
||||||
// AWS GovCloud (US) (aws-us-gov).
|
|
||||||
// .
|
|
||||||
//
|
|
||||||
// Enumerating Regions and Endpoint Metadata
|
|
||||||
//
|
|
||||||
// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
|
|
||||||
// will allow you to get access to the list of underlying Partitions with the
|
|
||||||
// Partitions method. This is helpful if you want to limit the SDK's endpoint
|
|
||||||
// resolving to a single partition, or enumerate regions, services, and endpoints
|
|
||||||
// in the partition.
|
|
||||||
//
|
|
||||||
// resolver := endpoints.DefaultResolver()
|
|
||||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
|
||||||
//
|
|
||||||
// for _, p := range partitions {
|
|
||||||
// fmt.Println("Regions for", p.ID())
|
|
||||||
// for id, _ := range p.Regions() {
|
|
||||||
// fmt.Println("*", id)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// fmt.Println("Services for", p.ID())
|
|
||||||
// for id, _ := range p.Services() {
|
|
||||||
// fmt.Println("*", id)
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Using Custom Endpoints
|
|
||||||
//
|
|
||||||
// The endpoints package also gives you the ability to use your own logic how
|
|
||||||
// endpoints are resolved. This is a great way to define a custom endpoint
|
|
||||||
// for select services, without passing that logic down through your code.
|
|
||||||
//
|
|
||||||
// If a type implements the Resolver interface it can be used to resolve
|
|
||||||
// endpoints. To use this with the SDK's Session and Config set the value
|
|
||||||
// of the type to the EndpointsResolver field of aws.Config when initializing
|
|
||||||
// the session, or service client.
|
|
||||||
//
|
|
||||||
// In addition the ResolverFunc is a wrapper for a func matching the signature
|
|
||||||
// of Resolver.EndpointFor, converting it to a type that satisfies the
|
|
||||||
// Resolver interface.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
|
||||||
// if service == endpoints.S3ServiceID {
|
|
||||||
// return endpoints.ResolvedEndpoint{
|
|
||||||
// URL: "s3.custom.endpoint.com",
|
|
||||||
// SigningRegion: "custom-signing-region",
|
|
||||||
// }, nil
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// sess := session.Must(session.NewSession(&aws.Config{
|
|
||||||
// Region: aws.String("us-west-2"),
|
|
||||||
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
|
|
||||||
// }))
|
|
||||||
package endpoints
|
|
439 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go generated vendored
@@ -1,439 +0,0 @@
|
|||||||
package endpoints
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Options provide the configuration needed to direct how the
|
|
||||||
// endpoints will be resolved.
|
|
||||||
type Options struct {
|
|
||||||
// DisableSSL forces the endpoint to be resolved as HTTP.
|
|
||||||
// instead of HTTPS if the service supports it.
|
|
||||||
DisableSSL bool
|
|
||||||
|
|
||||||
// Sets the resolver to resolve the endpoint as a dualstack endpoint
|
|
||||||
// for the service. If dualstack support for a service is not known and
|
|
||||||
// StrictMatching is not enabled a dualstack endpoint for the service will
|
|
||||||
// be returned. This endpoint may not be valid. If StrictMatching is
|
|
||||||
// enabled only services that are known to support dualstack will return
|
|
||||||
// dualstack endpoints.
|
|
||||||
UseDualStack bool
|
|
||||||
|
|
||||||
// Enables strict matching of services and regions resolved endpoints.
|
|
||||||
// If the partition doesn't enumerate the exact service and region an
|
|
||||||
// error will be returned. This option will prevent returning endpoints
|
|
||||||
// that look valid, but may not resolve to any real endpoint.
|
|
||||||
StrictMatching bool
|
|
||||||
|
|
||||||
// Enables resolving a service endpoint based on the region provided if the
|
|
||||||
// service does not exist. The service endpoint ID will be used as the service
|
|
||||||
// domain name prefix. By default the endpoint resolver requires the service
|
|
||||||
// to be known when resolving endpoints.
|
|
||||||
//
|
|
||||||
// If resolving an endpoint on the partition list the provided region will
|
|
||||||
// be used to determine which partition's domain name pattern to the service
|
|
||||||
// endpoint ID with. If both the service and region are unkonwn and resolving
|
|
||||||
// the endpoint on partition list an UnknownEndpointError error will be returned.
|
|
||||||
//
|
|
||||||
// If resolving and endpoint on a partition specific resolver that partition's
|
|
||||||
// domain name pattern will be used with the service endpoint ID. If both
|
|
||||||
// region and service do not exist when resolving an endpoint on a specific
|
|
||||||
// partition the partition's domain pattern will be used to combine the
|
|
||||||
// endpoint and region together.
|
|
||||||
//
|
|
||||||
// This option is ignored if StrictMatching is enabled.
|
|
||||||
ResolveUnknownService bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set combines all of the option functions together.
|
|
||||||
func (o *Options) Set(optFns ...func(*Options)) {
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(o)
|
|
||||||
}
|
|
||||||
}

// DisableSSLOption sets the DisableSSL option. Can be used as a functional
// option when resolving endpoints.
func DisableSSLOption(o *Options) {
    o.DisableSSL = true
}

// UseDualStackOption sets the UseDualStack option. Can be used as a functional
// option when resolving endpoints.
func UseDualStackOption(o *Options) {
    o.UseDualStack = true
}

// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
// option when resolving endpoints.
func StrictMatchingOption(o *Options) {
    o.StrictMatching = true
}

// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
// as a functional option when resolving endpoints.
func ResolveUnknownServiceOption(o *Options) {
    o.ResolveUnknownService = true
}

// A Resolver provides the interface for functionality to resolve endpoints.
// The built in Partition and DefaultResolver return values satisfy this interface.
type Resolver interface {
    EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
}

// ResolverFunc is a helper utility that wraps a function so it satisfies the
// Resolver interface. This is useful when you want to add additional endpoint
// resolving logic, or stub out specific endpoints with custom values.
type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)

// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
    return fn(service, region, opts...)
}
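
ResolverFunc is the hook for overriding resolution, for example pointing the "s3" service at a local S3-compatible server while deferring everything else to the default resolver. A hedged sketch; the localhost URL is illustrative:

custom := endpoints.ResolverFunc(func(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
    if service == "s3" {
        return endpoints.ResolvedEndpoint{
            URL:           "http://localhost:9000",
            SigningRegion: region,
        }, nil
    }
    // Defer to the SDK's bundled model for every other service.
    return endpoints.DefaultResolver().EndpointFor(service, region, opts...)
})

A resolver like this is typically wired into the client configuration so that every service client built from it resolves through the stub.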

var schemeRE = regexp.MustCompile("^([^:]+)://")

// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
//
// If disableSSL is set, it will only set the URL's scheme if the URL does not
// contain a scheme.
func AddScheme(endpoint string, disableSSL bool) string {
    if !schemeRE.MatchString(endpoint) {
        scheme := "https"
        if disableSSL {
            scheme = "http"
        }
        endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
    }

    return endpoint
}
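
Because the scheme is only prepended when missing, an explicit scheme always wins. A short illustrative fragment, assuming the fmt and endpoints imports:

fmt.Println(endpoints.AddScheme("example.com", false))        // https://example.com
fmt.Println(endpoints.AddScheme("example.com", true))         // http://example.com
fmt.Println(endpoints.AddScheme("http://example.com", false)) // unchanged: http://example.com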

// EnumPartitions provides a way to retrieve the underlying partitions that
// make up the SDK's default Resolver, or any resolver decoded from a model
// file.
//
// Use this interface with DefaultResolver and DecodeModels to get the list of
// Partitions.
type EnumPartitions interface {
    Partitions() []Partition
}

// RegionsForService returns a map of regions for the partition and service.
// If either the partition or service does not exist false will be returned
// as the second parameter.
//
// This example shows how to get the regions for DynamoDB in the AWS partition.
//     rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
//
// This is equivalent to using the partition directly.
//     rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
    for _, p := range ps {
        if p.ID() != partitionID {
            continue
        }
        if _, ok := p.p.Services[serviceID]; !ok {
            break
        }

        s := Service{
            id: serviceID,
            p:  p.p,
        }
        return s.Regions(), true
    }

    return map[string]Region{}, false
}

// PartitionForRegion returns the first partition which includes the region
// passed in. This includes both known regions and regions which match
// a pattern supported by the partition which may include regions that are
// not explicitly known by the partition. Use the Regions method of the
// returned Partition if explicit support is needed.
func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
    for _, p := range ps {
        if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
            return p, true
        }
    }

    return Partition{}, false
}
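
A brief fragment combining the two helpers above, reusing the identifiers from the doc comment; it assumes the fmt and endpoints imports:

// Every region DynamoDB is modeled in for the AWS partition.
if rs, ok := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID); ok {
    for id := range rs {
        fmt.Println(id)
    }
}

// The partition a region belongs to, including regex-only matches.
if p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), "eu-west-1"); ok {
    fmt.Println(p.ID())
}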

// A Partition provides the ability to enumerate the partition's regions
// and services.
type Partition struct {
    id string
    p  *partition
}

// ID returns the identifier of the partition.
func (p Partition) ID() string { return p.id }

// EndpointFor attempts to resolve the endpoint based on service and region.
// See Options for information on configuring how the endpoint is resolved.
//
// If the service cannot be found in the metadata the UnknownServiceError
// error will be returned. This validation will occur regardless if
// StrictMatching is enabled. To enable resolving unknown services set the
// "ResolveUnknownService" option to true. When StrictMatching is disabled
// this option allows the partition resolver to resolve an endpoint based on
// the service endpoint ID provided.
//
// When resolving endpoints you can choose to enable StrictMatching. This will
// require the provided service and region to be known by the partition.
// If the endpoint cannot be strictly resolved an error will be returned. This
// mode is useful to ensure the endpoint resolved is valid. Without
// StrictMatching enabled the endpoint returned may look valid but may not work.
// StrictMatching requires the SDK to be updated if you want to take advantage
// of new regions and services expansions.
//
// Errors that can be returned.
//     * UnknownServiceError
//     * UnknownEndpointError
func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
    return p.p.EndpointFor(service, region, opts...)
}

// Regions returns a map of Regions indexed by their ID. This is useful for
// enumerating over the regions in a partition.
func (p Partition) Regions() map[string]Region {
    rs := map[string]Region{}
    for id := range p.p.Regions {
        rs[id] = Region{
            id: id,
            p:  p.p,
        }
    }

    return rs
}

// Services returns a map of Service indexed by their ID. This is useful for
// enumerating over the services in a partition.
func (p Partition) Services() map[string]Service {
    ss := map[string]Service{}
    for id := range p.p.Services {
        ss[id] = Service{
            id: id,
            p:  p.p,
        }
    }

    return ss
}
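
A hedged fragment of strict resolution against the AWS partition; the service and region strings are illustrative, and it assumes the fmt and endpoints imports:

p := endpoints.AwsPartition()
ep, err := p.EndpointFor("dynamodb", "us-east-1", endpoints.StrictMatchingOption)
if err != nil {
    // With StrictMatching the error lists what the partition actually knows.
    fmt.Println(err)
    return
}
fmt.Println(ep.URL, ep.SigningName, ep.SigningRegion)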

// A Region provides information about a region, and ability to resolve an
// endpoint from the context of a region, given a service.
type Region struct {
    id, desc string
    p        *partition
}

// ID returns the region's identifier.
func (r Region) ID() string { return r.id }

// ResolveEndpoint resolves an endpoint from the context of the region given
// a service. See Partition.EndpointFor for usage and errors that can be returned.
func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
    return r.p.EndpointFor(service, r.id, opts...)
}

// Services returns a list of all services that are known to be in this region.
func (r Region) Services() map[string]Service {
    ss := map[string]Service{}
    for id, s := range r.p.Services {
        if _, ok := s.Endpoints[r.id]; ok {
            ss[id] = Service{
                id: id,
                p:  r.p,
            }
        }
    }

    return ss
}

// A Service provides information about a service, and ability to resolve an
// endpoint from the context of a service, given a region.
type Service struct {
    id string
    p  *partition
}

// ID returns the identifier for the service.
func (s Service) ID() string { return s.id }

// ResolveEndpoint resolves an endpoint from the context of a service given
// a region. See Partition.EndpointFor for usage and errors that can be returned.
func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
    return s.p.EndpointFor(s.id, region, opts...)
}

// Regions returns a map of Regions that the service is present in.
//
// A region is the AWS region the service exists in. Whereas an Endpoint is
// a URL that can be resolved to an instance of a service.
func (s Service) Regions() map[string]Region {
    rs := map[string]Region{}
    for id := range s.p.Services[s.id].Endpoints {
        if _, ok := s.p.Regions[id]; ok {
            rs[id] = Region{
                id: id,
                p:  s.p,
            }
        }
    }

    return rs
}

// Endpoints returns a map of Endpoints indexed by their ID for all known
// endpoints for a service.
//
// A region is the AWS region the service exists in. Whereas an Endpoint is
// a URL that can be resolved to an instance of a service.
func (s Service) Endpoints() map[string]Endpoint {
    es := map[string]Endpoint{}
    for id := range s.p.Services[s.id].Endpoints {
        es[id] = Endpoint{
            id:        id,
            serviceID: s.id,
            p:         s.p,
        }
    }

    return es
}
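
A one-line discovery fragment built from the enumeration methods above, assuming the fmt and endpoints imports:

for id, s := range endpoints.AwsPartition().Services() {
    fmt.Printf("%s is modeled in %d regions\n", id, len(s.Regions()))
}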

// An Endpoint provides information about endpoints, and provides the ability
// to resolve that endpoint for the service, and the region the endpoint
// represents.
type Endpoint struct {
    id        string
    serviceID string
    p         *partition
}

// ID returns the identifier for an endpoint.
func (e Endpoint) ID() string { return e.id }

// ServiceID returns the identifier the endpoint belongs to.
func (e Endpoint) ServiceID() string { return e.serviceID }

// ResolveEndpoint resolves an endpoint from the context of a service and
// region the endpoint represents. See Partition.EndpointFor for usage and
// errors that can be returned.
func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
    return e.p.EndpointFor(e.serviceID, e.id, opts...)
}

// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
// service, and region.
type ResolvedEndpoint struct {
    // The endpoint URL
    URL string

    // The region that should be used for signing requests.
    SigningRegion string

    // The service name that should be used for signing requests.
    SigningName string

    // The signing method that should be used for signing requests.
    SigningMethod string
}
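
The resolved values map directly onto what a request signer needs. A hedged consumption fragment; the HTTP request construction is illustrative and assumes the net/http, log, and endpoints imports:

ep, err := endpoints.DefaultResolver().EndpointFor("sqs", "eu-central-1")
if err != nil {
    log.Fatal(err)
}
req, err := http.NewRequest("POST", ep.URL, nil)
if err != nil {
    log.Fatal(err)
}
// Sign req for ep.SigningName in ep.SigningRegion using ep.SigningMethod (e.g. "v4").
_ = req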

// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError awserr.Error

// An EndpointNotFoundError is returned when in StrictMatching mode, and the
// endpoint for the service and region cannot be found in any of the partitions.
type EndpointNotFoundError struct {
    awsError
    Partition string
    Service   string
    Region    string
}

// An UnknownServiceError is returned when the service does not resolve to an
// endpoint. Includes a list of all known services for the partition. Returned
// when a partition does not support the service.
type UnknownServiceError struct {
    awsError
    Partition string
    Service   string
    Known     []string
}

// NewUnknownServiceError builds and returns UnknownServiceError.
func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
    return UnknownServiceError{
        awsError: awserr.New("UnknownServiceError",
            "could not resolve endpoint for unknown service", nil),
        Partition: p,
        Service:   s,
        Known:     known,
    }
}

// Error returns the string representation of the error.
func (e UnknownServiceError) Error() string {
    extra := fmt.Sprintf("partition: %q, service: %q",
        e.Partition, e.Service)
    if len(e.Known) > 0 {
        extra += fmt.Sprintf(", known: %v", e.Known)
    }
    return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}

// String returns the string representation of the error.
func (e UnknownServiceError) String() string {
    return e.Error()
}

// An UnknownEndpointError is returned when in StrictMatching mode and the
// service is valid, but the region does not resolve to an endpoint. Includes
// a list of all known endpoints for the service.
type UnknownEndpointError struct {
    awsError
    Partition string
    Service   string
    Region    string
    Known     []string
}

// NewUnknownEndpointError builds and returns UnknownEndpointError.
func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
    return UnknownEndpointError{
        awsError: awserr.New("UnknownEndpointError",
            "could not resolve endpoint", nil),
        Partition: p,
        Service:   s,
        Region:    r,
        Known:     known,
    }
}

// Error returns the string representation of the error.
func (e UnknownEndpointError) Error() string {
    extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
        e.Partition, e.Service, e.Region)
    if len(e.Known) > 0 {
        extra += fmt.Sprintf(", known: %v", e.Known)
    }
    return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}

// String returns the string representation of the error.
func (e UnknownEndpointError) String() string {
    return e.Error()
}
303 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go (generated, vendored)
@@ -1,303 +0,0 @@
package endpoints
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type partitions []partition
|
|
||||||
|
|
||||||
func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
|
||||||
var opt Options
|
|
||||||
opt.Set(opts...)
|
|
||||||
|
|
||||||
for i := 0; i < len(ps); i++ {
|
|
||||||
if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return ps[i].EndpointFor(service, region, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If loose matching fallback to first partition format to use
|
|
||||||
// when resolving the endpoint.
|
|
||||||
if !opt.StrictMatching && len(ps) > 0 {
|
|
||||||
return ps[0].EndpointFor(service, region, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Partitions satisfies the EnumPartitions interface and returns a list
|
|
||||||
// of Partitions representing each partition represented in the SDK's
|
|
||||||
// endpoints model.
|
|
||||||
func (ps partitions) Partitions() []Partition {
|
|
||||||
parts := make([]Partition, 0, len(ps))
|
|
||||||
for i := 0; i < len(ps); i++ {
|
|
||||||
parts = append(parts, ps[i].Partition())
|
|
||||||
}
|
|
||||||
|
|
||||||
return parts
|
|
||||||
}
|
|
||||||
|
|
||||||
type partition struct {
|
|
||||||
ID string `json:"partition"`
|
|
||||||
Name string `json:"partitionName"`
|
|
||||||
DNSSuffix string `json:"dnsSuffix"`
|
|
||||||
RegionRegex regionRegex `json:"regionRegex"`
|
|
||||||
Defaults endpoint `json:"defaults"`
|
|
||||||
Regions regions `json:"regions"`
|
|
||||||
Services services `json:"services"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p partition) Partition() Partition {
|
|
||||||
return Partition{
|
|
||||||
id: p.ID,
|
|
||||||
p: &p,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
|
|
||||||
s, hasService := p.Services[service]
|
|
||||||
_, hasEndpoint := s.Endpoints[region]
|
|
||||||
|
|
||||||
if hasEndpoint && hasService {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if strictMatch {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.RegionRegex.MatchString(region)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
|
|
||||||
var opt Options
|
|
||||||
opt.Set(opts...)
|
|
||||||
|
|
||||||
s, hasService := p.Services[service]
|
|
||||||
if !(hasService || opt.ResolveUnknownService) {
|
|
||||||
// Only return error if the resolver will not fallback to creating
|
|
||||||
// endpoint based on service endpoint ID passed in.
|
|
||||||
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
|
|
||||||
}
|
|
||||||
|
|
||||||
e, hasEndpoint := s.endpointForRegion(region)
|
|
||||||
if !hasEndpoint && opt.StrictMatching {
|
|
||||||
return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
|
|
||||||
}
|
|
||||||
|
|
||||||
defs := []endpoint{p.Defaults, s.Defaults}
|
|
||||||
return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func serviceList(ss services) []string {
|
|
||||||
list := make([]string, 0, len(ss))
|
|
||||||
for k := range ss {
|
|
||||||
list = append(list, k)
|
|
||||||
}
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
func endpointList(es endpoints) []string {
|
|
||||||
list := make([]string, 0, len(es))
|
|
||||||
for k := range es {
|
|
||||||
list = append(list, k)
|
|
||||||
}
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
type regionRegex struct {
|
|
||||||
*regexp.Regexp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
|
|
||||||
// Strip leading and trailing quotes
|
|
||||||
regex, err := strconv.Unquote(string(b))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to strip quotes from regex, %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rr.Regexp, err = regexp.Compile(regex)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to unmarshal region regex, %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type regions map[string]region
|
|
||||||
|
|
||||||
type region struct {
|
|
||||||
Description string `json:"description"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type services map[string]service
|
|
||||||
|
|
||||||
type service struct {
|
|
||||||
PartitionEndpoint string `json:"partitionEndpoint"`
|
|
||||||
IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
|
|
||||||
Defaults endpoint `json:"defaults"`
|
|
||||||
Endpoints endpoints `json:"endpoints"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) endpointForRegion(region string) (endpoint, bool) {
|
|
||||||
if s.IsRegionalized == boxedFalse {
|
|
||||||
return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
if e, ok := s.Endpoints[region]; ok {
|
|
||||||
return e, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unable to find any matching endpoint, return
|
|
||||||
// blank that will be used for generic endpoint creation.
|
|
||||||
return endpoint{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
type endpoints map[string]endpoint
|
|
||||||
|
|
||||||
type endpoint struct {
|
|
||||||
Hostname string `json:"hostname"`
|
|
||||||
Protocols []string `json:"protocols"`
|
|
||||||
CredentialScope credentialScope `json:"credentialScope"`
|
|
||||||
|
|
||||||
// Custom fields not modeled
|
|
||||||
HasDualStack boxedBool `json:"-"`
|
|
||||||
DualStackHostname string `json:"-"`
|
|
||||||
|
|
||||||
// Signature Version not used
|
|
||||||
SignatureVersions []string `json:"signatureVersions"`
|
|
||||||
|
|
||||||
// SSLCommonName not used.
|
|
||||||
SSLCommonName string `json:"sslCommonName"`
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultProtocol = "https"
|
|
||||||
defaultSigner = "v4"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
protocolPriority = []string{"https", "http"}
|
|
||||||
signerPriority = []string{"v4", "v2"}
|
|
||||||
)
|
|
||||||
|
|
||||||
func getByPriority(s []string, p []string, def string) string {
|
|
||||||
if len(s) == 0 {
|
|
||||||
return def
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(p); i++ {
|
|
||||||
for j := 0; j < len(s); j++ {
|
|
||||||
if s[j] == p[i] {
|
|
||||||
return s[j]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
|
|
||||||
var merged endpoint
|
|
||||||
for _, def := range defs {
|
|
||||||
merged.mergeIn(def)
|
|
||||||
}
|
|
||||||
merged.mergeIn(e)
|
|
||||||
e = merged
|
|
||||||
|
|
||||||
hostname := e.Hostname
|
|
||||||
|
|
||||||
// Offset the hostname for dualstack if enabled
|
|
||||||
if opts.UseDualStack && e.HasDualStack == boxedTrue {
|
|
||||||
hostname = e.DualStackHostname
|
|
||||||
}
|
|
||||||
|
|
||||||
u := strings.Replace(hostname, "{service}", service, 1)
|
|
||||||
u = strings.Replace(u, "{region}", region, 1)
|
|
||||||
u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
|
|
||||||
|
|
||||||
scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
|
|
||||||
u = fmt.Sprintf("%s://%s", scheme, u)
|
|
||||||
|
|
||||||
signingRegion := e.CredentialScope.Region
|
|
||||||
if len(signingRegion) == 0 {
|
|
||||||
signingRegion = region
|
|
||||||
}
|
|
||||||
signingName := e.CredentialScope.Service
|
|
||||||
if len(signingName) == 0 {
|
|
||||||
signingName = service
|
|
||||||
}
|
|
||||||
|
|
||||||
return ResolvedEndpoint{
|
|
||||||
URL: u,
|
|
||||||
SigningRegion: signingRegion,
|
|
||||||
SigningName: signingName,
|
|
||||||
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getEndpointScheme(protocols []string, disableSSL bool) string {
|
|
||||||
if disableSSL {
|
|
||||||
return "http"
|
|
||||||
}
|
|
||||||
|
|
||||||
return getByPriority(protocols, protocolPriority, defaultProtocol)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpoint) mergeIn(other endpoint) {
|
|
||||||
if len(other.Hostname) > 0 {
|
|
||||||
e.Hostname = other.Hostname
|
|
||||||
}
|
|
||||||
if len(other.Protocols) > 0 {
|
|
||||||
e.Protocols = other.Protocols
|
|
||||||
}
|
|
||||||
if len(other.SignatureVersions) > 0 {
|
|
||||||
e.SignatureVersions = other.SignatureVersions
|
|
||||||
}
|
|
||||||
if len(other.CredentialScope.Region) > 0 {
|
|
||||||
e.CredentialScope.Region = other.CredentialScope.Region
|
|
||||||
}
|
|
||||||
if len(other.CredentialScope.Service) > 0 {
|
|
||||||
e.CredentialScope.Service = other.CredentialScope.Service
|
|
||||||
}
|
|
||||||
if len(other.SSLCommonName) > 0 {
|
|
||||||
e.SSLCommonName = other.SSLCommonName
|
|
||||||
}
|
|
||||||
if other.HasDualStack != boxedBoolUnset {
|
|
||||||
e.HasDualStack = other.HasDualStack
|
|
||||||
}
|
|
||||||
if len(other.DualStackHostname) > 0 {
|
|
||||||
e.DualStackHostname = other.DualStackHostname
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type credentialScope struct {
|
|
||||||
Region string `json:"region"`
|
|
||||||
Service string `json:"service"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type boxedBool int
|
|
||||||
|
|
||||||
func (b *boxedBool) UnmarshalJSON(buf []byte) error {
|
|
||||||
v, err := strconv.ParseBool(string(buf))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if v {
|
|
||||||
*b = boxedTrue
|
|
||||||
} else {
|
|
||||||
*b = boxedFalse
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
boxedBoolUnset boxedBool = iota
|
|
||||||
boxedFalse
|
|
||||||
boxedTrue
|
|
||||||
)
|
|
337 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go (generated, vendored)
@@ -1,337 +0,0 @@
// +build codegen
|
|
||||||
|
|
||||||
package endpoints
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"text/template"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A CodeGenOptions are the options for code generating the endpoints into
|
|
||||||
// Go code from the endpoints model definition.
|
|
||||||
type CodeGenOptions struct {
|
|
||||||
// Options for how the model will be decoded.
|
|
||||||
DecodeModelOptions DecodeModelOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set combines all of the option functions together
|
|
||||||
func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
|
|
||||||
for _, fn := range optFns {
|
|
||||||
fn(d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CodeGenModel given a endpoints model file will decode it and attempt to
|
|
||||||
// generate Go code from the model definition. Error will be returned if
|
|
||||||
// the code is unable to be generated, or decoded.
|
|
||||||
func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
|
|
||||||
var opts CodeGenOptions
|
|
||||||
opts.Set(optFns...)
|
|
||||||
|
|
||||||
resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
|
|
||||||
*d = opts.DecodeModelOptions
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
|
|
||||||
if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
|
|
||||||
return fmt.Errorf("failed to execute template, %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func toSymbol(v string) string {
|
|
||||||
out := []rune{}
|
|
||||||
for _, c := range strings.Title(v) {
|
|
||||||
if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
out = append(out, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
func quoteString(v string) string {
|
|
||||||
return fmt.Sprintf("%q", v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func regionConstName(p, r string) string {
|
|
||||||
return toSymbol(p) + toSymbol(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func partitionGetter(id string) string {
|
|
||||||
return fmt.Sprintf("%sPartition", toSymbol(id))
|
|
||||||
}
|
|
||||||
|
|
||||||
func partitionVarName(id string) string {
|
|
||||||
return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func listPartitionNames(ps partitions) string {
|
|
||||||
names := []string{}
|
|
||||||
switch len(ps) {
|
|
||||||
case 1:
|
|
||||||
return ps[0].Name
|
|
||||||
case 2:
|
|
||||||
return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
|
|
||||||
default:
|
|
||||||
for i, p := range ps {
|
|
||||||
if i == len(ps)-1 {
|
|
||||||
names = append(names, "and "+p.Name)
|
|
||||||
} else {
|
|
||||||
names = append(names, p.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(names, ", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func boxedBoolIfSet(msg string, v boxedBool) string {
|
|
||||||
switch v {
|
|
||||||
case boxedTrue:
|
|
||||||
return fmt.Sprintf(msg, "boxedTrue")
|
|
||||||
case boxedFalse:
|
|
||||||
return fmt.Sprintf(msg, "boxedFalse")
|
|
||||||
default:
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func stringIfSet(msg, v string) string {
|
|
||||||
if len(v) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(msg, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func stringSliceIfSet(msg string, vs []string) string {
|
|
||||||
if len(vs) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
names := []string{}
|
|
||||||
for _, v := range vs {
|
|
||||||
names = append(names, `"`+v+`"`)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(msg, strings.Join(names, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
func endpointIsSet(v endpoint) bool {
|
|
||||||
return !reflect.DeepEqual(v, endpoint{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func serviceSet(ps partitions) map[string]struct{} {
|
|
||||||
set := map[string]struct{}{}
|
|
||||||
for _, p := range ps {
|
|
||||||
for id := range p.Services {
|
|
||||||
set[id] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return set
|
|
||||||
}
|
|
||||||
|
|
||||||
var funcMap = template.FuncMap{
|
|
||||||
"ToSymbol": toSymbol,
|
|
||||||
"QuoteString": quoteString,
|
|
||||||
"RegionConst": regionConstName,
|
|
||||||
"PartitionGetter": partitionGetter,
|
|
||||||
"PartitionVarName": partitionVarName,
|
|
||||||
"ListPartitionNames": listPartitionNames,
|
|
||||||
"BoxedBoolIfSet": boxedBoolIfSet,
|
|
||||||
"StringIfSet": stringIfSet,
|
|
||||||
"StringSliceIfSet": stringSliceIfSet,
|
|
||||||
"EndpointIsSet": endpointIsSet,
|
|
||||||
"ServicesSet": serviceSet,
|
|
||||||
}
|
|
||||||
|
|
||||||
const v3Tmpl = `
|
|
||||||
{{ define "defaults" -}}
|
|
||||||
// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
|
|
||||||
|
|
||||||
package endpoints
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
)
|
|
||||||
|
|
||||||
{{ template "partition consts" . }}
|
|
||||||
|
|
||||||
{{ range $_, $partition := . }}
|
|
||||||
{{ template "partition region consts" $partition }}
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ template "service consts" . }}
|
|
||||||
|
|
||||||
{{ template "endpoint resolvers" . }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "partition consts" }}
|
|
||||||
// Partition identifiers
|
|
||||||
const (
|
|
||||||
{{ range $_, $p := . -}}
|
|
||||||
{{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
|
|
||||||
{{ end -}}
|
|
||||||
)
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "partition region consts" }}
|
|
||||||
// {{ .Name }} partition's regions.
|
|
||||||
const (
|
|
||||||
{{ range $id, $region := .Regions -}}
|
|
||||||
{{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
|
|
||||||
{{ end -}}
|
|
||||||
)
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "service consts" }}
|
|
||||||
// Service identifiers
|
|
||||||
const (
|
|
||||||
{{ $serviceSet := ServicesSet . -}}
|
|
||||||
{{ range $id, $_ := $serviceSet -}}
|
|
||||||
{{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
|
|
||||||
{{ end -}}
|
|
||||||
)
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "endpoint resolvers" }}
|
|
||||||
// DefaultResolver returns an Endpoint resolver that will be able
|
|
||||||
// to resolve endpoints for: {{ ListPartitionNames . }}.
|
|
||||||
//
|
|
||||||
// Use DefaultPartitions() to get the list of the default partitions.
|
|
||||||
func DefaultResolver() Resolver {
|
|
||||||
return defaultPartitions
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultPartitions returns a list of the partitions the SDK is bundled
|
|
||||||
// with. The available partitions are: {{ ListPartitionNames . }}.
|
|
||||||
//
|
|
||||||
// partitions := endpoints.DefaultPartitions
|
|
||||||
// for _, p := range partitions {
|
|
||||||
// // ... inspect partitions
|
|
||||||
// }
|
|
||||||
func DefaultPartitions() []Partition {
|
|
||||||
return defaultPartitions.Partitions()
|
|
||||||
}
|
|
||||||
|
|
||||||
var defaultPartitions = partitions{
|
|
||||||
{{ range $_, $partition := . -}}
|
|
||||||
{{ PartitionVarName $partition.ID }},
|
|
||||||
{{ end }}
|
|
||||||
}
|
|
||||||
|
|
||||||
{{ range $_, $partition := . -}}
|
|
||||||
{{ $name := PartitionGetter $partition.ID -}}
|
|
||||||
// {{ $name }} returns the Resolver for {{ $partition.Name }}.
|
|
||||||
func {{ $name }}() Partition {
|
|
||||||
return {{ PartitionVarName $partition.ID }}.Partition()
|
|
||||||
}
|
|
||||||
var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
|
|
||||||
{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ define "default partitions" }}
|
|
||||||
func DefaultPartitions() []Partition {
|
|
||||||
return []partition{
|
|
||||||
{{ range $_, $partition := . -}}
|
|
||||||
// {{ ToSymbol $partition.ID}}Partition(),
|
|
||||||
{{ end }}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ define "gocode Partition" -}}
|
|
||||||
partition{
|
|
||||||
{{ StringIfSet "ID: %q,\n" .ID -}}
|
|
||||||
{{ StringIfSet "Name: %q,\n" .Name -}}
|
|
||||||
{{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
|
|
||||||
RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
|
|
||||||
{{ if EndpointIsSet .Defaults -}}
|
|
||||||
Defaults: {{ template "gocode Endpoint" .Defaults }},
|
|
||||||
{{- end }}
|
|
||||||
Regions: {{ template "gocode Regions" .Regions }},
|
|
||||||
Services: {{ template "gocode Services" .Services }},
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode RegionRegex" -}}
|
|
||||||
regionRegex{
|
|
||||||
Regexp: func() *regexp.Regexp{
|
|
||||||
reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
|
|
||||||
return reg
|
|
||||||
}(),
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode Regions" -}}
|
|
||||||
regions{
|
|
||||||
{{ range $id, $region := . -}}
|
|
||||||
"{{ $id }}": {{ template "gocode Region" $region }},
|
|
||||||
{{ end -}}
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode Region" -}}
|
|
||||||
region{
|
|
||||||
{{ StringIfSet "Description: %q,\n" .Description -}}
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode Services" -}}
|
|
||||||
services{
|
|
||||||
{{ range $id, $service := . -}}
|
|
||||||
"{{ $id }}": {{ template "gocode Service" $service }},
|
|
||||||
{{ end }}
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode Service" -}}
|
|
||||||
service{
|
|
||||||
{{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
|
|
||||||
{{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
|
|
||||||
{{ if EndpointIsSet .Defaults -}}
|
|
||||||
Defaults: {{ template "gocode Endpoint" .Defaults -}},
|
|
||||||
{{- end }}
|
|
||||||
{{ if .Endpoints -}}
|
|
||||||
Endpoints: {{ template "gocode Endpoints" .Endpoints }},
|
|
||||||
{{- end }}
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode Endpoints" -}}
|
|
||||||
endpoints{
|
|
||||||
{{ range $id, $endpoint := . -}}
|
|
||||||
"{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
|
|
||||||
{{ end }}
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{ define "gocode Endpoint" -}}
|
|
||||||
endpoint{
|
|
||||||
{{ StringIfSet "Hostname: %q,\n" .Hostname -}}
|
|
||||||
{{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
|
|
||||||
{{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
|
|
||||||
{{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
|
|
||||||
{{ if or .CredentialScope.Region .CredentialScope.Service -}}
|
|
||||||
CredentialScope: credentialScope{
|
|
||||||
{{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
|
|
||||||
{{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
|
|
||||||
},
|
|
||||||
{{- end }}
|
|
||||||
{{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
|
|
||||||
{{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
|
|
||||||
|
|
||||||
}
|
|
||||||
{{- end }}
|
|
||||||
`
|
|
17 vendor/github.com/aws/aws-sdk-go/aws/errors.go (generated, vendored)
@@ -1,17 +0,0 @@
package aws

import "github.com/aws/aws-sdk-go/aws/awserr"

var (
    // ErrMissingRegion is an error that is returned if region configuration is
    // not found.
    //
    // @readonly
    ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

    // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
    // resolved for a service.
    //
    // @readonly
    ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
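
Both values are plain sentinels, so callers can compare errors against them directly. A small hedged fragment; the configuration guard is illustrative (cfg stands in for an *aws.Config) and assumes the aws import:

// Hypothetical guard before building a client.
if cfg.Region == nil || len(*cfg.Region) == 0 {
    return aws.ErrMissingRegion
}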
12 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go (generated, vendored)
@@ -1,12 +0,0 @@
package aws

// JSONValue is a representation of a grab bag type that will be marshaled
// into a json string. This type can be used just like any other map.
//
// Example:
//
//     values := aws.JSONValue{
//         "Foo": "Bar",
//     }
//     values["Baz"] = "Qux"
type JSONValue map[string]interface{}
112 vendor/github.com/aws/aws-sdk-go/aws/logger.go (generated, vendored)
@@ -1,112 +0,0 @@
package aws
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A LogLevelType defines the level logging should be performed at. Used to instruct
|
|
||||||
// the SDK which statements should be logged.
|
|
||||||
type LogLevelType uint
|
|
||||||
|
|
||||||
// LogLevel returns the pointer to a LogLevel. Should be used to workaround
|
|
||||||
// not being able to take the address of a non-composite literal.
|
|
||||||
func LogLevel(l LogLevelType) *LogLevelType {
|
|
||||||
return &l
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the LogLevel value or the default value LogOff if the LogLevel
|
|
||||||
// is nil. Safe to use on nil value LogLevelTypes.
|
|
||||||
func (l *LogLevelType) Value() LogLevelType {
|
|
||||||
if l != nil {
|
|
||||||
return *l
|
|
||||||
}
|
|
||||||
return LogOff
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
|
|
||||||
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
|
|
||||||
// LogLevel is nil, will default to LogOff comparison.
|
|
||||||
func (l *LogLevelType) Matches(v LogLevelType) bool {
|
|
||||||
c := l.Value()
|
|
||||||
return c&v == v
|
|
||||||
}
|
|
||||||
|
|
||||||
// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
|
|
||||||
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
|
|
||||||
// to LogOff comparison.
|
|
||||||
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
|
|
||||||
c := l.Value()
|
|
||||||
return c >= v
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LogOff states that no logging should be performed by the SDK. This is the
|
|
||||||
// default state of the SDK, and should be use to disable all logging.
|
|
||||||
LogOff LogLevelType = iota * 0x1000
|
|
||||||
|
|
||||||
// LogDebug state that debug output should be logged by the SDK. This should
|
|
||||||
// be used to inspect request made and responses received.
|
|
||||||
LogDebug
|
|
||||||
)
|
|
||||||
|
|
||||||
// Debug Logging Sub Levels
|
|
||||||
const (
|
|
||||||
// LogDebugWithSigning states that the SDK should log request signing and
|
|
||||||
// presigning events. This should be used to log the signing details of
|
|
||||||
// requests for debugging. Will also enable LogDebug.
|
|
||||||
LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
|
|
||||||
|
|
||||||
// LogDebugWithHTTPBody states the SDK should log HTTP request and response
|
|
||||||
// HTTP bodys in addition to the headers and path. This should be used to
|
|
||||||
// see the body content of requests and responses made while using the SDK
|
|
||||||
// Will also enable LogDebug.
|
|
||||||
LogDebugWithHTTPBody
|
|
||||||
|
|
||||||
// LogDebugWithRequestRetries states the SDK should log when service requests will
|
|
||||||
// be retried. This should be used to log when you want to log when service
|
|
||||||
// requests are being retried. Will also enable LogDebug.
|
|
||||||
LogDebugWithRequestRetries
|
|
||||||
|
|
||||||
// LogDebugWithRequestErrors states the SDK should log when service requests fail
|
|
||||||
// to build, send, validate, or unmarshal.
|
|
||||||
LogDebugWithRequestErrors
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Logger is a minimalistic interface for the SDK to log messages to. Should
|
|
||||||
// be used to provide custom logging writers for the SDK to use.
|
|
||||||
type Logger interface {
|
|
||||||
Log(...interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// A LoggerFunc is a convenience type to convert a function taking a variadic
|
|
||||||
// list of arguments and wrap it so the Logger interface can be used.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
|
|
||||||
// fmt.Fprintln(os.Stdout, args...)
|
|
||||||
// })})
|
|
||||||
type LoggerFunc func(...interface{})
|
|
||||||
|
|
||||||
// Log calls the wrapped function with the arguments provided
|
|
||||||
func (f LoggerFunc) Log(args ...interface{}) {
|
|
||||||
f(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
|
|
||||||
// use same formatting runes as the stdlib log.Logger
|
|
||||||
func NewDefaultLogger() Logger {
|
|
||||||
return &defaultLogger{
|
|
||||||
logger: log.New(os.Stdout, "", log.LstdFlags),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
|
|
||||||
type defaultLogger struct {
|
|
||||||
logger *log.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// Log logs the parameters to the stdlib logger. See log.Println.
|
|
||||||
func (l defaultLogger) Log(args ...interface{}) {
|
|
||||||
l.logger.Println(args...)
|
|
||||||
}
|
|
19 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go (generated, vendored)
@@ -1,19 +0,0 @@
// +build !appengine,!plan9

package request

import (
    "net"
    "os"
    "syscall"
)

func isErrConnectionReset(err error) bool {
    if opErr, ok := err.(*net.OpError); ok {
        if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
            return sysErr.Err == syscall.ECONNRESET
        }
    }

    return false
}
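
isErrConnectionReset is internal to the request package and feeds its retry decisions. A hedged sketch of the same check in caller code, using the newer errors.As unwrapping; the URL is illustrative and the errors, log, net/http, os, and syscall imports are assumed:

resp, err := http.Get("https://example.com")
if err != nil {
    var sysErr *os.SyscallError
    if errors.As(err, &sysErr) && sysErr.Err == syscall.ECONNRESET {
        // Connection reset by peer: usually safe to retry the request.
        log.Println("retrying after connection reset")
    }
}
_ = resp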
11 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go (generated, vendored)
@@ -1,11 +0,0 @@
// +build appengine plan9

package request

import (
    "strings"
)

func isErrConnectionReset(err error) bool {
    return strings.Contains(err.Error(), "connection reset")
}
256 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go (generated, vendored)
@@ -1,256 +0,0 @@
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Handlers provides a collection of request handlers for various
|
|
||||||
// stages of handling requests.
|
|
||||||
type Handlers struct {
|
|
||||||
Validate HandlerList
|
|
||||||
Build HandlerList
|
|
||||||
Sign HandlerList
|
|
||||||
Send HandlerList
|
|
||||||
ValidateResponse HandlerList
|
|
||||||
Unmarshal HandlerList
|
|
||||||
UnmarshalMeta HandlerList
|
|
||||||
UnmarshalError HandlerList
|
|
||||||
Retry HandlerList
|
|
||||||
AfterRetry HandlerList
|
|
||||||
Complete HandlerList
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy returns of this handler's lists.
|
|
||||||
func (h *Handlers) Copy() Handlers {
|
|
||||||
return Handlers{
|
|
||||||
Validate: h.Validate.copy(),
|
|
||||||
Build: h.Build.copy(),
|
|
||||||
Sign: h.Sign.copy(),
|
|
||||||
Send: h.Send.copy(),
|
|
||||||
ValidateResponse: h.ValidateResponse.copy(),
|
|
||||||
Unmarshal: h.Unmarshal.copy(),
|
|
||||||
UnmarshalError: h.UnmarshalError.copy(),
|
|
||||||
UnmarshalMeta: h.UnmarshalMeta.copy(),
|
|
||||||
Retry: h.Retry.copy(),
|
|
||||||
AfterRetry: h.AfterRetry.copy(),
|
|
||||||
Complete: h.Complete.copy(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear removes callback functions for all handlers
|
|
||||||
func (h *Handlers) Clear() {
|
|
||||||
h.Validate.Clear()
|
|
||||||
h.Build.Clear()
|
|
||||||
h.Send.Clear()
|
|
||||||
h.Sign.Clear()
|
|
||||||
h.Unmarshal.Clear()
|
|
||||||
h.UnmarshalMeta.Clear()
|
|
||||||
h.UnmarshalError.Clear()
|
|
||||||
h.ValidateResponse.Clear()
|
|
||||||
h.Retry.Clear()
|
|
||||||
h.AfterRetry.Clear()
|
|
||||||
h.Complete.Clear()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A HandlerListRunItem represents an entry in the HandlerList which
|
|
||||||
// is being run.
|
|
||||||
type HandlerListRunItem struct {
|
|
||||||
Index int
|
|
||||||
Handler NamedHandler
|
|
||||||
Request *Request
|
|
||||||
}
|
|
||||||
|
|
||||||
// A HandlerList manages zero or more handlers in a list.
|
|
||||||
type HandlerList struct {
|
|
||||||
list []NamedHandler
|
|
||||||
|
|
||||||
// Called after each request handler in the list is called. If set
|
|
||||||
// and the func returns true the HandlerList will continue to iterate
|
|
||||||
// over the request handlers. If false is returned the HandlerList
|
|
||||||
// will stop iterating.
|
|
||||||
//
|
|
||||||
// Should be used if extra logic to be performed between each handler
|
|
||||||
// in the list. This can be used to terminate a list's iteration
|
|
||||||
// based on a condition such as error like, HandlerListStopOnError.
|
|
||||||
// Or for logging like HandlerListLogItem.
|
|
||||||
AfterEachFn func(item HandlerListRunItem) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// A NamedHandler is a struct that contains a name and function callback.
|
|
||||||
type NamedHandler struct {
|
|
||||||
Name string
|
|
||||||
Fn func(*Request)
|
|
||||||
}
|
|
||||||
|
|
||||||
// copy creates a copy of the handler list.
|
|
||||||
func (l *HandlerList) copy() HandlerList {
|
|
||||||
n := HandlerList{
|
|
||||||
AfterEachFn: l.AfterEachFn,
|
|
||||||
}
|
|
||||||
if len(l.list) == 0 {
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear clears the handler list.
|
|
||||||
func (l *HandlerList) Clear() {
|
|
||||||
l.list = l.list[0:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of handlers in the list.
|
|
||||||
func (l *HandlerList) Len() int {
|
|
||||||
return len(l.list)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushBack pushes handler f to the back of the handler list.
|
|
||||||
func (l *HandlerList) PushBack(f func(*Request)) {
|
|
||||||
l.PushBackNamed(NamedHandler{"__anonymous", f})
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushBackNamed pushes named handler f to the back of the handler list.
|
|
||||||
func (l *HandlerList) PushBackNamed(n NamedHandler) {
|
|
||||||
if cap(l.list) == 0 {
|
|
||||||
l.list = make([]NamedHandler, 0, 5)
|
|
||||||
}
|
|
||||||
l.list = append(l.list, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushFront pushes handler f to the front of the handler list.
|
|
||||||
func (l *HandlerList) PushFront(f func(*Request)) {
|
|
||||||
l.PushFrontNamed(NamedHandler{"__anonymous", f})
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushFrontNamed pushes named handler f to the front of the handler list.
|
|
||||||
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
|
|
||||||
if cap(l.list) == len(l.list) {
|
|
||||||
// Allocating new list required
|
|
||||||
l.list = append([]NamedHandler{n}, l.list...)
|
|
||||||
} else {
|
|
||||||
// Enough room to prepend into list.
|
|
||||||
l.list = append(l.list, NamedHandler{})
|
|
||||||
copy(l.list[1:], l.list)
|
|
||||||
l.list[0] = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes a NamedHandler n
|
|
||||||
func (l *HandlerList) Remove(n NamedHandler) {
|
|
||||||
l.RemoveByName(n.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveByName removes a NamedHandler by name.
|
|
||||||
func (l *HandlerList) RemoveByName(name string) {
|
|
||||||
for i := 0; i < len(l.list); i++ {
|
|
||||||
m := l.list[i]
|
|
||||||
if m.Name == name {
|
|
||||||
// Shift array preventing creating new arrays
|
|
||||||
copy(l.list[i:], l.list[i+1:])
|
|
||||||
l.list[len(l.list)-1] = NamedHandler{}
|
|
||||||
l.list = l.list[:len(l.list)-1]
|
|
||||||
|
|
||||||
// decrement list so next check to length is correct
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SwapNamed will swap out any existing handlers with the same name as the
|
|
||||||
// passed in NamedHandler returning true if handlers were swapped. False is
|
|
||||||
// returned otherwise.
|
|
||||||
func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
|
|
||||||
for i := 0; i < len(l.list); i++ {
|
|
||||||
if l.list[i].Name == n.Name {
|
|
||||||
l.list[i].Fn = n.Fn
|
|
||||||
swapped = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return swapped
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBackNamed will replace the named handler if it exists in the handler list.
|
|
||||||
// If the handler does not exist the handler will be added to the end of the list.
|
|
||||||
func (l *HandlerList) SetBackNamed(n NamedHandler) {
|
|
||||||
if !l.SwapNamed(n) {
|
|
||||||
l.PushBackNamed(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetFrontNamed will replace the named handler if it exists in the handler list.
|
|
||||||
// If the handler does not exist the handler will be added to the beginning of
|
|
||||||
// the list.
|
|
||||||
func (l *HandlerList) SetFrontNamed(n NamedHandler) {
|
|
||||||
if !l.SwapNamed(n) {
|
|
||||||
l.PushFrontNamed(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run executes all handlers in the list with a given request object.
|
|
||||||
func (l *HandlerList) Run(r *Request) {
|
|
||||||
for i, h := range l.list {
|
|
||||||
h.Fn(r)
|
|
||||||
item := HandlerListRunItem{
|
|
||||||
Index: i, Handler: h, Request: r,
|
|
||||||
}
|
|
||||||
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerListLogItem logs the request handler and the state of the
|
|
||||||
// request's Error value. Always returns true to continue iterating
|
|
||||||
// request handlers in a HandlerList.
|
|
||||||
func HandlerListLogItem(item HandlerListRunItem) bool {
|
|
||||||
if item.Request.Config.Logger == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
|
|
||||||
item.Index, item.Handler.Name, item.Request.Error)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// HandlerListStopOnError returns false to stop the HandlerList iterating
|
|
||||||
// over request handlers if Request.Error is not nil. True otherwise
|
|
||||||
// to continue iterating.
|
|
||||||
func HandlerListStopOnError(item HandlerListRunItem) bool {
|
|
||||||
return item.Request.Error == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAppendUserAgent will add a string to the user agent prefixed with a
|
|
||||||
// single white space.
|
|
||||||
func WithAppendUserAgent(s string) Option {
|
|
||||||
return func(r *Request) {
|
|
||||||
r.Handlers.Build.PushBack(func(r2 *Request) {
|
|
||||||
AddToUserAgent(r, s)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
|
||||||
// header. If the extra parameters are provided they will be added as metadata to the
|
|
||||||
// name/version pair resulting in the following format.
|
|
||||||
// "name/version (extra0; extra1; ...)"
|
|
||||||
// The user agent part will be concatenated with this current request's user agent string.
|
|
||||||
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
|
|
||||||
ua := fmt.Sprintf("%s/%s", name, version)
|
|
||||||
if len(extra) > 0 {
|
|
||||||
ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
|
|
||||||
}
|
|
||||||
return func(r *Request) {
|
|
||||||
AddToUserAgent(r, ua)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
|
|
||||||
// The input string will be concatenated with the current request's user agent string.
|
|
||||||
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
|
|
||||||
return func(r *Request) {
|
|
||||||
AddToUserAgent(r, s)
|
|
||||||
}
|
|
||||||
}
|
|
24 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go (generated, vendored)
@@ -1,24 +0,0 @@
package request

import (
    "io"
    "net/http"
    "net/url"
)

func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
    req := new(http.Request)
    *req = *r
    req.URL = &url.URL{}
    *req.URL = *r.URL
    req.Body = body

    req.Header = http.Header{}
    for k, v := range r.Header {
        for _, vv := range v {
            req.Header.Add(k, vv)
        }
    }

    return req
}
58 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go (generated, vendored)
@@ -1,58 +0,0 @@
package request

import (
    "io"
    "sync"
)

// offsetReader is a thread-safe io.ReadCloser to prevent racing
// with retrying requests
type offsetReader struct {
    buf    io.ReadSeeker
    lock   sync.Mutex
    closed bool
}

func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
    reader := &offsetReader{}
    buf.Seek(offset, 0)

    reader.buf = buf
    return reader
}

// Close will close the instance of the offset reader's access to
// the underlying io.ReadSeeker.
func (o *offsetReader) Close() error {
    o.lock.Lock()
    defer o.lock.Unlock()
    o.closed = true
    return nil
}

// Read is a thread-safe read of the underlying io.ReadSeeker
func (o *offsetReader) Read(p []byte) (int, error) {
    o.lock.Lock()
    defer o.lock.Unlock()

    if o.closed {
        return 0, io.EOF
    }

    return o.buf.Read(p)
}

// Seek is a thread-safe seeking operation.
func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
    o.lock.Lock()
    defer o.lock.Unlock()

    return o.buf.Seek(offset, whence)
}

// CloseAndCopy will return a new offsetReader with a copy of the old buffer
// and close the old buffer.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
    o.Close()
    return newOffsetReader(o.buf, offset)
}
581 vendor/github.com/aws/aws-sdk-go/aws/request/request.go generated vendored
@@ -1,581 +0,0 @@
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ErrCodeSerialization is the serialization error code that is received
|
|
||||||
// during protocol unmarshaling.
|
|
||||||
ErrCodeSerialization = "SerializationError"
|
|
||||||
|
|
||||||
// ErrCodeRead is an error that is returned during HTTP reads.
|
|
||||||
ErrCodeRead = "ReadError"
|
|
||||||
|
|
||||||
// ErrCodeResponseTimeout is the connection timeout error that is received
|
|
||||||
// during body reads.
|
|
||||||
ErrCodeResponseTimeout = "ResponseTimeout"
|
|
||||||
|
|
||||||
// CanceledErrorCode is the error code that will be returned by an
|
|
||||||
// API request that was canceled. Requests given an aws.Context may
|
|
||||||
// return this error when canceled.
|
|
||||||
CanceledErrorCode = "RequestCanceled"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Request is the service request to be made.
|
|
||||||
type Request struct {
|
|
||||||
Config aws.Config
|
|
||||||
ClientInfo metadata.ClientInfo
|
|
||||||
Handlers Handlers
|
|
||||||
|
|
||||||
Retryer
|
|
||||||
Time time.Time
|
|
||||||
ExpireTime time.Duration
|
|
||||||
Operation *Operation
|
|
||||||
HTTPRequest *http.Request
|
|
||||||
HTTPResponse *http.Response
|
|
||||||
Body io.ReadSeeker
|
|
||||||
BodyStart int64 // offset from beginning of Body that the request body starts
|
|
||||||
Params interface{}
|
|
||||||
Error error
|
|
||||||
Data interface{}
|
|
||||||
RequestID string
|
|
||||||
RetryCount int
|
|
||||||
Retryable *bool
|
|
||||||
RetryDelay time.Duration
|
|
||||||
NotHoist bool
|
|
||||||
SignedHeaderVals http.Header
|
|
||||||
LastSignedAt time.Time
|
|
||||||
DisableFollowRedirects bool
|
|
||||||
|
|
||||||
context aws.Context
|
|
||||||
|
|
||||||
built bool
|
|
||||||
|
|
||||||
// Need to persist an intermediate body between the input Body and HTTP
|
|
||||||
// request body because the HTTP Client's transport can maintain a reference
|
|
||||||
// to the HTTP request's body after the client has returned. This value is
|
|
||||||
// safe to use concurrently and wrap the input Body for each HTTP request.
|
|
||||||
safeBody *offsetReader
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Operation is the service API operation to be made.
|
|
||||||
type Operation struct {
|
|
||||||
Name string
|
|
||||||
HTTPMethod string
|
|
||||||
HTTPPath string
|
|
||||||
*Paginator
|
|
||||||
|
|
||||||
BeforePresignFn func(r *Request) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns a new Request pointer for the service API
|
|
||||||
// operation and parameters.
|
|
||||||
//
|
|
||||||
// Params is any value of input parameters to be the request payload.
|
|
||||||
// Data is pointer value to an object which the request's response
|
|
||||||
// payload will be deserialized to.
|
|
||||||
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
|
||||||
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
|
|
||||||
|
|
||||||
method := operation.HTTPMethod
|
|
||||||
if method == "" {
|
|
||||||
method = "POST"
|
|
||||||
}
|
|
||||||
|
|
||||||
httpReq, _ := http.NewRequest(method, "", nil)
|
|
||||||
|
|
||||||
var err error
|
|
||||||
httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
|
|
||||||
if err != nil {
|
|
||||||
httpReq.URL = &url.URL{}
|
|
||||||
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
r := &Request{
|
|
||||||
Config: cfg,
|
|
||||||
ClientInfo: clientInfo,
|
|
||||||
Handlers: handlers.Copy(),
|
|
||||||
|
|
||||||
Retryer: retryer,
|
|
||||||
Time: time.Now(),
|
|
||||||
ExpireTime: 0,
|
|
||||||
Operation: operation,
|
|
||||||
HTTPRequest: httpReq,
|
|
||||||
Body: nil,
|
|
||||||
Params: params,
|
|
||||||
Error: err,
|
|
||||||
Data: data,
|
|
||||||
}
|
|
||||||
r.SetBufferBody([]byte{})
|
|
||||||
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Option is a functional option that can augment or modify a request when
|
|
||||||
// using a WithContext API operation method.
|
|
||||||
type Option func(*Request)
|
|
||||||
|
|
||||||
// WithGetResponseHeader builds a request Option which will retrieve a single
|
|
||||||
// header value from the HTTP Response. If there are multiple values for the
|
|
||||||
// header key use WithGetResponseHeaders instead to access the http.Header
|
|
||||||
// map directly. The passed in val pointer must be non-nil.
|
|
||||||
//
|
|
||||||
// This Option can be used multiple times with a single API operation.
|
|
||||||
//
|
|
||||||
// var id2, versionID string
|
|
||||||
// svc.PutObjectWithContext(ctx, params,
|
|
||||||
// request.WithGetResponseHeader("x-amz-id-2", &id2),
|
|
||||||
// request.WithGetResponseHeader("x-amz-version-id", &versionID),
|
|
||||||
// )
|
|
||||||
func WithGetResponseHeader(key string, val *string) Option {
|
|
||||||
return func(r *Request) {
|
|
||||||
r.Handlers.Complete.PushBack(func(req *Request) {
|
|
||||||
*val = req.HTTPResponse.Header.Get(key)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithGetResponseHeaders builds a request Option which will retrieve the
|
|
||||||
// headers from the HTTP response and assign them to the passed in headers
|
|
||||||
// variable. The passed in headers pointer must be non-nil.
|
|
||||||
//
|
|
||||||
// var headers http.Header
|
|
||||||
// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
|
|
||||||
func WithGetResponseHeaders(headers *http.Header) Option {
|
|
||||||
return func(r *Request) {
|
|
||||||
r.Handlers.Complete.PushBack(func(req *Request) {
|
|
||||||
*headers = req.HTTPResponse.Header
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithLogLevel is a request option that will set the request to use a specific
|
|
||||||
// log level when the request is made.
|
|
||||||
//
|
|
||||||
// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
|
|
||||||
func WithLogLevel(l aws.LogLevelType) Option {
|
|
||||||
return func(r *Request) {
|
|
||||||
r.Config.LogLevel = aws.LogLevel(l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyOptions will apply each option to the request calling them in the order
|
|
||||||
// they were provided.
|
|
||||||
func (r *Request) ApplyOptions(opts ...Option) {
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Context will always returns a non-nil context. If Request does not have a
|
|
||||||
// context aws.BackgroundContext will be returned.
|
|
||||||
func (r *Request) Context() aws.Context {
|
|
||||||
if r.context != nil {
|
|
||||||
return r.context
|
|
||||||
}
|
|
||||||
return aws.BackgroundContext()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetContext adds a Context to the current request that can be used to cancel
|
|
||||||
// an in-flight request. The Context value must not be nil, or this method will
|
|
||||||
// panic.
|
|
||||||
//
|
|
||||||
// Unlike http.Request.WithContext, SetContext does not return a copy of the
|
|
||||||
// Request. It is not safe to use a single Request value for multiple
|
|
||||||
// requests. A new Request should be created for each API operation request.
|
|
||||||
//
|
|
||||||
// Go 1.6 and below:
|
|
||||||
// The http.Request's Cancel field will be set to the Done() value of
|
|
||||||
// the context. This will overwrite the Cancel field's value.
|
|
||||||
//
|
|
||||||
// Go 1.7 and above:
|
|
||||||
// The http.Request.WithContext will be used to set the context on the underlying
|
|
||||||
// http.Request. This will create a shallow copy of the http.Request. The SDK
|
|
||||||
// may create sub contexts in the future for nested requests such as retries.
|
|
||||||
func (r *Request) SetContext(ctx aws.Context) {
|
|
||||||
if ctx == nil {
|
|
||||||
panic("context cannot be nil")
|
|
||||||
}
|
|
||||||
setRequestContext(r, ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WillRetry returns if the request can be retried.
|
|
||||||
func (r *Request) WillRetry() bool {
|
|
||||||
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParamsFilled returns if the request's parameters have been populated
|
|
||||||
// and the parameters are valid. False is returned if no parameters are
|
|
||||||
// provided or invalid.
|
|
||||||
func (r *Request) ParamsFilled() bool {
|
|
||||||
return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataFilled returns true if the request's data for response deserialization
|
|
||||||
// target has been set and is valid. False is returned if data is not
|
|
||||||
// set, or is invalid.
|
|
||||||
func (r *Request) DataFilled() bool {
|
|
||||||
return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBufferBody will set the request's body bytes that will be sent to
|
|
||||||
// the service API.
|
|
||||||
func (r *Request) SetBufferBody(buf []byte) {
|
|
||||||
r.SetReaderBody(bytes.NewReader(buf))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStringBody sets the body of the request to be backed by a string.
|
|
||||||
func (r *Request) SetStringBody(s string) {
|
|
||||||
r.SetReaderBody(strings.NewReader(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetReaderBody will set the request's body reader.
|
|
||||||
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
|
||||||
r.Body = reader
|
|
||||||
r.ResetBody()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Presign returns the request's signed URL. Error will be returned
|
|
||||||
// if the signing fails.
|
|
||||||
func (r *Request) Presign(expireTime time.Duration) (string, error) {
|
|
||||||
r.ExpireTime = expireTime
|
|
||||||
r.NotHoist = false
|
|
||||||
|
|
||||||
if r.Operation.BeforePresignFn != nil {
|
|
||||||
r = r.copy()
|
|
||||||
err := r.Operation.BeforePresignFn(r)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Sign()
|
|
||||||
if r.Error != nil {
|
|
||||||
return "", r.Error
|
|
||||||
}
|
|
||||||
return r.HTTPRequest.URL.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PresignRequest behaves just like Presign, with the addition of returning a
|
|
||||||
// set of headers that were signed.
|
|
||||||
//
|
|
||||||
// Returns the URL string for the API operation with signature in the query string,
|
|
||||||
// and the HTTP headers that were included in the signature. These headers must
|
|
||||||
// be included in any HTTP request made with the presigned URL.
|
|
||||||
//
|
|
||||||
// To prevent hoisting any headers to the query string set NotHoist to true on
|
|
||||||
// this Request value prior to calling PresignRequest.
|
|
||||||
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
|
|
||||||
r.ExpireTime = expireTime
|
|
||||||
r.Sign()
|
|
||||||
if r.Error != nil {
|
|
||||||
return "", nil, r.Error
|
|
||||||
}
|
|
||||||
return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func debugLogReqError(r *Request, stage string, retrying bool, err error) {
|
|
||||||
if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
retryStr := "not retrying"
|
|
||||||
if retrying {
|
|
||||||
retryStr = "will retry"
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
|
|
||||||
stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build will build the request's object so it can be signed and sent
|
|
||||||
// to the service. Build will also validate all the request's parameters.
|
|
||||||
// Any additional build Handlers set on this request will be run
|
|
||||||
// in the order they were set.
|
|
||||||
//
|
|
||||||
// The request will only be built once. Multiple calls to build will have
|
|
||||||
// no effect.
|
|
||||||
//
|
|
||||||
// If any Validate or Build errors occur the build will stop and the error
|
|
||||||
// which occurred will be returned.
|
|
||||||
func (r *Request) Build() error {
|
|
||||||
if !r.built {
|
|
||||||
r.Handlers.Validate.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
debugLogReqError(r, "Validate Request", false, r.Error)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
r.Handlers.Build.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
debugLogReqError(r, "Build Request", false, r.Error)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
r.built = true
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign will sign the request returning error if errors are encountered.
|
|
||||||
//
|
|
||||||
// Send will build the request prior to signing. All Sign Handlers will
|
|
||||||
// be executed in the order they were set.
|
|
||||||
func (r *Request) Sign() error {
|
|
||||||
r.Build()
|
|
||||||
if r.Error != nil {
|
|
||||||
debugLogReqError(r, "Build Request", false, r.Error)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Handlers.Sign.Run(r)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
|
|
||||||
if r.safeBody != nil {
|
|
||||||
r.safeBody.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
r.safeBody = newOffsetReader(r.Body, r.BodyStart)
|
|
||||||
|
|
||||||
// Go 1.8 tightened and clarified the rules code needs to use when building
|
|
||||||
// requests with the http package. Go 1.8 removed the automatic detection
|
|
||||||
// of if the Request.Body was empty, or actually had bytes in it. The SDK
|
|
||||||
// always sets the Request.Body even if it is empty and should not actually
|
|
||||||
// be sent. This is incorrect.
|
|
||||||
//
|
|
||||||
// Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
|
|
||||||
// client that the request really should be sent without a body. The
|
|
||||||
// Request.Body cannot be set to nil, which is preferable, because the
|
|
||||||
// field is exported and could introduce nil pointer dereferences for users
|
|
||||||
// of the SDK if they used that field.
|
|
||||||
//
|
|
||||||
// Related golang/go#18257
|
|
||||||
l, err := computeBodyLength(r.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var body io.ReadCloser
|
|
||||||
if l == 0 {
|
|
||||||
body = NoBody
|
|
||||||
} else if l > 0 {
|
|
||||||
body = r.safeBody
|
|
||||||
} else {
|
|
||||||
// Hack to prevent sending bodies for methods where the body
|
|
||||||
// should be ignored by the server. Sending bodies on these
|
|
||||||
// methods without an associated ContentLength will cause the
|
|
||||||
// request to socket timeout because the server does not handle
|
|
||||||
// Transfer-Encoding: chunked bodies for these methods.
|
|
||||||
//
|
|
||||||
// This would only happen if an aws.ReaderSeekerCloser was used with
|
|
||||||
// an io.Reader that was not also an io.Seeker.
|
|
||||||
switch r.Operation.HTTPMethod {
|
|
||||||
case "GET", "HEAD", "DELETE":
|
|
||||||
body = NoBody
|
|
||||||
default:
|
|
||||||
body = r.safeBody
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attempts to compute the length of the body of the reader using the
|
|
||||||
// io.Seeker interface. If the value is not seekable because of being
|
|
||||||
// a ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
|
|
||||||
// If no error occurs the length of the body will be returned.
|
|
||||||
func computeBodyLength(r io.ReadSeeker) (int64, error) {
|
|
||||||
seekable := true
|
|
||||||
// Determine if the seeker is actually seekable. ReaderSeekerCloser
|
|
||||||
// hides the fact that io.Readers might not actually be seekable.
|
|
||||||
switch v := r.(type) {
|
|
||||||
case aws.ReaderSeekerCloser:
|
|
||||||
seekable = v.IsSeeker()
|
|
||||||
case *aws.ReaderSeekerCloser:
|
|
||||||
seekable = v.IsSeeker()
|
|
||||||
}
|
|
||||||
if !seekable {
|
|
||||||
return -1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
curOffset, err := r.Seek(0, 1)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
endOffset, err := r.Seek(0, 2)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = r.Seek(curOffset, 0)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return endOffset - curOffset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBody will return an io.ReadSeeker of the Request's underlying
|
|
||||||
// input body with a concurrency safe wrapper.
|
|
||||||
func (r *Request) GetBody() io.ReadSeeker {
|
|
||||||
return r.safeBody
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send will send the request returning error if errors are encountered.
|
|
||||||
//
|
|
||||||
// Send will sign the request prior to sending. All Send Handlers will
|
|
||||||
// be executed in the order they were set.
|
|
||||||
//
|
|
||||||
// Canceling a request is non-deterministic. If a request has been canceled,
|
|
||||||
// then the transport will choose, randomly, one of the state channels during
|
|
||||||
// reads or getting the connection.
|
|
||||||
//
|
|
||||||
// readLoop() and getConn(req *Request, cm connectMethod)
|
|
||||||
// https://github.com/golang/go/blob/master/src/net/http/transport.go
|
|
||||||
//
|
|
||||||
// Send will not close the request.Request's body.
|
|
||||||
func (r *Request) Send() error {
|
|
||||||
defer func() {
|
|
||||||
// Regardless of success or failure of the request trigger the Complete
|
|
||||||
// request handlers.
|
|
||||||
r.Handlers.Complete.Run(r)
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
if aws.BoolValue(r.Retryable) {
|
|
||||||
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
|
||||||
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
|
||||||
}
|
|
||||||
|
|
||||||
// The previous http.Request will have a reference to the r.Body
|
|
||||||
// and the HTTP Client's Transport may still be reading from
|
|
||||||
// the request's body even though the Client's Do returned.
|
|
||||||
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
|
|
||||||
r.ResetBody()
|
|
||||||
|
|
||||||
// Closing response body to ensure that no response body is leaked
|
|
||||||
// between retry attempts.
|
|
||||||
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
|
|
||||||
r.HTTPResponse.Body.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Sign()
|
|
||||||
if r.Error != nil {
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Retryable = nil
|
|
||||||
|
|
||||||
r.Handlers.Send.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
if !shouldRetryCancel(r) {
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
err := r.Error
|
|
||||||
r.Handlers.Retry.Run(r)
|
|
||||||
r.Handlers.AfterRetry.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
debugLogReqError(r, "Send Request", false, err)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
debugLogReqError(r, "Send Request", true, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r.Handlers.UnmarshalMeta.Run(r)
|
|
||||||
r.Handlers.ValidateResponse.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
r.Handlers.UnmarshalError.Run(r)
|
|
||||||
err := r.Error
|
|
||||||
|
|
||||||
r.Handlers.Retry.Run(r)
|
|
||||||
r.Handlers.AfterRetry.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
debugLogReqError(r, "Validate Response", false, err)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
debugLogReqError(r, "Validate Response", true, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Handlers.Unmarshal.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
err := r.Error
|
|
||||||
r.Handlers.Retry.Run(r)
|
|
||||||
r.Handlers.AfterRetry.Run(r)
|
|
||||||
if r.Error != nil {
|
|
||||||
debugLogReqError(r, "Unmarshal Response", false, err)
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
debugLogReqError(r, "Unmarshal Response", true, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// copy will copy a request which will allow for local manipulation of the
|
|
||||||
// request.
|
|
||||||
func (r *Request) copy() *Request {
|
|
||||||
req := &Request{}
|
|
||||||
*req = *r
|
|
||||||
req.Handlers = r.Handlers.Copy()
|
|
||||||
op := *r.Operation
|
|
||||||
req.Operation = &op
|
|
||||||
return req
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddToUserAgent adds the string to the end of the request's current user agent.
|
|
||||||
func AddToUserAgent(r *Request, s string) {
|
|
||||||
curUA := r.HTTPRequest.Header.Get("User-Agent")
|
|
||||||
if len(curUA) > 0 {
|
|
||||||
s = curUA + " " + s
|
|
||||||
}
|
|
||||||
r.HTTPRequest.Header.Set("User-Agent", s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldRetryCancel(r *Request) bool {
|
|
||||||
awsErr, ok := r.Error.(awserr.Error)
|
|
||||||
timeoutErr := false
|
|
||||||
errStr := r.Error.Error()
|
|
||||||
if ok {
|
|
||||||
if awsErr.Code() == CanceledErrorCode {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
err := awsErr.OrigErr()
|
|
||||||
netErr, netOK := err.(net.Error)
|
|
||||||
timeoutErr = netOK && netErr.Temporary()
|
|
||||||
if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
|
|
||||||
errStr = urlErr.Err.Error()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// There can be two types of canceled errors here.
|
|
||||||
// The first being a net.Error and the other being an error.
|
|
||||||
// If the request was timed out, we want to continue the retry
|
|
||||||
// process. Otherwise, return the canceled error.
|
|
||||||
return timeoutErr ||
|
|
||||||
(errStr != "net/http: request canceled" &&
|
|
||||||
errStr != "net/http: request canceled while waiting for connection")
|
|
||||||
|
|
||||||
}
|
|
39 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go generated vendored
@@ -1,39 +0,0 @@
// +build !go1.8

package request

import "io"

// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
// and Close always returns nil. It can be used in an outgoing client
// request to explicitly signal that a request has zero bytes.
// An alternative, however, is to simply set Request.Body to nil.
//
// Copy of Go 1.8 NoBody type from net/http/http.go
type noBody struct{}

func (noBody) Read([]byte) (int, error)         { return 0, io.EOF }
func (noBody) Close() error                     { return nil }
func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }

// NoBody is an empty reader that will trigger the Go HTTP client to not include
// any body in the HTTP request.
var NoBody = noBody{}

// ResetBody rewinds the request body back to its starting position, and
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
//
// ResetBody will automatically be called by the SDK's build handler, but if
// the request is being used directly ResetBody must be called before the request
// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
// call ResetBody.
func (r *Request) ResetBody() {
    body, err := r.getNextRequestBody()
    if err != nil {
        r.Error = err
        return
    }

    r.HTTPRequest.Body = body
}
33 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go generated vendored
@@ -1,33 +0,0 @@
// +build go1.8

package request

import (
    "net/http"
)

// NoBody is a http.NoBody reader instructing Go HTTP client to not include
// any body in the HTTP request.
var NoBody = http.NoBody

// ResetBody rewinds the request body back to its starting position, and
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
//
// ResetBody will automatically be called by the SDK's build handler, but if
// the request is being used directly ResetBody must be called before the request
// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
// call ResetBody.
//
// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
// PUT/POST redirects.
func (r *Request) ResetBody() {
    body, err := r.getNextRequestBody()
    if err != nil {
        r.Error = err
        return
    }

    r.HTTPRequest.Body = body
    r.HTTPRequest.GetBody = r.getNextRequestBody
}
14 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go generated vendored
@@ -1,14 +0,0 @@
// +build go1.7

package request

import "github.com/aws/aws-sdk-go/aws"

// setContext updates the Request to use the passed in context for cancellation.
// Context will also be used for request retry delay.
//
// Creates shallow copy of the http.Request with the WithContext method.
func setRequestContext(r *Request, ctx aws.Context) {
    r.context = ctx
    r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
}
14 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go generated vendored
@@ -1,14 +0,0 @@
// +build !go1.7

package request

import "github.com/aws/aws-sdk-go/aws"

// setContext updates the Request to use the passed in context for cancellation.
// Context will also be used for request retry delay.
//
// Creates shallow copy of the http.Request with the WithContext method.
func setRequestContext(r *Request, ctx aws.Context) {
    r.context = ctx
    r.HTTPRequest.Cancel = ctx.Done()
}
236 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go generated vendored
@@ -1,236 +0,0 @@
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Pagination provides paginating of SDK API operations which are paginatable.
|
|
||||||
// Generally you should not use this type directly, but use the "Pages" API
|
|
||||||
// operations method to automatically perform pagination for you. Such as,
|
|
||||||
// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
|
|
||||||
//
|
|
||||||
// Pagination differs from a Paginator type in that pagination is the type that
|
|
||||||
// does the pagination between API operations, and Paginator defines the
|
|
||||||
// configuration that will be used per page request.
|
|
||||||
//
|
|
||||||
// cont := true
|
|
||||||
// for p.Next() && cont {
|
|
||||||
// data := p.Page().(*s3.ListObjectsOutput)
|
|
||||||
// // process the page's data
|
|
||||||
// }
|
|
||||||
// return p.Err()
|
|
||||||
//
|
|
||||||
// See service client API operation Pages methods for examples how the SDK will
|
|
||||||
// use the Pagination type.
|
|
||||||
type Pagination struct {
|
|
||||||
// Function to return a Request value for each pagination request.
|
|
||||||
// Any configuration or handlers that need to be applied to the request
|
|
||||||
// prior to getting the next page should be done here before the request
|
|
||||||
// returned.
|
|
||||||
//
|
|
||||||
// NewRequest should always be built from the same API operations. It is
|
|
||||||
// undefined if different API operations are returned on subsequent calls.
|
|
||||||
NewRequest func() (*Request, error)
|
|
||||||
|
|
||||||
started bool
|
|
||||||
nextTokens []interface{}
|
|
||||||
|
|
||||||
err error
|
|
||||||
curPage interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasNextPage will return true if Pagination is able to determine that the API
|
|
||||||
// operation has additional pages. False will be returned if there are no more
|
|
||||||
// pages remaining.
|
|
||||||
//
|
|
||||||
// Will always return true if Next has not been called yet.
|
|
||||||
func (p *Pagination) HasNextPage() bool {
|
|
||||||
return !(p.started && len(p.nextTokens) == 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Err returns the error Pagination encountered when retrieving the next page.
|
|
||||||
func (p *Pagination) Err() error {
|
|
||||||
return p.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Page returns the current page. Page should only be called after a successful
|
|
||||||
// call to Next. It is undefined what Page will return if Page is called after
|
|
||||||
// Next returns false.
|
|
||||||
func (p *Pagination) Page() interface{} {
|
|
||||||
return p.curPage
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next will attempt to retrieve the next page for the API operation. When a page
|
|
||||||
// is retrieved true will be returned. If the page cannot be retrieved, or there
|
|
||||||
// are no more pages false will be returned.
|
|
||||||
//
|
|
||||||
// Use the Page method to retrieve the current page data. The data will need
|
|
||||||
// to be cast to the API operation's output type.
|
|
||||||
//
|
|
||||||
// Use the Err method to determine if an error occurred if Page returns false.
|
|
||||||
func (p *Pagination) Next() bool {
|
|
||||||
if !p.HasNextPage() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := p.NewRequest()
|
|
||||||
if err != nil {
|
|
||||||
p.err = err
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.started {
|
|
||||||
for i, intok := range req.Operation.InputTokens {
|
|
||||||
awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
p.started = true
|
|
||||||
|
|
||||||
err = req.Send()
|
|
||||||
if err != nil {
|
|
||||||
p.err = err
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
p.nextTokens = req.nextPageTokens()
|
|
||||||
p.curPage = req.Data
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Paginator is the configuration data that defines how an API operation
|
|
||||||
// should be paginated. This type is used by the API service models to define
|
|
||||||
// the generated pagination config for service APIs.
|
|
||||||
//
|
|
||||||
// The Pagination type is what provides iterating between pages of an API. It
|
|
||||||
// is only used to store the token metadata the SDK should use for performing
|
|
||||||
// pagination.
|
|
||||||
type Paginator struct {
|
|
||||||
InputTokens []string
|
|
||||||
OutputTokens []string
|
|
||||||
LimitToken string
|
|
||||||
TruncationToken string
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextPageTokens returns the tokens to use when asking for the next page of data.
|
|
||||||
func (r *Request) nextPageTokens() []interface{} {
|
|
||||||
if r.Operation.Paginator == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if r.Operation.TruncationToken != "" {
|
|
||||||
tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
|
|
||||||
if len(tr) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v := tr[0].(type) {
|
|
||||||
case *bool:
|
|
||||||
if !aws.BoolValue(v) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case bool:
|
|
||||||
if v == false {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tokens := []interface{}{}
|
|
||||||
tokenAdded := false
|
|
||||||
for _, outToken := range r.Operation.OutputTokens {
|
|
||||||
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
|
|
||||||
if len(v) > 0 {
|
|
||||||
tokens = append(tokens, v[0])
|
|
||||||
tokenAdded = true
|
|
||||||
} else {
|
|
||||||
tokens = append(tokens, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !tokenAdded {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return tokens
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure a deprecated item is only logged once instead of each time it is used.
|
|
||||||
func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
|
|
||||||
if logger == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if atomic.CompareAndSwapInt32(flag, 0, 1) {
|
|
||||||
logger.Log(msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
logDeprecatedHasNextPage int32
|
|
||||||
logDeprecatedNextPage int32
|
|
||||||
logDeprecatedEachPage int32
|
|
||||||
)
|
|
||||||
|
|
||||||
// HasNextPage returns true if this request has more pages of data available.
|
|
||||||
//
|
|
||||||
// Deprecated Use Pagination type for configurable pagination of API operations
|
|
||||||
func (r *Request) HasNextPage() bool {
|
|
||||||
logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
|
|
||||||
"Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
|
|
||||||
|
|
||||||
return len(r.nextPageTokens()) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// NextPage returns a new Request that can be executed to return the next
|
|
||||||
// page of result data. Call .Send() on this request to execute it.
|
|
||||||
//
|
|
||||||
// Deprecated Use Pagination type for configurable pagination of API operations
|
|
||||||
func (r *Request) NextPage() *Request {
|
|
||||||
logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
|
|
||||||
"Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
|
|
||||||
|
|
||||||
tokens := r.nextPageTokens()
|
|
||||||
if len(tokens) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
|
|
||||||
nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
|
|
||||||
for i, intok := range nr.Operation.InputTokens {
|
|
||||||
awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
|
|
||||||
}
|
|
||||||
return nr
|
|
||||||
}
|
|
||||||
|
|
||||||
// EachPage iterates over each page of a paginated request object. The fn
|
|
||||||
// parameter should be a function with the following sample signature:
|
|
||||||
//
|
|
||||||
// func(page *T, lastPage bool) bool {
|
|
||||||
// return true // return false to stop iterating
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Where "T" is the structure type matching the output structure of the given
|
|
||||||
// operation. For example, a request object generated by
|
|
||||||
// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
|
|
||||||
// as the structure "T". The lastPage value represents whether the page is
|
|
||||||
// the last page of data or not. The return value of this function should
|
|
||||||
// return true to keep iterating or false to stop.
|
|
||||||
//
|
|
||||||
// Deprecated Use Pagination type for configurable pagination of API operations
|
|
||||||
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
|
|
||||||
logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
|
|
||||||
"Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
|
|
||||||
|
|
||||||
for page := r; page != nil; page = page.NextPage() {
|
|
||||||
if err := page.Send(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
|
|
||||||
return page.Error
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
161 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go generated vendored
@@ -1,161 +0,0 @@
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Retryer is an interface to control retry logic for a given service.
|
|
||||||
// The default implementation used by most services is the client.DefaultRetryer
|
|
||||||
// structure, which contains basic retry logic using exponential backoff.
|
|
||||||
type Retryer interface {
|
|
||||||
RetryRules(*Request) time.Duration
|
|
||||||
ShouldRetry(*Request) bool
|
|
||||||
MaxRetries() int
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRetryer sets a config Retryer value to the given Config returning it
|
|
||||||
// for chaining.
|
|
||||||
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
|
|
||||||
cfg.Retryer = retryer
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
// retryableCodes is a collection of service response codes which are retry-able
|
|
||||||
// without any further action.
|
|
||||||
var retryableCodes = map[string]struct{}{
|
|
||||||
"RequestError": {},
|
|
||||||
"RequestTimeout": {},
|
|
||||||
ErrCodeResponseTimeout: {},
|
|
||||||
"RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
|
|
||||||
}
|
|
||||||
|
|
||||||
var throttleCodes = map[string]struct{}{
|
|
||||||
"ProvisionedThroughputExceededException": {},
|
|
||||||
"Throttling": {},
|
|
||||||
"ThrottlingException": {},
|
|
||||||
"RequestLimitExceeded": {},
|
|
||||||
"RequestThrottled": {},
|
|
||||||
"TooManyRequestsException": {}, // Lambda functions
|
|
||||||
"PriorRequestNotComplete": {}, // Route53
|
|
||||||
}
|
|
||||||
|
|
||||||
// credsExpiredCodes is a collection of error codes which signify the credentials
|
|
||||||
// need to be refreshed. Expired tokens require refreshing of credentials, and
|
|
||||||
// resigning before the request can be retried.
|
|
||||||
var credsExpiredCodes = map[string]struct{}{
|
|
||||||
"ExpiredToken": {},
|
|
||||||
"ExpiredTokenException": {},
|
|
||||||
"RequestExpired": {}, // EC2 Only
|
|
||||||
}
|
|
||||||
|
|
||||||
func isCodeThrottle(code string) bool {
|
|
||||||
_, ok := throttleCodes[code]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func isCodeRetryable(code string) bool {
|
|
||||||
if _, ok := retryableCodes[code]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return isCodeExpiredCreds(code)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isCodeExpiredCreds(code string) bool {
|
|
||||||
_, ok := credsExpiredCodes[code]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
var validParentCodes = map[string]struct{}{
|
|
||||||
ErrCodeSerialization: {},
|
|
||||||
ErrCodeRead: {},
|
|
||||||
}
|
|
||||||
|
|
||||||
type temporaryError interface {
|
|
||||||
Temporary() bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNestedErrorRetryable(parentErr awserr.Error) bool {
|
|
||||||
if parentErr == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := validParentCodes[parentErr.Code()]; !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
err := parentErr.OrigErr()
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeRetryable(aerr.Code())
|
|
||||||
}
|
|
||||||
|
|
||||||
if t, ok := err.(temporaryError); ok {
|
|
||||||
return t.Temporary()
|
|
||||||
}
|
|
||||||
|
|
||||||
return isErrConnectionReset(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
|
||||||
// Returns false if error is nil.
|
|
||||||
func IsErrorRetryable(err error) bool {
|
|
||||||
if err != nil {
|
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
|
||||||
// Returns false if error is nil.
|
|
||||||
func IsErrorThrottle(err error) bool {
|
|
||||||
if err != nil {
|
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeThrottle(aerr.Code())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
|
|
||||||
// Returns false if error is nil.
|
|
||||||
func IsErrorExpiredCreds(err error) bool {
|
|
||||||
if err != nil {
|
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
return isCodeExpiredCreds(aerr.Code())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
|
||||||
// Returns false if the request has no Error set.
|
|
||||||
//
|
|
||||||
// Alias for the utility function IsErrorRetryable
|
|
||||||
func (r *Request) IsErrorRetryable() bool {
|
|
||||||
return IsErrorRetryable(r.Error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
|
||||||
// Returns false if the request has no Error set
|
|
||||||
//
|
|
||||||
// Alias for the utility function IsErrorThrottle
|
|
||||||
func (r *Request) IsErrorThrottle() bool {
|
|
||||||
return IsErrorThrottle(r.Error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsErrorExpired returns whether the error code is a credential expiry error.
|
|
||||||
// Returns false if the request has no Error set.
|
|
||||||
//
|
|
||||||
// Alias for the utility function IsErrorExpiredCreds
|
|
||||||
func (r *Request) IsErrorExpired() bool {
|
|
||||||
return IsErrorExpiredCreds(r.Error)
|
|
||||||
}
|
|
94 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go generated vendored
@@ -1,94 +0,0 @@
package request

import (
    "io"
    "time"

    "github.com/aws/aws-sdk-go/aws/awserr"
)

var timeoutErr = awserr.New(
    ErrCodeResponseTimeout,
    "read on body has reached the timeout limit",
    nil,
)

type readResult struct {
    n   int
    err error
}

// timeoutReadCloser will handle body reads that take too long.
// We will return an ErrReadTimeout error if a timeout occurs.
type timeoutReadCloser struct {
    reader   io.ReadCloser
    duration time.Duration
}

// Read will spin off a goroutine to call the reader's Read method. We will
// select on the timer's channel or the read's channel. Whoever completes first
// will be returned.
func (r *timeoutReadCloser) Read(b []byte) (int, error) {
    timer := time.NewTimer(r.duration)
    c := make(chan readResult, 1)

    go func() {
        n, err := r.reader.Read(b)
        timer.Stop()
        c <- readResult{n: n, err: err}
    }()

    select {
    case data := <-c:
        return data.n, data.err
    case <-timer.C:
        return 0, timeoutErr
    }
}

func (r *timeoutReadCloser) Close() error {
    return r.reader.Close()
}

const (
    // HandlerResponseTimeout is what we use to signify the name of the
    // response timeout handler.
    HandlerResponseTimeout = "ResponseTimeoutHandler"
)

// adaptToResponseTimeoutError is a handler that will replace any top level error
// with an ErrCodeResponseTimeout error, if that is what its child error is.
func adaptToResponseTimeoutError(req *Request) {
    if err, ok := req.Error.(awserr.Error); ok {
        aerr, ok := err.OrigErr().(awserr.Error)
        if ok && aerr.Code() == ErrCodeResponseTimeout {
            req.Error = aerr
        }
    }
}

// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
// This will allow for per read timeouts. If a timeout occurred, we will return the
// ErrCodeResponseTimeout.
//
//     svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
func WithResponseReadTimeout(duration time.Duration) Option {
    return func(r *Request) {

        var timeoutHandler = NamedHandler{
            HandlerResponseTimeout,
            func(req *Request) {
                req.HTTPResponse.Body = &timeoutReadCloser{
                    reader:   req.HTTPResponse.Body,
                    duration: duration,
                }
            }}

        // remove the handler so we are not stomping over any new durations.
        r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
        r.Handlers.Send.PushBackNamed(timeoutHandler)

        r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
        r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
    }
}
234 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go generated vendored
@@ -1,234 +0,0 @@
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// InvalidParameterErrCode is the error code for invalid parameters errors
|
|
||||||
InvalidParameterErrCode = "InvalidParameter"
|
|
||||||
// ParamRequiredErrCode is the error code for required parameter errors
|
|
||||||
ParamRequiredErrCode = "ParamRequiredError"
|
|
||||||
// ParamMinValueErrCode is the error code for fields with too low of a
|
|
||||||
// number value.
|
|
||||||
ParamMinValueErrCode = "ParamMinValueError"
|
|
||||||
// ParamMinLenErrCode is the error code for fields without enough elements.
|
|
||||||
ParamMinLenErrCode = "ParamMinLenError"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Validator provides a way for types to perform validation logic on their
|
|
||||||
// input values that external code can use to determine if a type's values
|
|
||||||
// are valid.
|
|
||||||
type Validator interface {
|
|
||||||
Validate() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// An ErrInvalidParams provides wrapping of invalid parameter errors found when
|
|
||||||
// validating API operation input parameters.
|
|
||||||
type ErrInvalidParams struct {
|
|
||||||
// Context is the base context of the invalid parameter group.
|
|
||||||
Context string
|
|
||||||
errs []ErrInvalidParam
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a new invalid parameter error to the collection of invalid
|
|
||||||
// parameters. The context of the invalid parameter will be updated to reflect
|
|
||||||
// this collection.
|
|
||||||
func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
|
|
||||||
err.SetContext(e.Context)
|
|
||||||
e.errs = append(e.errs, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNested adds the invalid parameter errors from another ErrInvalidParams
|
|
||||||
// value into this collection. The nested errors will have their nested context
|
|
||||||
// updated and base context to reflect the merging.
|
|
||||||
//
|
|
||||||
// Use for nested validations errors.
|
|
||||||
func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
|
|
||||||
for _, err := range nested.errs {
|
|
||||||
err.SetContext(e.Context)
|
|
||||||
err.AddNestedContext(nestedCtx)
|
|
||||||
e.errs = append(e.errs, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of invalid parameter errors
|
|
||||||
func (e ErrInvalidParams) Len() int {
|
|
||||||
return len(e.errs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code returns the code of the error
|
|
||||||
func (e ErrInvalidParams) Code() string {
|
|
||||||
return InvalidParameterErrCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message returns the message of the error
|
|
||||||
func (e ErrInvalidParams) Message() string {
|
|
||||||
return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the string formatted form of the invalid parameters.
|
|
||||||
func (e ErrInvalidParams) Error() string {
|
|
||||||
w := &bytes.Buffer{}
|
|
||||||
fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
|
|
||||||
|
|
||||||
for _, err := range e.errs {
|
|
||||||
fmt.Fprintf(w, "- %s\n", err.Message())
|
|
||||||
}
|
|
||||||
|
|
||||||
return w.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
|
|
||||||
func (e ErrInvalidParams) OrigErr() error {
|
|
||||||
return awserr.NewBatchError(
|
|
||||||
InvalidParameterErrCode, e.Message(), e.OrigErrs())
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErrs returns a slice of the invalid parameters
|
|
||||||
func (e ErrInvalidParams) OrigErrs() []error {
|
|
||||||
errs := make([]error, len(e.errs))
|
|
||||||
for i := 0; i < len(errs); i++ {
|
|
||||||
errs[i] = e.errs[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
return errs
|
|
||||||
}
|
|
||||||
|
|
||||||
// An ErrInvalidParam represents an invalid parameter error type.
|
|
||||||
type ErrInvalidParam interface {
|
|
||||||
awserr.Error
|
|
||||||
|
|
||||||
// Field name the error occurred on.
|
|
||||||
Field() string
|
|
||||||
|
|
||||||
// SetContext updates the context of the error.
|
|
||||||
SetContext(string)
|
|
||||||
|
|
||||||
// AddNestedContext updates the error's context to include a nested level.
|
|
||||||
AddNestedContext(string)
|
|
||||||
}
|
|
||||||
|
|
||||||
type errInvalidParam struct {
|
|
||||||
context string
|
|
||||||
nestedContext string
|
|
||||||
field string
|
|
||||||
code string
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code returns the error code for the type of invalid parameter.
|
|
||||||
func (e *errInvalidParam) Code() string {
|
|
||||||
return e.code
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message returns the reason the parameter was invalid, and its context.
|
|
||||||
func (e *errInvalidParam) Message() string {
|
|
||||||
return fmt.Sprintf("%s, %s.", e.msg, e.Field())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns the string version of the invalid parameter error.
|
|
||||||
func (e *errInvalidParam) Error() string {
|
|
||||||
return fmt.Sprintf("%s: %s", e.code, e.Message())
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr returns nil. Implemented for the awserr.Error interface.
|
|
||||||
func (e *errInvalidParam) OrigErr() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Field returns the field and context the error occurred in.
|
|
||||||
func (e *errInvalidParam) Field() string {
|
|
||||||
field := e.context
|
|
||||||
if len(field) > 0 {
|
|
||||||
field += "."
|
|
||||||
}
|
|
||||||
if len(e.nestedContext) > 0 {
|
|
||||||
field += fmt.Sprintf("%s.", e.nestedContext)
|
|
||||||
}
|
|
||||||
field += e.field
|
|
||||||
|
|
||||||
return field
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetContext updates the base context of the error.
|
|
||||||
func (e *errInvalidParam) SetContext(ctx string) {
|
|
||||||
e.context = ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNestedContext prepends a context to the field's path.
|
|
||||||
func (e *errInvalidParam) AddNestedContext(ctx string) {
|
|
||||||
if len(e.nestedContext) == 0 {
|
|
||||||
e.nestedContext = ctx
|
|
||||||
} else {
|
|
||||||
e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// An ErrParamRequired represents a required parameter error.
|
|
||||||
type ErrParamRequired struct {
|
|
||||||
errInvalidParam
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrParamRequired creates a new required parameter error.
|
|
||||||
func NewErrParamRequired(field string) *ErrParamRequired {
|
|
||||||
return &ErrParamRequired{
|
|
||||||
errInvalidParam{
|
|
||||||
code: ParamRequiredErrCode,
|
|
||||||
field: field,
|
|
||||||
msg: fmt.Sprintf("missing required field"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// An ErrParamMinValue represents a minimum value parameter error.
|
|
||||||
type ErrParamMinValue struct {
|
|
||||||
errInvalidParam
|
|
||||||
min float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrParamMinValue creates a new minimum value parameter error.
|
|
||||||
func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
|
|
||||||
return &ErrParamMinValue{
|
|
||||||
errInvalidParam: errInvalidParam{
|
|
||||||
code: ParamMinValueErrCode,
|
|
||||||
field: field,
|
|
||||||
msg: fmt.Sprintf("minimum field value of %v", min),
|
|
||||||
},
|
|
||||||
min: min,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MinValue returns the field's required minimum value.
|
|
||||||
//
|
|
||||||
// float64 is returned for both int and float min values.
|
|
||||||
func (e *ErrParamMinValue) MinValue() float64 {
|
|
||||||
return e.min
|
|
||||||
}
|
|
||||||
|
|
||||||
// An ErrParamMinLen represents a minimum length parameter error.
|
|
||||||
type ErrParamMinLen struct {
|
|
||||||
errInvalidParam
|
|
||||||
min int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrParamMinLen creates a new minimum length parameter error.
|
|
||||||
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
|
|
||||||
return &ErrParamMinLen{
|
|
||||||
errInvalidParam: errInvalidParam{
|
|
||||||
code: ParamMinLenErrCode,
|
|
||||||
field: field,
|
|
||||||
msg: fmt.Sprintf("minimum field size of %v", min),
|
|
||||||
},
|
|
||||||
min: min,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MinLen returns the field's required minimum length.
|
|
||||||
func (e *ErrParamMinLen) MinLen() int {
|
|
||||||
return e.min
|
|
||||||
}
|
|
295 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go generated vendored
@@ -1,295 +0,0 @@
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
|
|
||||||
// the waiter's max attempts have been exhausted.
|
|
||||||
const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
|
|
||||||
|
|
||||||
// A WaiterOption is a function that will update the Waiter value's fields to
|
|
||||||
// configure the waiter.
|
|
||||||
type WaiterOption func(*Waiter)
|
|
||||||
|
|
||||||
// WithWaiterMaxAttempts returns the maximum number of times the waiter should
|
|
||||||
// attempt to check the resource for the target state.
|
|
||||||
func WithWaiterMaxAttempts(max int) WaiterOption {
|
|
||||||
return func(w *Waiter) {
|
|
||||||
w.MaxAttempts = max
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaiterDelay will return a delay the waiter should pause between attempts to
|
|
||||||
// check the resource state. The passed in attempt is the number of times the
|
|
||||||
// Waiter has checked the resource state.
|
|
||||||
//
|
|
||||||
// Attempt is the number of attempts the Waiter has made checking the resource
|
|
||||||
// state.
|
|
||||||
type WaiterDelay func(attempt int) time.Duration
|
|
||||||
|
|
||||||
// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
|
|
||||||
// delay the waiter should use between attempts. It ignores the number of
|
|
||||||
// attempts made.
|
|
||||||
func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
|
|
||||||
return func(attempt int) time.Duration {
|
|
||||||
return delay
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
|
|
||||||
func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
|
|
||||||
return func(w *Waiter) {
|
|
||||||
w.Delay = delayer
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithWaiterLogger returns a waiter option to set the logger a waiter
|
|
||||||
// should use to log warnings and errors to.
|
|
||||||
func WithWaiterLogger(logger aws.Logger) WaiterOption {
|
|
||||||
return func(w *Waiter) {
|
|
||||||
w.Logger = logger
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithWaiterRequestOptions returns a waiter option setting the request
|
|
||||||
// options for each request the waiter makes. Appends to waiter's request
|
|
||||||
// options already set.
|
|
||||||
func WithWaiterRequestOptions(opts ...Option) WaiterOption {
|
|
||||||
return func(w *Waiter) {
|
|
||||||
w.RequestOptions = append(w.RequestOptions, opts...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Waiter provides the functionality to perform a blocking call which will
|
|
||||||
// wait for a resource state to be satisfied by a service.
|
|
||||||
//
|
|
||||||
// This type should not be used directly. The API operations provided in the
|
|
||||||
// service packages prefixed with "WaitUntil" should be used instead.
|
|
||||||
type Waiter struct {
|
|
||||||
Name string
|
|
||||||
Acceptors []WaiterAcceptor
|
|
||||||
Logger aws.Logger
|
|
||||||
|
|
||||||
MaxAttempts int
|
|
||||||
Delay WaiterDelay
|
|
||||||
|
|
||||||
RequestOptions []Option
|
|
||||||
NewRequest func([]Option) (*Request, error)
|
|
||||||
SleepWithContext func(aws.Context, time.Duration) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyOptions updates the waiter with the list of waiter options provided.
|
|
||||||
func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
|
|
||||||
for _, fn := range opts {
|
|
||||||
fn(w)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaiterState are states the waiter uses based on WaiterAcceptor definitions
|
|
||||||
// to identify if the resource state the waiter is waiting on has occurred.
|
|
||||||
type WaiterState int
|
|
||||||
|
|
||||||
// String returns the string representation of the waiter state.
|
|
||||||
func (s WaiterState) String() string {
|
|
||||||
switch s {
|
|
||||||
case SuccessWaiterState:
|
|
||||||
return "success"
|
|
||||||
case FailureWaiterState:
|
|
||||||
return "failure"
|
|
||||||
case RetryWaiterState:
|
|
||||||
return "retry"
|
|
||||||
default:
|
|
||||||
return "unknown waiter state"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// States the waiter acceptors will use to identify target resource states.
|
|
||||||
const (
|
|
||||||
SuccessWaiterState WaiterState = iota // waiter successful
|
|
||||||
FailureWaiterState // waiter failed
|
|
||||||
RetryWaiterState // waiter needs to be retried
|
|
||||||
)
|
|
||||||
|
|
||||||
// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
|
|
||||||
// definition's Expected attribute.
|
|
||||||
type WaiterMatchMode int
|
|
||||||
|
|
||||||
// Modes the waiter will use when inspecting API response to identify target
|
|
||||||
// resource states.
|
|
||||||
const (
|
|
||||||
PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
|
|
||||||
PathWaiterMatch // match on specific path
|
|
||||||
PathAnyWaiterMatch // match on any path
|
|
||||||
PathListWaiterMatch // match on list of paths
|
|
||||||
StatusWaiterMatch // match on status code
|
|
||||||
ErrorWaiterMatch // match on error
|
|
||||||
)
|
|
||||||
|
|
||||||
// String returns the string representation of the waiter match mode.
|
|
||||||
func (m WaiterMatchMode) String() string {
|
|
||||||
switch m {
|
|
||||||
case PathAllWaiterMatch:
|
|
||||||
return "pathAll"
|
|
||||||
case PathWaiterMatch:
|
|
||||||
return "path"
|
|
||||||
case PathAnyWaiterMatch:
|
|
||||||
return "pathAny"
|
|
||||||
case PathListWaiterMatch:
|
|
||||||
return "pathList"
|
|
||||||
case StatusWaiterMatch:
|
|
||||||
return "status"
|
|
||||||
case ErrorWaiterMatch:
|
|
||||||
return "error"
|
|
||||||
default:
|
|
||||||
return "unknown waiter match mode"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WaitWithContext will make requests for the API operation using NewRequest to
|
|
||||||
// build API requests. The request's response will be compared against the
|
|
||||||
// Waiter's Acceptors to determine the successful state of the resource the
|
|
||||||
// waiter is inspecting.
|
|
||||||
//
|
|
||||||
// The passed in context must not be nil. If it is nil a panic will occur. The
|
|
||||||
// Context will be used to cancel the waiter's pending requests and retry delays.
|
|
||||||
// Use aws.BackgroundContext if no context is available.
|
|
||||||
//
|
|
||||||
// The waiter will continue until the target state defined by the Acceptors,
|
|
||||||
// or the max attempts expires.
|
|
||||||
//
|
|
||||||
// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
|
|
||||||
// retryer ShouldRetry returns false. This normally will happen when the max
|
|
||||||
// wait attempts expires.
|
|
||||||
func (w Waiter) WaitWithContext(ctx aws.Context) error {
|
|
||||||
|
|
||||||
for attempt := 1; ; attempt++ {
|
|
||||||
req, err := w.NewRequest(w.RequestOptions)
|
|
||||||
if err != nil {
|
|
||||||
waiterLogf(w.Logger, "unable to create request %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
|
|
||||||
err = req.Send()
|
|
||||||
|
|
||||||
// See if any of the acceptors match the request's response, or error
|
|
||||||
for _, a := range w.Acceptors {
|
|
||||||
if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
|
|
||||||
return matchErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Waiter should only check the resource state MaxAttempts times
|
|
||||||
// This is here instead of in the for loop above to prevent delaying
|
|
||||||
// unnecessary when the waiter will not retry.
|
|
||||||
if attempt == w.MaxAttempts {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delay to wait before inspecting the resource again
|
|
||||||
delay := w.Delay(attempt)
|
|
||||||
if sleepFn := req.Config.SleepDelay; sleepFn != nil {
|
|
||||||
// Support SleepDelay for backwards compatibility and testing
|
|
||||||
sleepFn(delay)
|
|
||||||
} else {
|
|
||||||
sleepCtxFn := w.SleepWithContext
|
|
||||||
if sleepCtxFn == nil {
|
|
||||||
sleepCtxFn = aws.SleepWithContext
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := sleepCtxFn(ctx, delay); err != nil {
|
|
||||||
return awserr.New(CanceledErrorCode, "waiter context canceled", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A WaiterAcceptor provides the information needed to wait for an API operation
|
|
||||||
// to complete.
|
|
||||||
type WaiterAcceptor struct {
|
|
||||||
State WaiterState
|
|
||||||
Matcher WaiterMatchMode
|
|
||||||
Argument string
|
|
||||||
Expected interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// match returns if the acceptor found a match with the passed in request
|
|
||||||
// or error. True is returned if the acceptor made a match, error is returned
|
|
||||||
// if there was an error attempting to perform the match.
|
|
||||||
func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
|
|
||||||
result := false
|
|
||||||
var vals []interface{}
|
|
||||||
|
|
||||||
switch a.Matcher {
|
|
||||||
case PathAllWaiterMatch, PathWaiterMatch:
|
|
||||||
// Require all matches to be equal for result to match
|
|
||||||
vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
|
|
||||||
if len(vals) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
result = true
|
|
||||||
for _, val := range vals {
|
|
||||||
if !awsutil.DeepEqual(val, a.Expected) {
|
|
||||||
result = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case PathAnyWaiterMatch:
|
|
||||||
// Only a single match needs to equal for the result to match
|
|
||||||
vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
|
|
||||||
for _, val := range vals {
|
|
||||||
if awsutil.DeepEqual(val, a.Expected) {
|
|
||||||
result = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case PathListWaiterMatch:
|
|
||||||
// ignored matcher
|
|
||||||
case StatusWaiterMatch:
|
|
||||||
s := a.Expected.(int)
|
|
||||||
result = s == req.HTTPResponse.StatusCode
|
|
||||||
case ErrorWaiterMatch:
|
|
||||||
if aerr, ok := err.(awserr.Error); ok {
|
|
||||||
result = aerr.Code() == a.Expected.(string)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
|
|
||||||
name, a.Matcher)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !result {
|
|
||||||
// If there was no matching result found there is nothing more to do
|
|
||||||
// for this response, retry the request.
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch a.State {
|
|
||||||
case SuccessWaiterState:
|
|
||||||
// waiter completed
|
|
||||||
return true, nil
|
|
||||||
case FailureWaiterState:
|
|
||||||
// Waiter failure state triggered
|
|
||||||
return true, awserr.New(WaiterResourceNotReadyErrorCode,
|
|
||||||
"failed waiting for successful resource state", err)
|
|
||||||
case RetryWaiterState:
|
|
||||||
// clear the error and retry the operation
|
|
||||||
return false, nil
|
|
||||||
default:
|
|
||||||
waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
|
|
||||||
name, a.State)
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
|
|
||||||
if logger != nil {
|
|
||||||
logger.Log(fmt.Sprintf(msg, args...))
|
|
||||||
}
|
|
||||||
}
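A short usage sketch of the waiter options defined above. It assumes an S3 client whose generated WaitUntilBucketExistsWithContext helper is built on this Waiter type; the service call, input type, and bucket name are assumptions, not taken from this diff.

	package main

	import (
		"time"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/request"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		svc := s3.New(session.Must(session.NewSession()))

		// Override the generated waiter's defaults with the options above.
		err := svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(),
			&s3.HeadBucketInput{Bucket: aws.String("my-bucket")},
			request.WithWaiterMaxAttempts(10),
			request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
		)
		if err != nil {
			// WaiterResourceNotReadyErrorCode is returned once attempts are exhausted.
			panic(err)
		}
	}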
273 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go generated vendored
@@ -1,273 +0,0 @@
/*
|
|
||||||
Package session provides configuration for the SDK's service clients.
|
|
||||||
|
|
||||||
Sessions can be shared across all service clients that share the same base
|
|
||||||
configuration. The Session is built from the SDK's default configuration and
|
|
||||||
request handlers.
|
|
||||||
|
|
||||||
Sessions should be cached when possible, because creating a new Session will
|
|
||||||
load all configuration values from the environment, and config files each time
|
|
||||||
the Session is created. Sharing the Session value across all of your service
|
|
||||||
clients will ensure the configuration is loaded the fewest number of times possible.
|
|
||||||
|
|
||||||
Concurrency
|
|
||||||
|
|
||||||
Sessions are safe to use concurrently as long as the Session is not being
|
|
||||||
modified. The SDK will not modify the Session once the Session has been created.
|
|
||||||
Creating service clients concurrently from a shared Session is safe.
|
|
||||||
|
|
||||||
Sessions from Shared Config
|
|
||||||
|
|
||||||
Sessions can be created using the method above that will only load the
|
|
||||||
additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
|
|
||||||
Alternatively you can explicitly create a Session with shared config enabled.
|
|
||||||
To do this you can use NewSessionWithOptions to configure how the Session will
|
|
||||||
be created. Using the NewSessionWithOptions with SharedConfigState set to
|
|
||||||
SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
|
|
||||||
environment variable was set.
|
|
||||||
|
|
||||||
Creating Sessions
|
|
||||||
|
|
||||||
When creating Sessions optional aws.Config values can be passed in that will
|
|
||||||
override the default, or loaded config values the Session is being created
|
|
||||||
with. This allows you to provide additional, or case based, configuration
|
|
||||||
as needed.
|
|
||||||
|
|
||||||
By default NewSession will only load credentials from the shared credentials
|
|
||||||
file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
|
|
||||||
set to a truthy value the Session will be created from the configuration
|
|
||||||
values from the shared config (~/.aws/config) and shared credentials
|
|
||||||
(~/.aws/credentials) files. See the section Sessions from Shared Config for
|
|
||||||
more information.
|
|
||||||
|
|
||||||
Create a Session with the default config and request handlers. With credentials,
|
|
||||||
region, and profile loaded from the environment and shared config automatically.
|
|
||||||
Requires the AWS_PROFILE to be set, or "default" is used.
|
|
||||||
|
|
||||||
// Create Session
|
|
||||||
sess := session.Must(session.NewSession())
|
|
||||||
|
|
||||||
// Create a Session with a custom region
|
|
||||||
sess := session.Must(session.NewSession(&aws.Config{
|
|
||||||
Region: aws.String("us-east-1"),
|
|
||||||
}))
|
|
||||||
|
|
||||||
// Create a S3 client instance from a session
|
|
||||||
sess := session.Must(session.NewSession())
|
|
||||||
|
|
||||||
svc := s3.New(sess)
|
|
||||||
|
|
||||||
Create Session With Option Overrides
|
|
||||||
|
|
||||||
In addition to NewSession, Sessions can be created using NewSessionWithOptions.
|
|
||||||
This func allows you to control and override how the Session will be created
|
|
||||||
through code instead of being driven by environment variables only.
|
|
||||||
|
|
||||||
Use NewSessionWithOptions when you want to provide the config profile, or
|
|
||||||
override the shared config state (AWS_SDK_LOAD_CONFIG).
|
|
||||||
|
|
||||||
// Equivalent to session.NewSession()
|
|
||||||
sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
// Options
|
|
||||||
}))
|
|
||||||
|
|
||||||
// Specify profile to load for the session's config
|
|
||||||
sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
Profile: "profile_name",
|
|
||||||
}))
|
|
||||||
|
|
||||||
// Specify profile for config and region for requests
|
|
||||||
sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
Config: aws.Config{Region: aws.String("us-east-1")},
|
|
||||||
Profile: "profile_name",
|
|
||||||
}))
|
|
||||||
|
|
||||||
// Force enable Shared Config support
|
|
||||||
sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
SharedConfigState: session.SharedConfigEnable,
|
|
||||||
}))
|
|
||||||
|
|
||||||
Adding Handlers
|
|
||||||
|
|
||||||
You can add handlers to a session for processing HTTP requests. All service
|
|
||||||
clients that use the session inherit the handlers. For example, the following
|
|
||||||
handler logs every request and its payload made by a service client:
|
|
||||||
|
|
||||||
// Create a session, and add additional handlers for all service
|
|
||||||
// clients created with the Session to inherit. Adds logging handler.
|
|
||||||
sess := session.Must(session.NewSession())
|
|
||||||
|
|
||||||
sess.Handlers.Send.PushFront(func(r *request.Request) {
|
|
||||||
// Log every request made and its payload
|
|
||||||
logger.Println("Request: %s/%s, Payload: %s",
|
|
||||||
r.ClientInfo.ServiceName, r.Operation, r.Params)
|
|
||||||
})
|
|
||||||
|
|
||||||
Deprecated "New" function
|
|
||||||
|
|
||||||
The New session function has been deprecated because it does not provide a good
|
|
||||||
way to return errors that occur when loading the configuration files and values.
|
|
||||||
Because of this, NewSession was created so errors can be retrieved when
|
|
||||||
creating a session fails.
|
|
||||||
|
|
||||||
Shared Config Fields
|
|
||||||
|
|
||||||
By default the SDK will only load the shared credentials file's (~/.aws/credentials)
|
|
||||||
credentials values, and all other config is provided by the environment variables,
|
|
||||||
SDK defaults, and user provided aws.Config values.
|
|
||||||
|
|
||||||
If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
|
|
||||||
option is used to create the Session the full shared config values will be
|
|
||||||
loaded. This includes credentials, region, and support for assume role. In
|
|
||||||
addition the Session will load its configuration from both the shared config
|
|
||||||
file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
|
|
||||||
files have the same format.
|
|
||||||
|
|
||||||
If both config files are present the configuration from both files will be
|
|
||||||
read. The Session will be created from configuration values from the shared
|
|
||||||
credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
|
|
||||||
|
|
||||||
Credentials are the values the SDK should use for authenticating requests with
|
|
||||||
AWS Services. Credentials from a configuration file must include both
aws_access_key_id and aws_secret_access_key, provided together in the
same file, to be considered valid. The values will be ignored if not a complete
group. aws_session_token is an optional field that can be provided if both of
the other two fields are also provided.
|
|
||||||
|
|
||||||
aws_access_key_id = AKID
|
|
||||||
aws_secret_access_key = SECRET
|
|
||||||
aws_session_token = TOKEN
|
|
||||||
|
|
||||||
Assume Role values allow you to configure the SDK to assume an IAM role using
|
|
||||||
a set of credentials provided in a config file via the source_profile field.
|
|
||||||
Both "role_arn" and "source_profile" are required. The SDK supports assuming
|
|
||||||
a role with MFA token if the session option AssumeRoleTokenProvider
|
|
||||||
is set.
|
|
||||||
|
|
||||||
role_arn = arn:aws:iam::<account_number>:role/<role_name>
|
|
||||||
source_profile = profile_with_creds
|
|
||||||
external_id = 1234
|
|
||||||
mfa_serial = <serial or mfa arn>
|
|
||||||
role_session_name = session_name
|
|
||||||
|
|
||||||
Region is the region the SDK should use for looking up AWS service endpoints
|
|
||||||
and signing requests.
|
|
||||||
|
|
||||||
region = us-east-1
|
|
||||||
|
|
||||||
Assume Role with MFA token
|
|
||||||
|
|
||||||
To create a session with support for assuming an IAM role with MFA set the
|
|
||||||
session option AssumeRoleTokenProvider to a function that will prompt for the
|
|
||||||
MFA token code when the SDK assumes the role and refreshes the role's credentials.
|
|
||||||
This allows you to configure the SDK via the shared config to assume a role
|
|
||||||
with MFA tokens.
|
|
||||||
|
|
||||||
In order for the SDK to assume a role with MFA the SharedConfigState
|
|
||||||
session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
|
|
||||||
environment variable set.
|
|
||||||
|
|
||||||
The shared configuration instructs the SDK to assume an IAM role with MFA
|
|
||||||
when the mfa_serial configuration field is set in the shared config
|
|
||||||
(~/.aws/config) or shared credentials (~/.aws/credentials) file.
|
|
||||||
|
|
||||||
If mfa_serial is set in the configuration, the SDK will assume the role; if the
AssumeRoleTokenProvider session option is not set, an error will be returned
when creating the session.
|
|
||||||
|
|
||||||
sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
|
|
||||||
}))
|
|
||||||
|
|
||||||
// Create service client value configured for credentials
|
|
||||||
// from assumed role.
|
|
||||||
svc := s3.New(sess)
|
|
||||||
|
|
||||||
To set up assume role outside of a session see the stscreds.AssumeRoleProvider
|
|
||||||
documentation.
|
|
||||||
|
|
||||||
Environment Variables
|
|
||||||
|
|
||||||
When a Session is created several environment variables can be set to adjust
|
|
||||||
how the SDK functions, and what configuration data it loads when creating
|
|
||||||
Sessions. All environment values are optional, but some values like credentials
|
|
||||||
require multiple of the values to set or the partial values will be ignored.
|
|
||||||
All environment variable values are strings unless otherwise noted.
|
|
||||||
|
|
||||||
Environment configuration values. If set, both Access Key ID and Secret Access
Key must be provided. Session Token can optionally also be provided, but is
not required.
|
|
||||||
|
|
||||||
# Access Key ID
|
|
||||||
AWS_ACCESS_KEY_ID=AKID
|
|
||||||
AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
|
|
||||||
|
|
||||||
# Secret Access Key
|
|
||||||
AWS_SECRET_ACCESS_KEY=SECRET
|
|
||||||
AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
|
|
||||||
|
|
||||||
# Session Token
|
|
||||||
AWS_SESSION_TOKEN=TOKEN
|
|
||||||
|
|
||||||
Region value will instruct the SDK where to make service API requests to. If it is
|
|
||||||
not provided in the environment the region must be provided before a service
|
|
||||||
client request is made.
|
|
||||||
|
|
||||||
AWS_REGION=us-east-1
|
|
||||||
|
|
||||||
# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
|
|
||||||
# and AWS_REGION is not also set.
|
|
||||||
AWS_DEFAULT_REGION=us-east-1
|
|
||||||
|
|
||||||
Profile name the SDK should use when loading shared config from the
|
|
||||||
configuration files. If not provided "default" will be used as the profile name.
|
|
||||||
|
|
||||||
AWS_PROFILE=my_profile
|
|
||||||
|
|
||||||
# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
|
|
||||||
# and AWS_PROFILE is not also set.
|
|
||||||
AWS_DEFAULT_PROFILE=my_profile
|
|
||||||
|
|
||||||
SDK load config instructs the SDK to load the shared config in addition to
|
|
||||||
shared credentials. This also expands the configuration loaded so the shared
|
|
||||||
credentials will have parity with the shared config file. This also enables
|
|
||||||
Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
|
|
||||||
env values as well.
|
|
||||||
|
|
||||||
AWS_SDK_LOAD_CONFIG=1
|
|
||||||
|
|
||||||
Shared credentials file path can be set to instruct the SDK to use an alternative
|
|
||||||
file for the shared credentials. If not set the file will be loaded from
|
|
||||||
$HOME/.aws/credentials on Linux/Unix based systems, and
|
|
||||||
%USERPROFILE%\.aws\credentials on Windows.
|
|
||||||
|
|
||||||
AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
|
|
||||||
|
|
||||||
Shared config file path can be set to instruct the SDK to use an alternative
|
|
||||||
file for the shared config. If not set the file will be loaded from
|
|
||||||
$HOME/.aws/config on Linux/Unix based systems, and
|
|
||||||
%USERPROFILE%\.aws\config on Windows.
|
|
||||||
|
|
||||||
AWS_CONFIG_FILE=$HOME/my_shared_config
|
|
||||||
|
|
||||||
Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
|
|
||||||
will use instead of the default system's root CA bundle. Use this only
|
|
||||||
if you want to replace the CA bundle the SDK uses for TLS requests.
|
|
||||||
|
|
||||||
AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
|
|
||||||
|
|
||||||
Enabling this option will attempt to merge the Transport into the SDK's HTTP
|
|
||||||
client. If the client's Transport is not a http.Transport an error will be
|
|
||||||
returned. If the Transport's TLS config is set this option will cause the SDK
|
|
||||||
to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
|
|
||||||
contains multiple certificates all of them will be loaded.
|
|
||||||
|
|
||||||
The Session option CustomCABundle is also available when creating sessions
|
|
||||||
to also enable this feature. CustomCABundle session option field has priority
|
|
||||||
over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
|
|
||||||
|
|
||||||
Setting a custom HTTPClient in the aws.Config options will override this setting.
|
|
||||||
To use this option and custom HTTP client, the HTTP client needs to be provided
|
|
||||||
when creating the session. Not the service client.
|
|
||||||
*/
|
|
||||||
package session
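As a concrete illustration of the AssumeRoleTokenProvider option described above, here is a hedged sketch that supplies the MFA token from a custom function instead of stscreds.StdinTokenProvider; the token value shown is a placeholder.

	package main

	import (
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		// Enable shared config and provide the MFA token from our own source.
		sess := session.Must(session.NewSessionWithOptions(session.Options{
			SharedConfigState: session.SharedConfigEnable,
			AssumeRoleTokenProvider: func() (string, error) {
				return "123456", nil // placeholder; read from a secure prompt in practice
			},
		}))

		// Service clients created from the session use the assumed-role credentials.
		_ = s3.New(sess)
	}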
191 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go generated vendored
@@ -1,191 +0,0 @@
package session
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EnvProviderName provides a name of the provider when config is loaded from environment.
|
|
||||||
const EnvProviderName = "EnvConfigCredentials"
|
|
||||||
|
|
||||||
// envConfig is a collection of environment values the SDK will read
|
|
||||||
// setup config from. All environment values are optional. But some values
|
|
||||||
// such as credentials require multiple values to be complete or the values
|
|
||||||
// will be ignored.
|
|
||||||
type envConfig struct {
|
|
||||||
	// Environment configuration values. If set, both Access Key ID and Secret Access
	// Key must be provided. Session Token can optionally also be provided, but is
	// not required.
|
|
||||||
//
|
|
||||||
// # Access Key ID
|
|
||||||
// AWS_ACCESS_KEY_ID=AKID
|
|
||||||
// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
|
|
||||||
//
|
|
||||||
// # Secret Access Key
|
|
||||||
// AWS_SECRET_ACCESS_KEY=SECRET
|
|
||||||
	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
|
|
||||||
//
|
|
||||||
// # Session Token
|
|
||||||
// AWS_SESSION_TOKEN=TOKEN
|
|
||||||
Creds credentials.Value
|
|
||||||
|
|
||||||
	// Region value will instruct the SDK where to make service API requests to. If it is
|
|
||||||
// not provided in the environment the region must be provided before a service
|
|
||||||
// client request is made.
|
|
||||||
//
|
|
||||||
// AWS_REGION=us-east-1
|
|
||||||
//
|
|
||||||
// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
|
|
||||||
// # and AWS_REGION is not also set.
|
|
||||||
// AWS_DEFAULT_REGION=us-east-1
|
|
||||||
Region string
|
|
||||||
|
|
||||||
	// Profile name the SDK should use when loading shared configuration from the
|
|
||||||
// shared configuration files. If not provided "default" will be used as the
|
|
||||||
// profile name.
|
|
||||||
//
|
|
||||||
// AWS_PROFILE=my_profile
|
|
||||||
//
|
|
||||||
// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
|
|
||||||
// # and AWS_PROFILE is not also set.
|
|
||||||
// AWS_DEFAULT_PROFILE=my_profile
|
|
||||||
Profile string
|
|
||||||
|
|
||||||
// SDK load config instructs the SDK to load the shared config in addition to
|
|
||||||
// shared credentials. This also expands the configuration loaded from the shared
|
|
||||||
// credentials to have parity with the shared config file. This also enables
|
|
||||||
// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
|
|
||||||
// env values as well.
|
|
||||||
//
|
|
||||||
// AWS_SDK_LOAD_CONFIG=1
|
|
||||||
EnableSharedConfig bool
|
|
||||||
|
|
||||||
// Shared credentials file path can be set to instruct the SDK to use an alternate
|
|
||||||
// file for the shared credentials. If not set the file will be loaded from
|
|
||||||
// $HOME/.aws/credentials on Linux/Unix based systems, and
|
|
||||||
// %USERPROFILE%\.aws\credentials on Windows.
|
|
||||||
//
|
|
||||||
// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
|
|
||||||
SharedCredentialsFile string
|
|
||||||
|
|
||||||
// Shared config file path can be set to instruct the SDK to use an alternate
|
|
||||||
// file for the shared config. If not set the file will be loaded from
|
|
||||||
// $HOME/.aws/config on Linux/Unix based systems, and
|
|
||||||
// %USERPROFILE%\.aws\config on Windows.
|
|
||||||
//
|
|
||||||
// AWS_CONFIG_FILE=$HOME/my_shared_config
|
|
||||||
SharedConfigFile string
|
|
||||||
|
|
||||||
	// Sets the path to a custom Certificate Authority (CA) bundle PEM file
|
|
||||||
// that the SDK will use instead of the system's root CA bundle.
|
|
||||||
// Only use this if you want to configure the SDK to use a custom set
|
|
||||||
// of CAs.
|
|
||||||
//
|
|
||||||
// Enabling this option will attempt to merge the Transport
|
|
||||||
// into the SDK's HTTP client. If the client's Transport is
|
|
||||||
// not a http.Transport an error will be returned. If the
|
|
||||||
// Transport's TLS config is set this option will cause the
|
|
||||||
// SDK to overwrite the Transport's TLS config's RootCAs value.
|
|
||||||
//
|
|
||||||
// Setting a custom HTTPClient in the aws.Config options will override this setting.
|
|
||||||
// To use this option and custom HTTP client, the HTTP client needs to be provided
|
|
||||||
// when creating the session. Not the service client.
|
|
||||||
//
|
|
||||||
// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
|
|
||||||
CustomCABundle string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
credAccessEnvKey = []string{
|
|
||||||
"AWS_ACCESS_KEY_ID",
|
|
||||||
"AWS_ACCESS_KEY",
|
|
||||||
}
|
|
||||||
credSecretEnvKey = []string{
|
|
||||||
"AWS_SECRET_ACCESS_KEY",
|
|
||||||
"AWS_SECRET_KEY",
|
|
||||||
}
|
|
||||||
credSessionEnvKey = []string{
|
|
||||||
"AWS_SESSION_TOKEN",
|
|
||||||
}
|
|
||||||
|
|
||||||
regionEnvKeys = []string{
|
|
||||||
"AWS_REGION",
|
|
||||||
"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
|
|
||||||
}
|
|
||||||
profileEnvKeys = []string{
|
|
||||||
"AWS_PROFILE",
|
|
||||||
"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
|
|
||||||
}
|
|
||||||
sharedCredsFileEnvKey = []string{
|
|
||||||
"AWS_SHARED_CREDENTIALS_FILE",
|
|
||||||
}
|
|
||||||
sharedConfigFileEnvKey = []string{
|
|
||||||
"AWS_CONFIG_FILE",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// loadEnvConfig retrieves the SDK's environment configuration.
|
|
||||||
// See `envConfig` for the values that will be retrieved.
|
|
||||||
//
|
|
||||||
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
|
|
||||||
// the shared SDK config will be loaded in addition to the SDK's specific
|
|
||||||
// configuration values.
|
|
||||||
func loadEnvConfig() envConfig {
|
|
||||||
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
|
|
||||||
return envConfigLoad(enableSharedConfig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
|
|
||||||
// SDK shared config. See `envConfig` for the values that will be retrieved.
|
|
||||||
//
|
|
||||||
// Loads the shared configuration in addition to the SDK's specific configuration.
|
|
||||||
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
|
|
||||||
// environment variable is set.
|
|
||||||
func loadSharedEnvConfig() envConfig {
|
|
||||||
return envConfigLoad(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func envConfigLoad(enableSharedConfig bool) envConfig {
|
|
||||||
cfg := envConfig{}
|
|
||||||
|
|
||||||
cfg.EnableSharedConfig = enableSharedConfig
|
|
||||||
|
|
||||||
setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
|
|
||||||
setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
|
|
||||||
setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
|
|
||||||
|
|
||||||
// Require logical grouping of credentials
|
|
||||||
if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
|
|
||||||
cfg.Creds = credentials.Value{}
|
|
||||||
} else {
|
|
||||||
cfg.Creds.ProviderName = EnvProviderName
|
|
||||||
}
|
|
||||||
|
|
||||||
regionKeys := regionEnvKeys
|
|
||||||
profileKeys := profileEnvKeys
|
|
||||||
if !cfg.EnableSharedConfig {
|
|
||||||
regionKeys = regionKeys[:1]
|
|
||||||
profileKeys = profileKeys[:1]
|
|
||||||
}
|
|
||||||
|
|
||||||
setFromEnvVal(&cfg.Region, regionKeys)
|
|
||||||
setFromEnvVal(&cfg.Profile, profileKeys)
|
|
||||||
|
|
||||||
setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
|
|
||||||
setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
|
|
||||||
|
|
||||||
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
|
|
||||||
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
func setFromEnvVal(dst *string, keys []string) {
|
|
||||||
for _, k := range keys {
|
|
||||||
if v := os.Getenv(k); len(v) > 0 {
|
|
||||||
*dst = v
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
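Because setFromEnvVal is unexported, here is a small standalone sketch that mirrors the precedence it implements (the first key with a non-empty value wins); the helper name and example values are illustrative only.

	package main

	import (
		"fmt"
		"os"
	)

	// firstEnv mirrors setFromEnvVal: the first key with a non-empty value wins.
	func firstEnv(keys ...string) string {
		for _, k := range keys {
			if v := os.Getenv(k); len(v) > 0 {
				return v
			}
		}
		return ""
	}

	func main() {
		os.Setenv("AWS_DEFAULT_REGION", "eu-west-1")
		os.Setenv("AWS_REGION", "us-east-1")

		// AWS_REGION is listed first, so it takes precedence over AWS_DEFAULT_REGION.
		fmt.Println(firstEnv("AWS_REGION", "AWS_DEFAULT_REGION")) // us-east-1
	}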
606 vendor/github.com/aws/aws-sdk-go/aws/session/session.go generated vendored
@@ -1,606 +0,0 @@
package session
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/client"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/defaults"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Session provides a central location to create service clients from and
|
|
||||||
// store configurations and request handlers for those services.
|
|
||||||
//
|
|
||||||
// Sessions are safe to create service clients concurrently, but it is not safe
|
|
||||||
// to mutate the Session concurrently.
|
|
||||||
//
|
|
||||||
// The Session satisfies the service client's client.ClientConfigProvider.
|
|
||||||
type Session struct {
|
|
||||||
Config *aws.Config
|
|
||||||
Handlers request.Handlers
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of the handlers merging in the provided configs
|
|
||||||
// on top of the SDK's default configurations. Once the Session is created it
|
|
||||||
// can be mutated to modify the Config or Handlers. The Session is safe to be
|
|
||||||
// read concurrently, but it should not be written to concurrently.
|
|
||||||
//
|
|
||||||
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the New
// method could now encounter an error when loading the configuration. When
// the environment variable is set, and an error occurs, New will return a
|
|
||||||
// session that will fail all requests reporting the error that occurred while
|
|
||||||
// loading the session. Use NewSession to get the error when creating the
|
|
||||||
// session.
|
|
||||||
//
|
|
||||||
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
|
|
||||||
// the shared config file (~/.aws/config) will also be loaded, in addition to
|
|
||||||
// the shared credentials file (~/.aws/credentials). Values set in both the
|
|
||||||
// shared config, and shared credentials will be taken from the shared
|
|
||||||
// credentials file.
|
|
||||||
//
|
|
||||||
// Deprecated: Use NewSession functions to create sessions instead. NewSession
|
|
||||||
// has the same functionality as New except an error can be returned when the
|
|
||||||
// func is called instead of waiting to receive an error until a request is made.
|
|
||||||
func New(cfgs ...*aws.Config) *Session {
|
|
||||||
// load initial config from environment
|
|
||||||
envCfg := loadEnvConfig()
|
|
||||||
|
|
||||||
if envCfg.EnableSharedConfig {
|
|
||||||
s, err := newSession(Options{}, envCfg, cfgs...)
|
|
||||||
if err != nil {
|
|
||||||
// Old session.New expected all errors to be discovered when
|
|
||||||
// a request is made, and would report the errors then. This
|
|
||||||
// needs to be replicated if an error occurs while creating
|
|
||||||
// the session.
|
|
||||||
msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
|
|
||||||
"Use session.NewSession to handle errors occurring during session creation."
|
|
||||||
|
|
||||||
// Session creation failed, need to report the error and prevent
|
|
||||||
// any requests from succeeding.
|
|
||||||
s = &Session{Config: defaults.Config()}
|
|
||||||
s.Config.MergeIn(cfgs...)
|
|
||||||
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
|
|
||||||
s.Handlers.Validate.PushBack(func(r *request.Request) {
|
|
||||||
r.Error = err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
return deprecatedNewSession(cfgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSession returns a new Session created from SDK defaults, config files,
|
|
||||||
// environment, and user provided config files. Once the Session is created
|
|
||||||
// it can be mutated to modify the Config or Handlers. The Session is safe to
|
|
||||||
// be read concurrently, but it should not be written to concurrently.
|
|
||||||
//
|
|
||||||
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
|
|
||||||
// the shared config file (~/.aws/config) will also be loaded in addition to
|
|
||||||
// the shared credentials file (~/.aws/credentials). Values set in both the
|
|
||||||
// shared config, and shared credentials will be taken from the shared
|
|
||||||
// credentials file. Enabling the Shared Config will also allow the Session
|
|
||||||
// to be built with retrieving credentials with AssumeRole set in the config.
|
|
||||||
//
|
|
||||||
// See the NewSessionWithOptions func for information on how to override or
|
|
||||||
// control through code how the Session will be created. Such as specifying the
|
|
||||||
// config profile, and controlling if shared config is enabled or not.
|
|
||||||
func NewSession(cfgs ...*aws.Config) (*Session, error) {
|
|
||||||
opts := Options{}
|
|
||||||
opts.Config.MergeIn(cfgs...)
|
|
||||||
|
|
||||||
return NewSessionWithOptions(opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedConfigState provides the ability to optionally override the state
|
|
||||||
// of the session's creation based on the shared config being enabled or
|
|
||||||
// disabled.
|
|
||||||
type SharedConfigState int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// SharedConfigStateFromEnv does not override any state of the
|
|
||||||
// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
|
|
||||||
// SharedConfigState type.
|
|
||||||
SharedConfigStateFromEnv SharedConfigState = iota
|
|
||||||
|
|
||||||
// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
|
|
||||||
// and disables the shared config functionality.
|
|
||||||
SharedConfigDisable
|
|
||||||
|
|
||||||
// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
|
|
||||||
// and enables the shared config functionality.
|
|
||||||
SharedConfigEnable
|
|
||||||
)
|
|
||||||
|
|
||||||
// Options provides the means to control how a Session is created and what
|
|
||||||
// configuration values will be loaded.
|
|
||||||
//
|
|
||||||
type Options struct {
|
|
||||||
// Provides config values for the SDK to use when creating service clients
|
|
||||||
	// and making API requests to services. Any value set in this field
|
|
||||||
// will override the associated value provided by the SDK defaults,
|
|
||||||
// environment or config files where relevant.
|
|
||||||
//
|
|
||||||
	// If not set, configuration values from SDK defaults, environment,
|
|
||||||
// config will be used.
|
|
||||||
Config aws.Config
|
|
||||||
|
|
||||||
// Overrides the config profile the Session should be created from. If not
|
|
||||||
// set the value of the environment variable will be loaded (AWS_PROFILE,
|
|
||||||
// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
|
|
||||||
//
|
|
||||||
// If not set and environment variables are not set the "default"
|
|
||||||
// (DefaultSharedConfigProfile) will be used as the profile to load the
|
|
||||||
// session config from.
|
|
||||||
Profile string
|
|
||||||
|
|
||||||
// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
|
|
||||||
// environment variable. By default a Session will be created using the
|
|
||||||
// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
|
|
||||||
//
|
|
||||||
// Setting this value to SharedConfigEnable or SharedConfigDisable
|
|
||||||
// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
|
|
||||||
// and enable or disable the shared config functionality.
|
|
||||||
SharedConfigState SharedConfigState
|
|
||||||
|
|
||||||
// Ordered list of files the session will load configuration from.
|
|
||||||
// It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE.
|
|
||||||
SharedConfigFiles []string
|
|
||||||
|
|
||||||
// When the SDK's shared config is configured to assume a role with MFA
|
|
||||||
// this option is required in order to provide the mechanism that will
|
|
||||||
// retrieve the MFA token. There is no default value for this field. If
|
|
||||||
// it is not set an error will be returned when creating the session.
|
|
||||||
//
|
|
||||||
// This token provider will be called when ever the assumed role's
|
|
||||||
// credentials need to be refreshed. Within the context of service clients
|
|
||||||
// all sharing the same session the SDK will ensure calls to the token
|
|
||||||
// provider are atomic. When sharing a token provider across multiple
|
|
||||||
// sessions additional synchronization logic is needed to ensure the
|
|
||||||
	// token providers do not introduce race conditions. It is recommended to
|
|
||||||
// share the session where possible.
|
|
||||||
//
|
|
||||||
// stscreds.StdinTokenProvider is a basic implementation that will prompt
|
|
||||||
// from stdin for the MFA token code.
|
|
||||||
//
|
|
||||||
// This field is only used if the shared configuration is enabled, and
|
|
||||||
	// the config enables assume role with MFA via the mfa_serial field.
|
|
||||||
AssumeRoleTokenProvider func() (string, error)
|
|
||||||
|
|
||||||
	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
|
|
||||||
// the SDK will use instead of the default system's root CA bundle. Use this
|
|
||||||
// only if you want to replace the CA bundle the SDK uses for TLS requests.
|
|
||||||
//
|
|
||||||
// Enabling this option will attempt to merge the Transport into the SDK's HTTP
|
|
||||||
// client. If the client's Transport is not a http.Transport an error will be
|
|
||||||
// returned. If the Transport's TLS config is set this option will cause the SDK
|
|
||||||
// to overwrite the Transport's TLS config's RootCAs value. If the CA
|
|
||||||
// bundle reader contains multiple certificates all of them will be loaded.
|
|
||||||
//
|
|
||||||
// The Session option CustomCABundle is also available when creating sessions
|
|
||||||
// to also enable this feature. CustomCABundle session option field has priority
|
|
||||||
// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
|
|
||||||
CustomCABundle io.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
|
|
||||||
// environment, and user provided config files. This func uses the Options
|
|
||||||
// values to configure how the Session is created.
|
|
||||||
//
|
|
||||||
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
|
|
||||||
// the shared config file (~/.aws/config) will also be loaded in addition to
|
|
||||||
// the shared credentials file (~/.aws/credentials). Values set in both the
|
|
||||||
// shared config, and shared credentials will be taken from the shared
|
|
||||||
// credentials file. Enabling the Shared Config will also allow the Session
|
|
||||||
// to be built with retrieving credentials with AssumeRole set in the config.
|
|
||||||
//
|
|
||||||
// // Equivalent to session.New
|
|
||||||
// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
|
|
||||||
//
|
|
||||||
// // Specify profile to load for the session's config
|
|
||||||
// sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
// Profile: "profile_name",
|
|
||||||
// }))
|
|
||||||
//
|
|
||||||
// // Specify profile for config and region for requests
|
|
||||||
// sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
// Config: aws.Config{Region: aws.String("us-east-1")},
|
|
||||||
// Profile: "profile_name",
|
|
||||||
// }))
|
|
||||||
//
|
|
||||||
// // Force enable Shared Config support
|
|
||||||
// sess := session.Must(session.NewSessionWithOptions(session.Options{
|
|
||||||
// SharedConfigState: session.SharedConfigEnable,
|
|
||||||
// }))
|
|
||||||
func NewSessionWithOptions(opts Options) (*Session, error) {
|
|
||||||
var envCfg envConfig
|
|
||||||
if opts.SharedConfigState == SharedConfigEnable {
|
|
||||||
envCfg = loadSharedEnvConfig()
|
|
||||||
} else {
|
|
||||||
envCfg = loadEnvConfig()
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(opts.Profile) > 0 {
|
|
||||||
envCfg.Profile = opts.Profile
|
|
||||||
}
|
|
||||||
|
|
||||||
switch opts.SharedConfigState {
|
|
||||||
case SharedConfigDisable:
|
|
||||||
envCfg.EnableSharedConfig = false
|
|
||||||
case SharedConfigEnable:
|
|
||||||
envCfg.EnableSharedConfig = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(envCfg.SharedCredentialsFile) == 0 {
|
|
||||||
envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
|
|
||||||
}
|
|
||||||
if len(envCfg.SharedConfigFile) == 0 {
|
|
||||||
envCfg.SharedConfigFile = defaults.SharedConfigFilename()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only use AWS_CA_BUNDLE if session option is not provided.
|
|
||||||
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
|
|
||||||
f, err := os.Open(envCfg.CustomCABundle)
|
|
||||||
if err != nil {
|
|
||||||
return nil, awserr.New("LoadCustomCABundleError",
|
|
||||||
"failed to open custom CA bundle PEM file", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
opts.CustomCABundle = f
|
|
||||||
}
|
|
||||||
|
|
||||||
return newSession(opts, envCfg, &opts.Config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Must is a helper function to ensure the Session is valid and there was no
|
|
||||||
// error when calling a NewSession function.
|
|
||||||
//
|
|
||||||
// This helper is intended to be used in variable initialization to load the
|
|
||||||
// Session and configuration at startup. Such as:
|
|
||||||
//
|
|
||||||
// var sess = session.Must(session.NewSession())
|
|
||||||
func Must(sess *Session, err error) *Session {
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return sess
|
|
||||||
}
|
|
||||||
|
|
||||||
func deprecatedNewSession(cfgs ...*aws.Config) *Session {
|
|
||||||
cfg := defaults.Config()
|
|
||||||
handlers := defaults.Handlers()
|
|
||||||
|
|
||||||
// Apply the passed in configs so the configuration can be applied to the
|
|
||||||
// default credential chain
|
|
||||||
cfg.MergeIn(cfgs...)
|
|
||||||
if cfg.EndpointResolver == nil {
|
|
||||||
// An endpoint resolver is required for a session to be able to provide
|
|
||||||
// endpoints for service client configurations.
|
|
||||||
cfg.EndpointResolver = endpoints.DefaultResolver()
|
|
||||||
}
|
|
||||||
cfg.Credentials = defaults.CredChain(cfg, handlers)
|
|
||||||
|
|
||||||
// Reapply any passed in configs to override credentials if set
|
|
||||||
cfg.MergeIn(cfgs...)
|
|
||||||
|
|
||||||
s := &Session{
|
|
||||||
Config: cfg,
|
|
||||||
Handlers: handlers,
|
|
||||||
}
|
|
||||||
|
|
||||||
initHandlers(s)
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
|
|
||||||
cfg := defaults.Config()
|
|
||||||
handlers := defaults.Handlers()
|
|
||||||
|
|
||||||
// Get a merged version of the user provided config to determine if
|
|
||||||
	// credentials were set.
|
|
||||||
userCfg := &aws.Config{}
|
|
||||||
userCfg.MergeIn(cfgs...)
|
|
||||||
|
|
||||||
// Ordered config files will be loaded in with later files overwriting
|
|
||||||
// previous config file values.
|
|
||||||
var cfgFiles []string
|
|
||||||
if opts.SharedConfigFiles != nil {
|
|
||||||
cfgFiles = opts.SharedConfigFiles
|
|
||||||
} else {
|
|
||||||
cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
|
|
||||||
if !envCfg.EnableSharedConfig {
|
|
||||||
// The shared config file (~/.aws/config) is only loaded if instructed
|
|
||||||
// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
|
|
||||||
cfgFiles = cfgFiles[1:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load additional config from file(s)
|
|
||||||
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
s := &Session{
|
|
||||||
Config: cfg,
|
|
||||||
Handlers: handlers,
|
|
||||||
}
|
|
||||||
|
|
||||||
initHandlers(s)
|
|
||||||
|
|
||||||
// Setup HTTP client with custom cert bundle if enabled
|
|
||||||
if opts.CustomCABundle != nil {
|
|
||||||
if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
|
||||||
var t *http.Transport
|
|
||||||
switch v := s.Config.HTTPClient.Transport.(type) {
|
|
||||||
case *http.Transport:
|
|
||||||
t = v
|
|
||||||
default:
|
|
||||||
if s.Config.HTTPClient.Transport != nil {
|
|
||||||
return awserr.New("LoadCustomCABundleError",
|
|
||||||
"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t == nil {
|
|
||||||
t = &http.Transport{}
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err := loadCertPool(bundle)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if t.TLSClientConfig == nil {
|
|
||||||
t.TLSClientConfig = &tls.Config{}
|
|
||||||
}
|
|
||||||
t.TLSClientConfig.RootCAs = p
|
|
||||||
|
|
||||||
s.Config.HTTPClient.Transport = t
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadCertPool(r io.Reader) (*x509.CertPool, error) {
|
|
||||||
b, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, awserr.New("LoadCustomCABundleError",
|
|
||||||
"failed to read custom CA bundle PEM file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
p := x509.NewCertPool()
|
|
||||||
if !p.AppendCertsFromPEM(b) {
|
|
||||||
return nil, awserr.New("LoadCustomCABundleError",
|
|
||||||
"failed to load custom CA bundle PEM file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
|
|
||||||
// Merge in user provided configuration
|
|
||||||
cfg.MergeIn(userCfg)
|
|
||||||
|
|
||||||
// Region if not already set by user
|
|
||||||
if len(aws.StringValue(cfg.Region)) == 0 {
|
|
||||||
if len(envCfg.Region) > 0 {
|
|
||||||
cfg.WithRegion(envCfg.Region)
|
|
||||||
} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
|
|
||||||
cfg.WithRegion(sharedCfg.Region)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure credentials if not already set
|
|
||||||
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
|
|
||||||
if len(envCfg.Creds.AccessKeyID) > 0 {
|
|
||||||
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
|
|
||||||
envCfg.Creds,
|
|
||||||
)
|
|
||||||
} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
|
|
||||||
cfgCp := *cfg
|
|
||||||
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
|
|
||||||
sharedCfg.AssumeRoleSource.Creds,
|
|
||||||
)
|
|
||||||
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
|
|
||||||
// AssumeRole Token provider is required if doing Assume Role
|
|
||||||
// with MFA.
|
|
||||||
return AssumeRoleTokenProviderNotSetError{}
|
|
||||||
}
|
|
||||||
cfg.Credentials = stscreds.NewCredentials(
|
|
||||||
&Session{
|
|
||||||
Config: &cfgCp,
|
|
||||||
Handlers: handlers.Copy(),
|
|
||||||
},
|
|
||||||
sharedCfg.AssumeRole.RoleARN,
|
|
||||||
func(opt *stscreds.AssumeRoleProvider) {
|
|
||||||
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
|
|
||||||
|
|
||||||
// Assume role with external ID
|
|
||||||
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
|
|
||||||
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assume role with MFA
|
|
||||||
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
|
|
||||||
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
|
|
||||||
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
|
|
||||||
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
|
|
||||||
sharedCfg.Creds,
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
// Fallback to default credentials provider, include mock errors
|
|
||||||
// for the credential chain so user can identify why credentials
|
|
||||||
// failed to be retrieved.
|
|
||||||
cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
|
|
||||||
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
|
|
||||||
Providers: []credentials.Provider{
|
|
||||||
&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
|
|
||||||
&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
|
|
||||||
defaults.RemoteCredProvider(*cfg, handlers),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
|
|
||||||
// AssumeRoleTokenProvider option is not set and the shared config is configured to
// assume a role with an MFA token.
|
|
||||||
type AssumeRoleTokenProviderNotSetError struct{}
|
|
||||||
|
|
||||||
// Code is the short id of the error.
|
|
||||||
func (e AssumeRoleTokenProviderNotSetError) Code() string {
|
|
||||||
return "AssumeRoleTokenProviderNotSetError"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is the description of the error
|
|
||||||
func (e AssumeRoleTokenProviderNotSetError) Message() string {
|
|
||||||
return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr is the underlying error that caused the failure.
|
|
||||||
func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error satisfies the error interface.
|
|
||||||
func (e AssumeRoleTokenProviderNotSetError) Error() string {
|
|
||||||
return awserr.SprintError(e.Code(), e.Message(), "", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
type credProviderError struct {
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
var emptyCreds = credentials.Value{}
|
|
||||||
|
|
||||||
func (c credProviderError) Retrieve() (credentials.Value, error) {
|
|
||||||
return credentials.Value{}, c.Err
|
|
||||||
}
|
|
||||||
func (c credProviderError) IsExpired() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func initHandlers(s *Session) {
|
|
||||||
// Add the Validate parameter handler if it is not disabled.
|
|
||||||
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
|
|
||||||
if !aws.BoolValue(s.Config.DisableParamValidation) {
|
|
||||||
s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy creates and returns a copy of the current Session, copying the config
|
|
||||||
// and handlers. If any additional configs are provided they will be merged
|
|
||||||
// on top of the Session's copied config.
|
|
||||||
//
|
|
||||||
// // Create a copy of the current Session, configured for the us-west-2 region.
|
|
||||||
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
|
|
||||||
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
|
|
||||||
newSession := &Session{
|
|
||||||
Config: s.Config.Copy(cfgs...),
|
|
||||||
Handlers: s.Handlers.Copy(),
|
|
||||||
}
|
|
||||||
|
|
||||||
initHandlers(newSession)
|
|
||||||
|
|
||||||
return newSession
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClientConfig satisfies the client.ConfigProvider interface and is used to
|
|
||||||
// configure the service client instances. Passing the Session to the service
|
|
||||||
// client's constructor (New) will use this method to configure the client.
|
|
||||||
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
|
|
||||||
// Backwards compatibility: the error will be eaten if the user calls ClientConfig
// directly. All SDK services will use clientConfigWithErr.
|
|
||||||
cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
|
|
||||||
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
|
|
||||||
s = s.Copy(cfgs...)
|
|
||||||
|
|
||||||
var resolved endpoints.ResolvedEndpoint
|
|
||||||
var err error
|
|
||||||
|
|
||||||
region := aws.StringValue(s.Config.Region)
|
|
||||||
|
|
||||||
if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
|
|
||||||
resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
|
|
||||||
resolved.SigningRegion = region
|
|
||||||
} else {
|
|
||||||
resolved, err = s.Config.EndpointResolver.EndpointFor(
|
|
||||||
serviceName, region,
|
|
||||||
func(opt *endpoints.Options) {
|
|
||||||
opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
|
|
||||||
opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
|
|
||||||
|
|
||||||
// Support the condition where the service is modeled but its
|
|
||||||
// endpoint metadata is not available.
|
|
||||||
opt.ResolveUnknownService = true
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return client.Config{
|
|
||||||
Config: s.Config,
|
|
||||||
Handlers: s.Handlers,
|
|
||||||
Endpoint: resolved.URL,
|
|
||||||
SigningRegion: resolved.SigningRegion,
|
|
||||||
SigningName: resolved.SigningName,
|
|
||||||
}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
|
|
||||||
// that the EndpointResolver will not be used to resolve the endpoint. The only
|
|
||||||
// endpoint set must come from the aws.Config.Endpoint field.
|
|
||||||
func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
|
|
||||||
s = s.Copy(cfgs...)
|
|
||||||
|
|
||||||
var resolved endpoints.ResolvedEndpoint
|
|
||||||
|
|
||||||
region := aws.StringValue(s.Config.Region)
|
|
||||||
|
|
||||||
if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
|
|
||||||
resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
|
|
||||||
resolved.SigningRegion = region
|
|
||||||
}
|
|
||||||
|
|
||||||
return client.Config{
|
|
||||||
Config: s.Config,
|
|
||||||
Handlers: s.Handlers,
|
|
||||||
Endpoint: resolved.URL,
|
|
||||||
SigningRegion: resolved.SigningRegion,
|
|
||||||
SigningName: resolved.SigningName,
|
|
||||||
}
|
|
||||||
}
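For reference alongside the removed session code above, a minimal sketch of avoiding AssumeRoleTokenProviderNotSetError when the shared config assumes a role protected by MFA; the region and profile name here are illustrative assumptions, not values from this repository:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Enable loading of the shared config file and provide a token provider
	// so an MFA-protected assume-role profile can prompt for the token
	// instead of failing with AssumeRoleTokenProviderNotSetError.
	sess, err := session.NewSessionWithOptions(session.Options{
		Config:                  aws.Config{Region: aws.String("us-east-1")}, // assumed example region
		Profile:                 "mfa-role",                                  // assumed example profile
		SharedConfigState:       session.SharedConfigEnable,
		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}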
295 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go generated vendored
@@ -1,295 +0,0 @@
package session
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/go-ini/ini"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Static Credentials group
|
|
||||||
accessKeyIDKey = `aws_access_key_id` // group required
|
|
||||||
secretAccessKey = `aws_secret_access_key` // group required
|
|
||||||
sessionTokenKey = `aws_session_token` // optional
|
|
||||||
|
|
||||||
// Assume Role Credentials group
|
|
||||||
roleArnKey = `role_arn` // group required
|
|
||||||
sourceProfileKey = `source_profile` // group required
|
|
||||||
externalIDKey = `external_id` // optional
|
|
||||||
mfaSerialKey = `mfa_serial` // optional
|
|
||||||
roleSessionNameKey = `role_session_name` // optional
|
|
||||||
|
|
||||||
// Additional Config fields
|
|
||||||
regionKey = `region`
|
|
||||||
|
|
||||||
// DefaultSharedConfigProfile is the default profile to be used when
|
|
||||||
// loading configuration from the config files if another profile name
|
|
||||||
// is not provided.
|
|
||||||
DefaultSharedConfigProfile = `default`
|
|
||||||
)
|
|
||||||
|
|
||||||
type assumeRoleConfig struct {
|
|
||||||
RoleARN string
|
|
||||||
SourceProfile string
|
|
||||||
ExternalID string
|
|
||||||
MFASerial string
|
|
||||||
RoleSessionName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// sharedConfig represents the configuration fields of the SDK config files.
|
|
||||||
type sharedConfig struct {
|
|
||||||
// Credentials values from the config file. Both aws_access_key_id
|
|
||||||
// and aws_secret_access_key must be provided together in the same file
|
|
||||||
// to be considered valid. The values will be ignored if not a complete group.
|
|
||||||
// aws_session_token is an optional field that can be provided if both of the
|
|
||||||
// other two fields are also provided.
|
|
||||||
//
|
|
||||||
// aws_access_key_id
|
|
||||||
// aws_secret_access_key
|
|
||||||
// aws_session_token
|
|
||||||
Creds credentials.Value
|
|
||||||
|
|
||||||
AssumeRole assumeRoleConfig
|
|
||||||
AssumeRoleSource *sharedConfig
|
|
||||||
|
|
||||||
// Region is the region the SDK should use for looking up AWS service endpoints
|
|
||||||
// and signing requests.
|
|
||||||
//
|
|
||||||
// region
|
|
||||||
Region string
|
|
||||||
}
|
|
||||||
|
|
||||||
type sharedConfigFile struct {
|
|
||||||
Filename string
|
|
||||||
IniData *ini.File
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadSharedConfig retrieves the configuration from the list of files
|
|
||||||
// using the profile provided. The order the files are listed will determine
|
|
||||||
// precedence. Values in subsequent files will overwrite values defined in
|
|
||||||
// earlier files.
|
|
||||||
//
|
|
||||||
// For example, given two files A and B. Both define credentials. If the order
|
|
||||||
// of the files are A then B, B's credential values will be used instead of A's.
|
|
||||||
//
|
|
||||||
// See sharedConfig.setFromIniFile for information on how the config files
|
|
||||||
// will be loaded.
|
|
||||||
func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
|
|
||||||
if len(profile) == 0 {
|
|
||||||
profile = DefaultSharedConfigProfile
|
|
||||||
}
|
|
||||||
|
|
||||||
files, err := loadSharedConfigIniFiles(filenames)
|
|
||||||
if err != nil {
|
|
||||||
return sharedConfig{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := sharedConfig{}
|
|
||||||
if err = cfg.setFromIniFiles(profile, files); err != nil {
|
|
||||||
return sharedConfig{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cfg.AssumeRole.SourceProfile) > 0 {
|
|
||||||
if err := cfg.setAssumeRoleSource(profile, files); err != nil {
|
|
||||||
return sharedConfig{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
|
|
||||||
files := make([]sharedConfigFile, 0, len(filenames))
|
|
||||||
|
|
||||||
for _, filename := range filenames {
|
|
||||||
b, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil {
|
|
||||||
// Skip files which can't be opened and read for whatever reason
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := ini.Load(b)
|
|
||||||
if err != nil {
|
|
||||||
return nil, SharedConfigLoadError{Filename: filename, Err: err}
|
|
||||||
}
|
|
||||||
|
|
||||||
files = append(files, sharedConfigFile{
|
|
||||||
Filename: filename, IniData: f,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
|
|
||||||
var assumeRoleSrc sharedConfig
|
|
||||||
|
|
||||||
// Multiple level assume role chains are not supported
|
|
||||||
if cfg.AssumeRole.SourceProfile == origProfile {
|
|
||||||
assumeRoleSrc = *cfg
|
|
||||||
assumeRoleSrc.AssumeRole = assumeRoleConfig{}
|
|
||||||
} else {
|
|
||||||
err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
|
|
||||||
return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg.AssumeRoleSource = &assumeRoleSrc
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
|
|
||||||
// Set the config from each file, ignoring profiles that are missing from a file.
|
|
||||||
for _, f := range files {
|
|
||||||
if err := cfg.setFromIniFile(profile, f); err != nil {
|
|
||||||
if _, ok := err.(SharedConfigProfileNotExistsError); ok {
|
|
||||||
// Ignore missing profiles
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setFromIniFile loads the configuration from the file using
|
|
||||||
// the profile provided. A sharedConfig pointer type value is used so that
|
|
||||||
// multiple config file loadings can be chained.
|
|
||||||
//
|
|
||||||
// Only loads complete, logically grouped values; fields in cfg are not set
// for incomplete groups in the config, such as credentials. For example,
// if a config file only includes aws_access_key_id but no aws_secret_access_key,
// the aws_access_key_id will be ignored.
|
|
||||||
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
|
|
||||||
section, err := file.IniData.GetSection(profile)
|
|
||||||
if err != nil {
|
|
||||||
// Fall back to the alternate profile name: profile <name>
|
|
||||||
section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
|
|
||||||
if err != nil {
|
|
||||||
return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shared Credentials
|
|
||||||
akid := section.Key(accessKeyIDKey).String()
|
|
||||||
secret := section.Key(secretAccessKey).String()
|
|
||||||
if len(akid) > 0 && len(secret) > 0 {
|
|
||||||
cfg.Creds = credentials.Value{
|
|
||||||
AccessKeyID: akid,
|
|
||||||
SecretAccessKey: secret,
|
|
||||||
SessionToken: section.Key(sessionTokenKey).String(),
|
|
||||||
ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assume Role
|
|
||||||
roleArn := section.Key(roleArnKey).String()
|
|
||||||
srcProfile := section.Key(sourceProfileKey).String()
|
|
||||||
if len(roleArn) > 0 && len(srcProfile) > 0 {
|
|
||||||
cfg.AssumeRole = assumeRoleConfig{
|
|
||||||
RoleARN: roleArn,
|
|
||||||
SourceProfile: srcProfile,
|
|
||||||
ExternalID: section.Key(externalIDKey).String(),
|
|
||||||
MFASerial: section.Key(mfaSerialKey).String(),
|
|
||||||
RoleSessionName: section.Key(roleSessionNameKey).String(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Region
|
|
||||||
if v := section.Key(regionKey).String(); len(v) > 0 {
|
|
||||||
cfg.Region = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedConfigLoadError is an error for the shared config file failed to load.
|
|
||||||
type SharedConfigLoadError struct {
|
|
||||||
Filename string
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code is the short id of the error.
|
|
||||||
func (e SharedConfigLoadError) Code() string {
|
|
||||||
return "SharedConfigLoadError"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is the description of the error
|
|
||||||
func (e SharedConfigLoadError) Message() string {
|
|
||||||
return fmt.Sprintf("failed to load config file, %s", e.Filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr is the underlying error that caused the failure.
|
|
||||||
func (e SharedConfigLoadError) OrigErr() error {
|
|
||||||
return e.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error satisfies the error interface.
|
|
||||||
func (e SharedConfigLoadError) Error() string {
|
|
||||||
return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedConfigProfileNotExistsError is an error for the shared config when
|
|
||||||
// the profile was not found in the config file.
|
|
||||||
type SharedConfigProfileNotExistsError struct {
|
|
||||||
Profile string
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code is the short id of the error.
|
|
||||||
func (e SharedConfigProfileNotExistsError) Code() string {
|
|
||||||
return "SharedConfigProfileNotExistsError"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is the description of the error
|
|
||||||
func (e SharedConfigProfileNotExistsError) Message() string {
|
|
||||||
return fmt.Sprintf("failed to get profile, %s", e.Profile)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr is the underlying error that caused the failure.
|
|
||||||
func (e SharedConfigProfileNotExistsError) OrigErr() error {
|
|
||||||
return e.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error satisfies the error interface.
|
|
||||||
func (e SharedConfigProfileNotExistsError) Error() string {
|
|
||||||
return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedConfigAssumeRoleError is an error for the shared config when the
|
|
||||||
// profile contains assume role information, but that information is invalid
|
|
||||||
// or not complete.
|
|
||||||
type SharedConfigAssumeRoleError struct {
|
|
||||||
RoleARN string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Code is the short id of the error.
|
|
||||||
func (e SharedConfigAssumeRoleError) Code() string {
|
|
||||||
return "SharedConfigAssumeRoleError"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Message is the description of the error
|
|
||||||
func (e SharedConfigAssumeRoleError) Message() string {
|
|
||||||
return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
|
|
||||||
e.RoleARN)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrigErr is the underlying error that caused the failure.
|
|
||||||
func (e SharedConfigAssumeRoleError) OrigErr() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error satisfies the error interface.
|
|
||||||
func (e SharedConfigAssumeRoleError) Error() string {
|
|
||||||
return awserr.SprintError(e.Code(), e.Message(), "", nil)
|
|
||||||
}
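The loader above reads profiles with the go-ini package; a small sketch (the profile name, ARNs, and values are invented examples) showing how the assume-role keys it looks for would appear in a shared config section:

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

// exampleConfig is an invented shared config snippet using the keys read by
// setFromIniFile (role_arn, source_profile, mfa_serial, role_session_name).
const exampleConfig = `
[profile assume]
role_arn          = arn:aws:iam::123456789012:role/example
source_profile    = default
mfa_serial        = arn:aws:iam::123456789012:mfa/example
role_session_name = example-session
`

func main() {
	f, err := ini.Load([]byte(exampleConfig))
	if err != nil {
		panic(err)
	}
	sec, err := f.GetSection("profile assume")
	if err != nil {
		panic(err)
	}
	fmt.Println(sec.Key("role_arn").String(), sec.Key("source_profile").String())
}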
82 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go generated vendored
@@ -1,82 +0,0 @@
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rules houses a set of rules needed for validation of a
// string value
|
|
||||||
type rules []rule
|
|
||||||
|
|
||||||
// rule interface allows for more flexible rules and just simply
|
|
||||||
// checks whether or not a value adheres to that rule
|
|
||||||
type rule interface {
|
|
||||||
IsValid(value string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid will iterate through all rules and see if any rules
|
|
||||||
// apply to the value and supports nested rules
|
|
||||||
func (r rules) IsValid(value string) bool {
|
|
||||||
for _, rule := range r {
|
|
||||||
if rule.IsValid(value) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// mapRule generic rule for maps
|
|
||||||
type mapRule map[string]struct{}
|
|
||||||
|
|
||||||
// IsValid for the map rule satisfies whether it exists in the map
|
|
||||||
func (m mapRule) IsValid(value string) bool {
|
|
||||||
_, ok := m[value]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// whitelist is a generic rule for whitelisting
|
|
||||||
type whitelist struct {
|
|
||||||
rule
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid for whitelist checks if the value is within the whitelist
|
|
||||||
func (w whitelist) IsValid(value string) bool {
|
|
||||||
return w.rule.IsValid(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// blacklist is a generic rule for blacklisting
|
|
||||||
type blacklist struct {
|
|
||||||
rule
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsValid for blacklist checks if the value is not within the blacklist
|
|
||||||
func (b blacklist) IsValid(value string) bool {
|
|
||||||
return !b.rule.IsValid(value)
|
|
||||||
}
|
|
||||||
|
|
||||||
type patterns []string
|
|
||||||
|
|
||||||
// IsValid for patterns checks each pattern and returns if a match has
|
|
||||||
// been found
|
|
||||||
func (p patterns) IsValid(value string) bool {
|
|
||||||
for _, pattern := range p {
|
|
||||||
if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// inclusiveRules rules allow for rules to depend on one another
|
|
||||||
type inclusiveRules []rule
|
|
||||||
|
|
||||||
// IsValid will return true if all rules are true
|
|
||||||
func (r inclusiveRules) IsValid(value string) bool {
|
|
||||||
for _, rule := range r {
|
|
||||||
if !rule.IsValid(value) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
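A minimal sketch of how these rule types compose; since they are unexported it would have to live inside the v4 package, and it mirrors the allowedQueryHoisting composition defined in v4.go further down. The helper name is invented for illustration:

package v4

import "fmt"

// exampleRules is an illustrative helper, not part of the original package.
func exampleRules() {
	// Allow only X-Amz-* headers that are not in an explicit deny set,
	// requiring both rules to pass.
	hoistable := inclusiveRules{
		blacklist{mapRule{"X-Amz-Date": struct{}{}}},
		patterns{"X-Amz-"},
	}
	fmt.Println(hoistable.IsValid("X-Amz-Expires")) // true
	fmt.Println(hoistable.IsValid("X-Amz-Date"))    // false
	fmt.Println(hoistable.IsValid("Content-Type"))  // false
}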
7 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go generated vendored
@@ -1,7 +0,0 @@
package v4

// WithUnsignedPayload will set the UnsignedPayload field of the signer
// to true.
func WithUnsignedPayload(v4 *Signer) {
	v4.UnsignedPayload = true
}
24 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go generated vendored
@@ -1,24 +0,0 @@
// +build go1.5

package v4

import (
	"net/url"
	"strings"
)

func getURIPath(u *url.URL) string {
	var uri string

	if len(u.Opaque) > 0 {
		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
	} else {
		uri = u.EscapedPath()
	}

	if len(uri) == 0 {
		uri = "/"
	}

	return uri
}
762 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go generated vendored
@@ -1,762 +0,0 @@
// Package v4 implements signing for AWS V4 signer
|
|
||||||
//
|
|
||||||
// Provides request signing for requests that need to be signed with
|
|
||||||
// AWS V4 Signatures.
|
|
||||||
//
|
|
||||||
// Standalone Signer
|
|
||||||
//
|
|
||||||
// Generally using the signer outside of the SDK should not require any additional
|
|
||||||
// logic when using Go v1.5 or higher. The signer does this by taking advantage
|
|
||||||
// of the URL.EscapedPath method. If your request URI requires additional escaping
|
|
||||||
// you may need to use URL.Opaque to define what the raw URI should be sent
|
|
||||||
// to the service as.
|
|
||||||
//
|
|
||||||
// The signer will first check the URL.Opaque field, and use its value if set.
|
|
||||||
// The signer does require the URL.Opaque field to be set in the form of:
|
|
||||||
//
|
|
||||||
// "//<hostname>/<path>"
|
|
||||||
//
|
|
||||||
// // e.g.
|
|
||||||
// "//example.com/some/path"
|
|
||||||
//
|
|
||||||
// The leading "//" and hostname are required or the URL.Opaque escaping will
|
|
||||||
// not work correctly.
|
|
||||||
//
|
|
||||||
// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
|
|
||||||
// method and using the returned value. If you're using Go v1.4 you must set
|
|
||||||
// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
|
|
||||||
// Go v1.5 the signer will fallback to URL.Path.
|
|
||||||
//
|
|
||||||
// AWS v4 signature validation requires that the canonical string's URI path
|
|
||||||
// element must be the URI escaped form of the HTTP request's path.
|
|
||||||
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
|
|
||||||
//
|
|
||||||
// The Go HTTP client will perform escaping automatically on the request. Some
|
|
||||||
// of these escaping may cause signature validation errors because the HTTP
|
|
||||||
// request differs from the URI path or query that the signature was generated.
|
|
||||||
// https://golang.org/pkg/net/url/#URL.EscapedPath
|
|
||||||
//
|
|
||||||
// Because of this, when using the signer outside of the SDK it is recommended
// to explicitly escape the request prior to it being signed, which
// will help prevent signature validation errors. This can be done by setting
|
|
||||||
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
|
|
||||||
// call URL.EscapedPath() if Opaque is not set.
|
|
||||||
//
|
|
||||||
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
|
|
||||||
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
|
|
||||||
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
|
|
||||||
// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
|
|
||||||
// message. URL.Opaque generally will force Go to make requests with absolute URL.
|
|
||||||
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
|
|
||||||
// or url.EscapedPath will ignore the RawPath escaping.
|
|
||||||
//
|
|
||||||
// Test `TestStandaloneSign` provides a complete example of using the signer
|
|
||||||
// outside of the SDK and pre-escaping the URI path.
|
|
||||||
package v4
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/request"
|
|
||||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
|
||||||
timeFormat = "20060102T150405Z"
|
|
||||||
shortTimeFormat = "20060102"
|
|
||||||
|
|
||||||
// emptyStringSHA256 is a SHA256 of an empty string
|
|
||||||
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
|
|
||||||
)
|
|
||||||
|
|
||||||
var ignoredHeaders = rules{
|
|
||||||
blacklist{
|
|
||||||
mapRule{
|
|
||||||
"Authorization": struct{}{},
|
|
||||||
"User-Agent": struct{}{},
|
|
||||||
"X-Amzn-Trace-Id": struct{}{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// requiredSignedHeaders is a whitelist of headers used to build the canonical headers.
|
|
||||||
var requiredSignedHeaders = rules{
|
|
||||||
whitelist{
|
|
||||||
mapRule{
|
|
||||||
"Cache-Control": struct{}{},
|
|
||||||
"Content-Disposition": struct{}{},
|
|
||||||
"Content-Encoding": struct{}{},
|
|
||||||
"Content-Language": struct{}{},
|
|
||||||
"Content-Md5": struct{}{},
|
|
||||||
"Content-Type": struct{}{},
|
|
||||||
"Expires": struct{}{},
|
|
||||||
"If-Match": struct{}{},
|
|
||||||
"If-Modified-Since": struct{}{},
|
|
||||||
"If-None-Match": struct{}{},
|
|
||||||
"If-Unmodified-Since": struct{}{},
|
|
||||||
"Range": struct{}{},
|
|
||||||
"X-Amz-Acl": struct{}{},
|
|
||||||
"X-Amz-Copy-Source": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-Match": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Range": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
|
||||||
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
|
||||||
"X-Amz-Grant-Full-control": struct{}{},
|
|
||||||
"X-Amz-Grant-Read": struct{}{},
|
|
||||||
"X-Amz-Grant-Read-Acp": struct{}{},
|
|
||||||
"X-Amz-Grant-Write": struct{}{},
|
|
||||||
"X-Amz-Grant-Write-Acp": struct{}{},
|
|
||||||
"X-Amz-Metadata-Directive": struct{}{},
|
|
||||||
"X-Amz-Mfa": struct{}{},
|
|
||||||
"X-Amz-Request-Payer": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
|
||||||
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
|
||||||
"X-Amz-Storage-Class": struct{}{},
|
|
||||||
"X-Amz-Website-Redirect-Location": struct{}{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
patterns{"X-Amz-Meta-"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// allowedQueryHoisting is a whitelist of headers allowed to be hoisted
// into the presigned request's query string.
|
|
||||||
var allowedQueryHoisting = inclusiveRules{
|
|
||||||
blacklist{requiredSignedHeaders},
|
|
||||||
patterns{"X-Amz-"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signer applies AWS v4 signing to given request. Use this to sign requests
|
|
||||||
// that need to be signed with AWS V4 Signatures.
|
|
||||||
type Signer struct {
|
|
||||||
// The authentication credentials the request will be signed against.
|
|
||||||
// This value must be set to sign requests.
|
|
||||||
Credentials *credentials.Credentials
|
|
||||||
|
|
||||||
// Sets the log level the signer should use when reporting information to
|
|
||||||
// the logger. If the logger is nil nothing will be logged. See
|
|
||||||
// aws.LogLevelType for more information on available logging levels
|
|
||||||
//
|
|
||||||
// By default nothing will be logged.
|
|
||||||
Debug aws.LogLevelType
|
|
||||||
|
|
||||||
// The logger that logging information will be written to. If the logger
// is nil, nothing will be logged.
|
|
||||||
Logger aws.Logger
|
|
||||||
|
|
||||||
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
|
|
||||||
// request header to the request's query string. This is most commonly used
|
|
||||||
// with pre-signed requests preventing headers from being added to the
|
|
||||||
// request's query string.
|
|
||||||
DisableHeaderHoisting bool
|
|
||||||
|
|
||||||
// Disables the automatic escaping of the URI path of the request for the
|
|
||||||
// signature's canonical string's path. For services that do not need additional
|
|
||||||
// escaping then use this to disable the signer escaping the path.
|
|
||||||
//
|
|
||||||
// S3 is an example of a service that does not need additional escaping.
|
|
||||||
//
|
|
||||||
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
|
|
||||||
DisableURIPathEscaping bool
|
|
||||||
|
|
||||||
// Disables the automatic setting of the HTTP request's Body field with the
|
|
||||||
// io.ReadSeeker passed in to the signer. This is useful if you're using a
|
|
||||||
// custom wrapper around the body for the io.ReadSeeker and want to preserve
|
|
||||||
// the Body value on the Request.Body.
|
|
||||||
//
|
|
||||||
// This does run the risk of signing a request with a body that will not be
|
|
||||||
// sent in the request. Need to ensure that the underlying data of the Body
|
|
||||||
// values are the same.
|
|
||||||
DisableRequestBodyOverwrite bool
|
|
||||||
|
|
||||||
// currentTimeFn returns the time value which represents the current time.
|
|
||||||
// This value should only be used for testing. If it is nil the default
|
|
||||||
// time.Now will be used.
|
|
||||||
currentTimeFn func() time.Time
|
|
||||||
|
|
||||||
// UnsignedPayload will prevent signing of the payload. This will only
|
|
||||||
// work for services that have support for this.
|
|
||||||
UnsignedPayload bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSigner returns a Signer pointer configured with the credentials and optional
|
|
||||||
// option values provided. If no options are provided the Signer will use its
|
|
||||||
// default configuration.
|
|
||||||
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
|
|
||||||
v4 := &Signer{
|
|
||||||
Credentials: credentials,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, option := range options {
|
|
||||||
option(v4)
|
|
||||||
}
|
|
||||||
|
|
||||||
return v4
|
|
||||||
}
|
|
||||||
|
|
||||||
type signingCtx struct {
|
|
||||||
ServiceName string
|
|
||||||
Region string
|
|
||||||
Request *http.Request
|
|
||||||
Body io.ReadSeeker
|
|
||||||
Query url.Values
|
|
||||||
Time time.Time
|
|
||||||
ExpireTime time.Duration
|
|
||||||
SignedHeaderVals http.Header
|
|
||||||
|
|
||||||
DisableURIPathEscaping bool
|
|
||||||
|
|
||||||
credValues credentials.Value
|
|
||||||
isPresign bool
|
|
||||||
formattedTime string
|
|
||||||
formattedShortTime string
|
|
||||||
unsignedPayload bool
|
|
||||||
|
|
||||||
bodyDigest string
|
|
||||||
signedHeaders string
|
|
||||||
canonicalHeaders string
|
|
||||||
canonicalString string
|
|
||||||
credentialString string
|
|
||||||
stringToSign string
|
|
||||||
signature string
|
|
||||||
authorization string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign signs AWS v4 requests with the provided body, service name, region the
|
|
||||||
// request is made to, and time the request is signed at. The signTime allows
|
|
||||||
// you to specify that a request is signed for the future, and cannot be
|
|
||||||
// used until then.
|
|
||||||
//
|
|
||||||
// Returns a list of HTTP headers that were included in the signature or an
|
|
||||||
// error if signing the request failed. Generally for signed requests this value
|
|
||||||
// is not needed as the full request context will be captured by the http.Request
|
|
||||||
// value. It is included for reference though.
|
|
||||||
//
|
|
||||||
// Sign will set the request's Body to be the `body` parameter passed in. If
|
|
||||||
// the body is not already an io.ReadCloser, it will be wrapped within one. If
|
|
||||||
// a `nil` body parameter is passed to Sign, the request's Body field will
// also be set to nil. It's important to note that this functionality will not
// change the request's ContentLength.
|
|
||||||
//
|
|
||||||
// Sign differs from Presign in that it will sign the request using HTTP
|
|
||||||
// header values. This type of signing is intended for http.Request values that
|
|
||||||
// will not be shared, or are shared in a way the header values on the request
|
|
||||||
// will not be lost.
|
|
||||||
//
|
|
||||||
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
|
||||||
// generated. To bypass the signer computing the hash you can set the
|
|
||||||
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
|
||||||
// only compute the hash if the request header value is empty.
|
|
||||||
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
|
|
||||||
return v4.signWithBody(r, body, service, region, 0, signTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Presign signs AWS v4 requests with the provided body, service name, region
|
|
||||||
// the request is made to, and time the request is signed at. The signTime
|
|
||||||
// allows you to specify that a request is signed for the future, and cannot
|
|
||||||
// be used until then.
|
|
||||||
//
|
|
||||||
// Returns a list of HTTP headers that were included in the signature or an
|
|
||||||
// error if signing the request failed. For presigned requests these headers
|
|
||||||
// and their values must be included on the HTTP request when it is made. This
|
|
||||||
// is helpful to know what header values need to be shared with the party the
|
|
||||||
// presigned request will be distributed to.
|
|
||||||
//
|
|
||||||
// Presign differs from Sign in that it will sign the request using query string
|
|
||||||
// instead of header values. This allows you to share the Presigned Request's
|
|
||||||
// URL with third parties, or distribute it throughout your system with minimal
|
|
||||||
// dependencies.
|
|
||||||
//
|
|
||||||
// Presign also takes an exp value which is the duration the
|
|
||||||
// signed request will be valid after the signing time. This allows you to
|
|
||||||
// set when the request will expire.
|
|
||||||
//
|
|
||||||
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
|
||||||
// generated. To bypass the signer computing the hash you can set the
|
|
||||||
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
|
||||||
// only compute the hash if the request header value is empty.
|
|
||||||
//
|
|
||||||
// Presigning an S3 request will not compute the body's SHA256 hash by default.
// This is because the general use case for S3 presigned URLs is to share
|
|
||||||
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
|
|
||||||
// presigned request's signature you can set the "X-Amz-Content-Sha256"
|
|
||||||
// HTTP header and that will be included in the request's signature.
|
|
||||||
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
|
||||||
return v4.signWithBody(r, body, service, region, exp, signTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
|
||||||
currentTimeFn := v4.currentTimeFn
|
|
||||||
if currentTimeFn == nil {
|
|
||||||
currentTimeFn = time.Now
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := &signingCtx{
|
|
||||||
Request: r,
|
|
||||||
Body: body,
|
|
||||||
Query: r.URL.Query(),
|
|
||||||
Time: signTime,
|
|
||||||
ExpireTime: exp,
|
|
||||||
isPresign: exp != 0,
|
|
||||||
ServiceName: service,
|
|
||||||
Region: region,
|
|
||||||
DisableURIPathEscaping: v4.DisableURIPathEscaping,
|
|
||||||
unsignedPayload: v4.UnsignedPayload,
|
|
||||||
}
|
|
||||||
|
|
||||||
for key := range ctx.Query {
|
|
||||||
sort.Strings(ctx.Query[key])
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.isRequestSigned() {
|
|
||||||
ctx.Time = currentTimeFn()
|
|
||||||
ctx.handlePresignRemoval()
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
ctx.credValues, err = v4.Credentials.Get()
|
|
||||||
if err != nil {
|
|
||||||
return http.Header{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.assignAmzQueryValues()
|
|
||||||
ctx.build(v4.DisableHeaderHoisting)
|
|
||||||
|
|
||||||
// If the request is not presigned the body should be attached to it. This
|
|
||||||
// prevents the confusion of wanting to send a signed request without
|
|
||||||
// the body the request was signed for attached.
|
|
||||||
if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
|
|
||||||
var reader io.ReadCloser
|
|
||||||
if body != nil {
|
|
||||||
var ok bool
|
|
||||||
if reader, ok = body.(io.ReadCloser); !ok {
|
|
||||||
reader = ioutil.NopCloser(body)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r.Body = reader
|
|
||||||
}
|
|
||||||
|
|
||||||
if v4.Debug.Matches(aws.LogDebugWithSigning) {
|
|
||||||
v4.logSigningInfo(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx.SignedHeaderVals, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) handlePresignRemoval() {
|
|
||||||
if !ctx.isPresign {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// The request has already been signed once. The current signature values
// are invalid and need to be removed so the request can be signed again.
|
|
||||||
ctx.removePresign()
|
|
||||||
|
|
||||||
// Update the request's query string to ensure the values stays in
|
|
||||||
// sync in the case retrieving the new credentials fails.
|
|
||||||
ctx.Request.URL.RawQuery = ctx.Query.Encode()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) assignAmzQueryValues() {
|
|
||||||
if ctx.isPresign {
|
|
||||||
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
|
||||||
if ctx.credValues.SessionToken != "" {
|
|
||||||
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
|
||||||
} else {
|
|
||||||
ctx.Query.Del("X-Amz-Security-Token")
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.credValues.SessionToken != "" {
|
|
||||||
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignRequestHandler is a named request handler the SDK will use to sign
// service client requests with the V4 signature.
|
|
||||||
var SignRequestHandler = request.NamedHandler{
|
|
||||||
Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignSDKRequest signs an AWS request with the V4 signature. This
|
|
||||||
// request handler should only be used with the SDK's built in service client's
|
|
||||||
// API operation requests.
|
|
||||||
//
|
|
||||||
// This function should not be used on its own, but in conjunction with
|
|
||||||
// an AWS service client's API operation call. To sign a standalone request
|
|
||||||
// not created by a service client's API operation method use the "Sign" or
|
|
||||||
// "Presign" functions of the "Signer" type.
|
|
||||||
//
|
|
||||||
// If the credentials of the request's config are set to
|
|
||||||
// credentials.AnonymousCredentials the request will not be signed.
|
|
||||||
func SignSDKRequest(req *request.Request) {
|
|
||||||
signSDKRequestWithCurrTime(req, time.Now)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildNamedHandler will build a generic handler for signing.
|
|
||||||
func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
|
|
||||||
return request.NamedHandler{
|
|
||||||
Name: name,
|
|
||||||
Fn: func(req *request.Request) {
|
|
||||||
signSDKRequestWithCurrTime(req, time.Now, opts...)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
|
|
||||||
// If the request does not need to be signed ignore the signing of the
|
|
||||||
// request if the AnonymousCredentials object is used.
|
|
||||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
region := req.ClientInfo.SigningRegion
|
|
||||||
if region == "" {
|
|
||||||
region = aws.StringValue(req.Config.Region)
|
|
||||||
}
|
|
||||||
|
|
||||||
name := req.ClientInfo.SigningName
|
|
||||||
if name == "" {
|
|
||||||
name = req.ClientInfo.ServiceName
|
|
||||||
}
|
|
||||||
|
|
||||||
v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
|
|
||||||
v4.Debug = req.Config.LogLevel.Value()
|
|
||||||
v4.Logger = req.Config.Logger
|
|
||||||
v4.DisableHeaderHoisting = req.NotHoist
|
|
||||||
v4.currentTimeFn = curTimeFn
|
|
||||||
if name == "s3" {
|
|
||||||
// S3 service should not have any escaping applied
|
|
||||||
v4.DisableURIPathEscaping = true
|
|
||||||
}
|
|
||||||
// Prevents setting the HTTPRequest's Body. Since the Body could be
|
|
||||||
// wrapped in a custom io.Closer that we do not want to be stomped
|
|
||||||
// on top of by the signer.
|
|
||||||
v4.DisableRequestBodyOverwrite = true
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(v4)
|
|
||||||
}
|
|
||||||
|
|
||||||
signingTime := req.Time
|
|
||||||
if !req.LastSignedAt.IsZero() {
|
|
||||||
signingTime = req.LastSignedAt
|
|
||||||
}
|
|
||||||
|
|
||||||
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
|
|
||||||
name, region, req.ExpireTime, signingTime,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
req.Error = err
|
|
||||||
req.SignedHeaderVals = nil
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
req.SignedHeaderVals = signedHeaders
|
|
||||||
req.LastSignedAt = curTimeFn()
|
|
||||||
}
|
|
||||||
|
|
||||||
const logSignInfoMsg = `DEBUG: Request Signature:
|
|
||||||
---[ CANONICAL STRING ]-----------------------------
|
|
||||||
%s
|
|
||||||
---[ STRING TO SIGN ]--------------------------------
|
|
||||||
%s%s
|
|
||||||
-----------------------------------------------------`
|
|
||||||
const logSignedURLMsg = `
|
|
||||||
---[ SIGNED URL ]------------------------------------
|
|
||||||
%s`
|
|
||||||
|
|
||||||
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
|
|
||||||
signedURLMsg := ""
|
|
||||||
if ctx.isPresign {
|
|
||||||
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
|
|
||||||
}
|
|
||||||
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
|
|
||||||
v4.Logger.Log(msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
|
|
||||||
ctx.buildTime() // no depends
|
|
||||||
ctx.buildCredentialString() // no depends
|
|
||||||
|
|
||||||
ctx.buildBodyDigest()
|
|
||||||
|
|
||||||
unsignedHeaders := ctx.Request.Header
|
|
||||||
if ctx.isPresign {
|
|
||||||
if !disableHeaderHoisting {
|
|
||||||
urlValues := url.Values{}
|
|
||||||
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
|
|
||||||
for k := range urlValues {
|
|
||||||
ctx.Query[k] = urlValues[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
|
|
||||||
ctx.buildCanonicalString() // depends on canon headers / signed headers
|
|
||||||
ctx.buildStringToSign() // depends on canon string
|
|
||||||
ctx.buildSignature() // depends on string to sign
|
|
||||||
|
|
||||||
if ctx.isPresign {
|
|
||||||
ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
|
|
||||||
} else {
|
|
||||||
parts := []string{
|
|
||||||
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
|
|
||||||
"SignedHeaders=" + ctx.signedHeaders,
|
|
||||||
"Signature=" + ctx.signature,
|
|
||||||
}
|
|
||||||
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildTime() {
|
|
||||||
ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
|
|
||||||
ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
|
|
||||||
|
|
||||||
if ctx.isPresign {
|
|
||||||
duration := int64(ctx.ExpireTime / time.Second)
|
|
||||||
ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
|
|
||||||
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
|
||||||
} else {
|
|
||||||
ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildCredentialString() {
|
|
||||||
ctx.credentialString = strings.Join([]string{
|
|
||||||
ctx.formattedShortTime,
|
|
||||||
ctx.Region,
|
|
||||||
ctx.ServiceName,
|
|
||||||
"aws4_request",
|
|
||||||
}, "/")
|
|
||||||
|
|
||||||
if ctx.isPresign {
|
|
||||||
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
|
||||||
query := url.Values{}
|
|
||||||
unsignedHeaders := http.Header{}
|
|
||||||
for k, h := range header {
|
|
||||||
if r.IsValid(k) {
|
|
||||||
query[k] = h
|
|
||||||
} else {
|
|
||||||
unsignedHeaders[k] = h
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return query, unsignedHeaders
|
|
||||||
}
|
|
||||||
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
|
|
||||||
var headers []string
|
|
||||||
headers = append(headers, "host")
|
|
||||||
for k, v := range header {
|
|
||||||
canonicalKey := http.CanonicalHeaderKey(k)
|
|
||||||
if !r.IsValid(canonicalKey) {
|
|
||||||
continue // ignored header
|
|
||||||
}
|
|
||||||
if ctx.SignedHeaderVals == nil {
|
|
||||||
ctx.SignedHeaderVals = make(http.Header)
|
|
||||||
}
|
|
||||||
|
|
||||||
lowerCaseKey := strings.ToLower(k)
|
|
||||||
if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
|
|
||||||
// include additional values
|
|
||||||
ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
headers = append(headers, lowerCaseKey)
|
|
||||||
ctx.SignedHeaderVals[lowerCaseKey] = v
|
|
||||||
}
|
|
||||||
sort.Strings(headers)
|
|
||||||
|
|
||||||
ctx.signedHeaders = strings.Join(headers, ";")
|
|
||||||
|
|
||||||
if ctx.isPresign {
|
|
||||||
ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
|
|
||||||
}
|
|
||||||
|
|
||||||
headerValues := make([]string, len(headers))
|
|
||||||
for i, k := range headers {
|
|
||||||
if k == "host" {
|
|
||||||
if ctx.Request.Host != "" {
|
|
||||||
headerValues[i] = "host:" + ctx.Request.Host
|
|
||||||
} else {
|
|
||||||
headerValues[i] = "host:" + ctx.Request.URL.Host
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
headerValues[i] = k + ":" +
|
|
||||||
strings.Join(ctx.SignedHeaderVals[k], ",")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stripExcessSpaces(headerValues)
|
|
||||||
ctx.canonicalHeaders = strings.Join(headerValues, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildCanonicalString() {
|
|
||||||
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
|
|
||||||
|
|
||||||
uri := getURIPath(ctx.Request.URL)
|
|
||||||
|
|
||||||
if !ctx.DisableURIPathEscaping {
|
|
||||||
uri = rest.EscapePath(uri, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.canonicalString = strings.Join([]string{
|
|
||||||
ctx.Request.Method,
|
|
||||||
uri,
|
|
||||||
ctx.Request.URL.RawQuery,
|
|
||||||
ctx.canonicalHeaders + "\n",
|
|
||||||
ctx.signedHeaders,
|
|
||||||
ctx.bodyDigest,
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildStringToSign() {
|
|
||||||
ctx.stringToSign = strings.Join([]string{
|
|
||||||
authHeaderPrefix,
|
|
||||||
ctx.formattedTime,
|
|
||||||
ctx.credentialString,
|
|
||||||
hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildSignature() {
|
|
||||||
secret := ctx.credValues.SecretAccessKey
|
|
||||||
date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
|
|
||||||
region := makeHmac(date, []byte(ctx.Region))
|
|
||||||
service := makeHmac(region, []byte(ctx.ServiceName))
|
|
||||||
credentials := makeHmac(service, []byte("aws4_request"))
|
|
||||||
signature := makeHmac(credentials, []byte(ctx.stringToSign))
|
|
||||||
ctx.signature = hex.EncodeToString(signature)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *signingCtx) buildBodyDigest() {
|
|
||||||
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
|
|
||||||
if hash == "" {
|
|
||||||
if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
|
|
||||||
hash = "UNSIGNED-PAYLOAD"
|
|
||||||
} else if ctx.Body == nil {
|
|
||||||
hash = emptyStringSHA256
|
|
||||||
} else {
|
|
||||||
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
|
|
||||||
}
|
|
||||||
if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
|
|
||||||
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ctx.bodyDigest = hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// isRequestSigned returns if the request is currently signed or presigned
|
|
||||||
func (ctx *signingCtx) isRequestSigned() bool {
|
|
||||||
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if ctx.Request.Header.Get("Authorization") != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// removePresign removes signing flags for both signed and presigned requests.
|
|
||||||
func (ctx *signingCtx) removePresign() {
|
|
||||||
ctx.Query.Del("X-Amz-Algorithm")
|
|
||||||
ctx.Query.Del("X-Amz-Signature")
|
|
||||||
ctx.Query.Del("X-Amz-Security-Token")
|
|
||||||
ctx.Query.Del("X-Amz-Date")
|
|
||||||
ctx.Query.Del("X-Amz-Expires")
|
|
||||||
ctx.Query.Del("X-Amz-Credential")
|
|
||||||
ctx.Query.Del("X-Amz-SignedHeaders")
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeHmac(key []byte, data []byte) []byte {
|
|
||||||
hash := hmac.New(sha256.New, key)
|
|
||||||
hash.Write(data)
|
|
||||||
return hash.Sum(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeSha256(data []byte) []byte {
|
|
||||||
hash := sha256.New()
|
|
||||||
hash.Write(data)
|
|
||||||
return hash.Sum(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
|
||||||
hash := sha256.New()
|
|
||||||
start, _ := reader.Seek(0, 1)
|
|
||||||
defer reader.Seek(start, 0)
|
|
||||||
|
|
||||||
io.Copy(hash, reader)
|
|
||||||
return hash.Sum(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
const doubleSpace = " "
|
|
||||||
|
|
||||||
// stripExcessSpaces will rewrite the passed in slice's string values to not
|
|
||||||
// contain multiple side-by-side spaces.
|
|
||||||
func stripExcessSpaces(vals []string) {
|
|
||||||
var j, k, l, m, spaces int
|
|
||||||
for i, str := range vals {
|
|
||||||
// Trim trailing spaces
|
|
||||||
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim leading spaces
|
|
||||||
for k = 0; k < j && str[k] == ' '; k++ {
|
|
||||||
}
|
|
||||||
str = str[k : j+1]
|
|
||||||
|
|
||||||
// Strip multiple spaces.
|
|
||||||
j = strings.Index(str, doubleSpace)
|
|
||||||
if j < 0 {
|
|
||||||
vals[i] = str
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := []byte(str)
|
|
||||||
for k, m, l = j, j, len(buf); k < l; k++ {
|
|
||||||
if buf[k] == ' ' {
|
|
||||||
if spaces == 0 {
|
|
||||||
// First space.
|
|
||||||
buf[m] = buf[k]
|
|
||||||
m++
|
|
||||||
}
|
|
||||||
spaces++
|
|
||||||
} else {
|
|
||||||
// End of multiple spaces.
|
|
||||||
spaces = 0
|
|
||||||
buf[m] = buf[k]
|
|
||||||
m++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
vals[i] = string(buf[:m])
|
|
||||||
}
|
|
||||||
}
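The package comment above describes standalone use of the signer; a minimal sketch of signing a request outside a service client, where the credentials, bucket, and region are placeholder assumptions:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Placeholder static credentials; real code should use a provider chain.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/object-key", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Sign the request for S3 in us-east-1 with an empty body.
	if _, err := signer.Sign(req, nil, "s3", "us-east-1", time.Now()); err != nil {
		log.Fatal(err)
	}
	log.Println(req.Header.Get("Authorization"))
}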
118 vendor/github.com/aws/aws-sdk-go/aws/types.go generated vendored
@@ -1,118 +0,0 @@
package aws
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. It should
// only be used with an io.Reader that is also an io.Seeker. Failing to do so
// may cause request signature errors, or the request body's not being sent for
// GET, HEAD and DELETE HTTP methods.
|
|
||||||
//
|
|
||||||
// Deprecated: Should only be used with io.ReadSeeker. If using for
|
|
||||||
// S3 PutObject to stream content use s3manager.Uploader instead.
|
|
||||||
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
|
|
||||||
return ReaderSeekerCloser{r}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
|
|
||||||
// io.Closer interfaces to the underlying object if they are available.
|
|
||||||
type ReaderSeekerCloser struct {
|
|
||||||
r io.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads from the reader up to size of p. The number of bytes read, and
|
|
||||||
// error if it occurred will be returned.
|
|
||||||
//
|
|
||||||
// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
|
|
||||||
//
|
|
||||||
// Performs the same functionality as io.Reader Read
|
|
||||||
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
|
|
||||||
switch t := r.r.(type) {
|
|
||||||
case io.Reader:
|
|
||||||
return t.Read(p)
|
|
||||||
}
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seek sets the offset for the next Read to offset, interpreted according to
|
|
||||||
// whence: 0 means relative to the origin of the file, 1 means relative to the
|
|
||||||
// current offset, and 2 means relative to the end. Seek returns the new offset
|
|
||||||
// and an error, if any.
|
|
||||||
//
|
|
||||||
// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
|
|
||||||
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
switch t := r.r.(type) {
|
|
||||||
case io.Seeker:
|
|
||||||
return t.Seek(offset, whence)
|
|
||||||
}
|
|
||||||
return int64(0), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsSeeker returns if the underlying reader is also a seeker.
|
|
||||||
func (r ReaderSeekerCloser) IsSeeker() bool {
|
|
||||||
_, ok := r.r.(io.Seeker)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the ReaderSeekerCloser.
|
|
||||||
//
|
|
||||||
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
|
|
||||||
func (r ReaderSeekerCloser) Close() error {
|
|
||||||
switch t := r.r.(type) {
|
|
||||||
case io.Closer:
|
|
||||||
return t.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
|
|
||||||
// Can be used with the s3manager.Downloader to download content to a buffer
|
|
||||||
// in memory. Safe to use concurrently.
|
|
||||||
type WriteAtBuffer struct {
|
|
||||||
buf []byte
|
|
||||||
m sync.Mutex
|
|
||||||
|
|
||||||
// GrowthCoeff defines the growth rate of the internal buffer. By
|
|
||||||
// default, the growth rate is 1, where expanding the internal
|
|
||||||
// buffer will allocate only enough capacity to fit the new expected
|
|
||||||
// length.
|
|
||||||
GrowthCoeff float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
|
|
||||||
// provided by buf.
|
|
||||||
func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
|
|
||||||
return &WriteAtBuffer{buf: buf}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteAt writes a slice of bytes to a buffer starting at the position provided.
// The number of bytes written, or an error, will be returned. Previously
// written slices can be overwritten if the writes overlap.
|
|
||||||
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
|
|
||||||
pLen := len(p)
|
|
||||||
expLen := pos + int64(pLen)
|
|
||||||
b.m.Lock()
|
|
||||||
defer b.m.Unlock()
|
|
||||||
if int64(len(b.buf)) < expLen {
|
|
||||||
if int64(cap(b.buf)) < expLen {
|
|
||||||
if b.GrowthCoeff < 1 {
|
|
||||||
b.GrowthCoeff = 1
|
|
||||||
}
|
|
||||||
newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
|
|
||||||
copy(newBuf, b.buf)
|
|
||||||
b.buf = newBuf
|
|
||||||
}
|
|
||||||
b.buf = b.buf[:expLen]
|
|
||||||
}
|
|
||||||
copy(b.buf[pos:], p)
|
|
||||||
return pLen, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bytes returns a slice of bytes written to the buffer.
|
|
||||||
func (b *WriteAtBuffer) Bytes() []byte {
|
|
||||||
b.m.Lock()
|
|
||||||
defer b.m.Unlock()
|
|
||||||
return b.buf
|
|
||||||
}
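The WriteAtBuffer comment above mentions use with the s3manager downloader; a short sketch of that pattern, with bucket, key, and region as placeholder assumptions:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	downloader := s3manager.NewDownloader(sess)

	// Download an object directly into memory instead of a file.
	buf := aws.NewWriteAtBuffer([]byte{})
	n, err := downloader.Download(buf, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
	_ = buf.Bytes()
}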
12 vendor/github.com/aws/aws-sdk-go/aws/url.go generated vendored
@@ -1,12 +0,0 @@
// +build go1.8

package aws

import "net/url"

// URLHostname will extract the Hostname without port from the URL value.
//
// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
func URLHostname(url *url.URL) string {
	return url.Hostname()
}
29 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go generated vendored
@@ -1,29 +0,0 @@
// +build !go1.8

package aws

import (
	"net/url"
	"strings"
)

// URLHostname will extract the Hostname without port from the URL value.
//
// Copy of Go 1.8's net/url#URL.Hostname functionality.
func URLHostname(url *url.URL) string {
	return stripPort(url.Host)
}

// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
// https://golang.org/src/net/url/url.go
func stripPort(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return hostport
	}
	if i := strings.IndexByte(hostport, ']'); i != -1 {
		return strings.TrimPrefix(hostport[:i], "[")
	}
	return hostport[:colon]
}
8 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -1,8 +0,0 @@
// Package aws provides core functionality for making requests to AWS services.
package aws

// SDKName is the name of this AWS SDK
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.10.51"