Merge pull request #7 from drone-plugins/native

Native
This commit is contained in:
Jack Spirou 2016-01-24 15:45:48 -06:00
commit 28ea5cb7de
8 changed files with 2161 additions and 130 deletions

View File

@ -1 +1 @@
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.waldmmzYiLrJb-IGTrZrgpQynH7j3Gg9l3Go2yOlpdalSDBQPnPHVUX13rQDE6d-jOtkU9d5jDDm-jqUFJxpC_iUOydpul-c_IK1x3Llhvtff2ACHLmhQATOaFcF9M3LPVM-8_UesIUltdKbaNgkg94knXx_KWnP_-GC0iroKljcUx67cJDrvfCKwYAwumH_c1aelFB5pxXKTsyWT-cq99-M34bDgAm0Opw0u8G60q6xjDYdUgQfzllrXzWouA8A-y2-m6JUeEv22u-yavdmA-KurtJhtrcnrb0JIhHbXHQ6RECOf_Pb2LCKNIP173gSmH0xaw3wcQ0jKbWNzodwRQ.yIdC4WIXCthzXa25.IG2Bvn9CtZCLE7KojfHKkkHUXhqbvcJxARQa-MYXHitbezlkpkwvEb3ITiiuZ9dR0KoWCN7fS8Q9jh7qJHQhAlKCCZJrlc7vyrON3-ZCgqT_PQ5f-1VXwN7f8Xsh05NUrq1s2StwGDz706IbsW0d13Trb8W9SeQ8XcCaeb6-KqKPYCh3RHg5of4AXa306lHIt1B1jRYlMkki22s9Ra08PnXMG5P7cVJhOiUmQQ.DektznGNfHQZEM-5g3BIFA
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.LD8abRfGdqm1iV1lMb_ZFRE1EJfcZ3GCcJaU5kRaHbmrvmykyDRZCRkGZxnAcKFyxKyXVj-bKCK4vGkTc0OI3-x36N20gmPPYHM17lp0vm1IZtr1zrJ0Qc0reTTE9wFp4axo6HupV2wSFbLdj3nNH_SHCybQ9bIVn_olZRoGO-3nnKBXmuy0zkTfep0K55Pbty69a1Gl4jbKxyHG50GvSgSkTpRcgSNr_itqfeOkzFkKnYXvLC5zEJ1qmv6v4MCJVvgmySUZdXcdDWoAkQvwfZ65_2NttGnRlw-zXcDHoRWmcmucrpiGT_o7XbdG06c7lcjqJ4fepaqj5eMbseoa_A.SYax_4XXsBK8Vk1D.rk1aDHYQlUwbhQaqIRlXjI247xeRAzOtG3SQrm-RYPqLklCYP2Y-DodgWndKrXQZ7R12y2v1T8TUuRbmmTHXEwTOtV_TbK4XS9jAz7mTc7rKlIahf7HSAUJVlpsGV2qUg6d0wOsd7BXlCkMCFV1A-EPlFPf8RahkKU-PgKQWZ-_2JwISvg_pGNQE3DP2yRiyZ_2LYWR3Gai-MIRWeXrAKKWtCbhTf5GmjFzA_GmONHwObhzzXm9mJgqPmi3xSJwD3c9fSG7zxaB_yVjKxo3b1nMEO-gup9Tcs0U_cAYNGJjg11larmPYIaTWy3XJqzwP84T6OTqcCsEM4ZVB1btQ8PsfiXpgYdGh3DV-DP7ngAFYc9HL5U8fp0go_HVF1C3Tf_O1aA.LWsqNX71DPdSDPFOyWy33w

View File

@ -16,8 +16,9 @@ publish:
password: $$DOCKER_PASS
email: $$DOCKER_EMAIL
repo: plugins/drone-s3-sync
tag: native
when:
branch: master
branch: native
plugin:
name: S3 Sync

85
DOCS.md
View File

@ -8,9 +8,9 @@ Use the S3 sync plugin to synchronize files and folders with an Amazon S3 bucket
* `source` - location of folder to sync
* `target` - target folder in your S3 bucket
* `delete` - deletes files in the target not found in the source
* `include` - don't exclude files that match the specified pattern
* `exclude` - exclude files that match the specified pattern
* `content_type` - override default mime-tpyes to use this value
* `content_type` - override default mime-types to use this value
* `metadata` - set custom metadata
* `redirects` - targets that should redirect elsewhere
The following is a sample S3 configuration in your .drone.yml file:
@ -26,3 +26,82 @@ publish:
target: /target/location
delete: true
```
Both `acl` and `content_type` can be passed as a string value to apply to all files, or as a map to apply to a subset of files.
For example:
```yaml
publish:
s3_sync:
acl:
"public/*": public-read
"private/*": private
content_type:
".svg": image/svg+xml
region: "us-east-1"
bucket: "my-bucket.s3-website-us-east-1.amazonaws.com"
access_key: "970d28f4dd477bc184fbd10b376de753"
secret_key: "9c5785d3ece6a9cdefa42eb99b58986f9095ff1c"
source: folder/to/archive
target: /target/location
delete: true
```
In the case of `acl` the key of the map is a glob. If there are no matches in your settings for a given file, the default is `"private"`.
For the `content_type` field, the key is an extension including the leading dot `.`. If you want to set a content type for files with no extension, set the key to the empty string `""`. If there are no matches for the `content_type` of any file, one will automatically be determined for you.
The `metadata` field can be set as either an object where the keys are the metadata headers:
```yaml
publish:
s3_sync:
acl: public-read
region: "us-east-1"
bucket: "my-bucket.s3-website-us-east-1.amazonaws.com"
access_key: "970d28f4dd477bc184fbd10b376de753"
secret_key: "9c5785d3ece6a9cdefa42eb99b58986f9095ff1c"
source: folder/to/archive
target: /target/location
delete: true
metadata:
Cache-Control: "max-age: 10000"
```
Or you can specify metadata for file patterns by using a glob:
```yaml
publish:
s3_sync:
acl: public-read
region: "us-east-1"
bucket: "my-bucket.s3-website-us-east-1.amazonaws.com"
access_key: "970d28f4dd477bc184fbd10b376de753"
secret_key: "9c5785d3ece6a9cdefa42eb99b58986f9095ff1c"
source: folder/to/archive
target: /target/location
delete: true
metadata:
"*.png":
Cache-Control: "max-age: 10000000"
"*.html":
Cache-Control: "max-age: 1000"
```
Additionally, you can specify redirect targets for files that don't exist by using the `redirects` key:
```yaml
publish:
s3_sync:
acl: public-read
region: "us-east-1"
bucket: "my-bucket.s3-website-us-east-1.amazonaws.com"
access_key: "970d28f4dd477bc184fbd10b376de753"
secret_key: "9c5785d3ece6a9cdefa42eb99b58986f9095ff1c"
source: folder/to/archive
target: /target/location
delete: true
redirects:
some/missing/file: /somewhere/that/actually/exists
```

View File

@ -5,8 +5,7 @@
FROM gliderlabs/alpine:3.1
RUN apk add --update \
python \
py-pip \
&& pip install awscli
ca-certificates
ADD drone-s3-sync /bin/
ADD mime.types /etc/
ENTRYPOINT ["/bin/drone-s3-sync"]

272
aws.go Normal file
View File

@ -0,0 +1,272 @@
package main
import (
"crypto/md5"
"fmt"
"io"
"mime"
"os"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/ryanuber/go-glob"
)
// AWS bundles the S3 API client with the plugin configuration for a
// single sync run.
type AWS struct {
	client *s3.S3     // S3 API client built by NewAWS
	remote []string   // NOTE(review): appears unused by the methods in this file — confirm
	local  []string   // NOTE(review): appears unused by the methods in this file — confirm
	vargs  PluginArgs // plugin settings (bucket, ACLs, content types, metadata, ...)
}
// NewAWS builds an AWS helper from the plugin settings, creating an S3
// client with static credentials for the configured region.
func NewAWS(vargs PluginArgs) AWS {
	sess := session.New(&aws.Config{
		Credentials: credentials.NewStaticCredentials(vargs.Key, vargs.Secret, ""),
		Region:      aws.String(vargs.Region),
	})
	// Start with empty key lists. The original used make([]string, 1, 1),
	// which seeded each list with a spurious "" element.
	return AWS{
		client: s3.New(sess),
		remote: []string{},
		local:  []string{},
		vargs:  vargs,
	}
}
// Upload syncs the file at the local path to the remote S3 key.
// It resolves the ACL, Content-Type and metadata for the file from the
// plugin settings, then either uploads the file (new or changed
// content), refreshes metadata in place via CopyObject, or skips the
// file entirely when nothing differs.
func (a *AWS) Upload(local, remote string) error {
	if local == "" {
		return nil
	}

	file, err := os.Open(local)
	if err != nil {
		return err
	}
	defer file.Close()

	// ACL: a single string applies to every file; otherwise the map
	// keys are globs matched against the local path. Default: private.
	access := ""
	if a.vargs.Access.IsString() {
		access = a.vargs.Access.String()
	} else if !a.vargs.Access.IsEmpty() {
		accessMap := a.vargs.Access.Map()
		for pattern := range accessMap {
			if glob.Glob(pattern, local) {
				access = accessMap[pattern]
				break
			}
		}
	}
	if access == "" {
		access = "private"
	}

	// Content-Type: a single string applies to every file; otherwise
	// the map keys are extensions including the leading dot. Fall back
	// to the mime package's guess for the extension.
	fileExt := filepath.Ext(local)
	var contentType string
	if a.vargs.ContentType.IsString() {
		contentType = a.vargs.ContentType.String()
	} else if !a.vargs.ContentType.IsEmpty() {
		contentMap := a.vargs.ContentType.Map()
		for patternExt := range contentMap {
			if patternExt == fileExt {
				contentType = contentMap[patternExt]
				break
			}
		}
	}
	if contentType == "" {
		contentType = mime.TypeByExtension(fileExt)
	}

	// Metadata: map keys are globs matched against the local path; the
	// first matching pattern supplies all headers.
	metadata := map[string]*string{}
	for pattern, kv := range a.vargs.Metadata.Map() {
		if glob.Glob(pattern, local) {
			for k, v := range kv {
				metadata[k] = aws.String(v)
			}
			break
		}
	}

	head, err := a.client.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(a.vargs.Bucket),
		Key:    aws.String(remote),
	})
	if err != nil {
		// A 404 means the object does not exist yet; anything else is
		// fatal. (The original check was inverted: the inner == "404"
		// branch was unreachable inside a != "404" guard, and a real
		// 404 fell through to dereference the invalid Head response.)
		if awsErr, ok := err.(awserr.Error); !ok || awsErr.Code() != "404" {
			return err
		}
		debug("Uploading \"%s\" with Content-Type \"%s\" and permissions \"%s\"", local, contentType, access)
		_, err = a.client.PutObject(&s3.PutObjectInput{
			Bucket:      aws.String(a.vargs.Bucket),
			Key:         aws.String(remote),
			Body:        file,
			ContentType: aws.String(contentType),
			ACL:         aws.String(access),
			Metadata:    metadata,
		})
		return err
	}

	hash := md5.New()
	if _, err := io.Copy(hash, file); err != nil {
		return err
	}
	sum := fmt.Sprintf("\"%x\"", hash.Sum(nil))

	if sum != *head.ETag {
		// Content changed: rewind past the md5 read and re-upload.
		if _, err := file.Seek(0, 0); err != nil {
			return err
		}
		debug("Uploading \"%s\" with Content-Type \"%s\" and permissions \"%s\"", local, contentType, access)
		_, err = a.client.PutObject(&s3.PutObjectInput{
			Bucket:      aws.String(a.vargs.Bucket),
			Key:         aws.String(remote),
			Body:        file,
			ContentType: aws.String(contentType),
			ACL:         aws.String(access),
			Metadata:    metadata,
		})
		return err
	}

	// Content is identical; decide whether Content-Type, metadata or
	// ACL differ and need an in-place refresh.
	shouldCopy := false
	switch {
	case head.ContentType == nil && contentType != "":
		debug("Content-Type has changed from unset to %s", contentType)
		shouldCopy = true
	case head.ContentType != nil && contentType != *head.ContentType:
		debug("Content-Type has changed from %s to %s", *head.ContentType, contentType)
		shouldCopy = true
	case len(head.Metadata) != len(metadata):
		debug("Count of metadata values has changed for %s", local)
		shouldCopy = true
	}
	if !shouldCopy {
		for k, v := range metadata {
			if hv, ok := head.Metadata[k]; ok && *v != *hv {
				debug("Metadata values have changed for %s", local)
				shouldCopy = true
				break
			}
		}
	}
	if !shouldCopy {
		grant, err := a.client.GetObjectAcl(&s3.GetObjectAclInput{
			Bucket: aws.String(a.vargs.Bucket),
			Key:    aws.String(remote),
		})
		if err != nil {
			return err
		}
		// Reconstruct the effective canned ACL from the grant list.
		previousAccess := "private"
		for _, g := range grant.Grants {
			gt := *g.Grantee
			if gt.URI == nil {
				continue
			}
			if *gt.URI == "http://acs.amazonaws.com/groups/global/AllUsers" {
				if *g.Permission == "READ" {
					previousAccess = "public-read"
				} else if *g.Permission == "WRITE" {
					previousAccess = "public-read-write"
				}
			} else if *gt.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" {
				// The original compared against the AllUsers URI in
				// both branches, so authenticated-read was never
				// detected.
				if *g.Permission == "READ" {
					previousAccess = "authenticated-read"
				}
			}
		}
		if previousAccess != access {
			debug("Permissions for \"%s\" have changed from \"%s\" to \"%s\"", remote, previousAccess, access)
			shouldCopy = true
		}
	}
	if !shouldCopy {
		debug("Skipping \"%s\" because hashes and metadata match", local)
		return nil
	}

	debug("Updating metadata for \"%s\" Content-Type: \"%s\", ACL: \"%s\"", local, contentType, access)
	_, err = a.client.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String(a.vargs.Bucket),
		Key:               aws.String(remote),
		CopySource:        aws.String(fmt.Sprintf("%s/%s", a.vargs.Bucket, remote)),
		ACL:               aws.String(access),
		ContentType:       aws.String(contentType),
		Metadata:          metadata,
		MetadataDirective: aws.String("REPLACE"),
	})
	return err
}
// Redirect writes a publicly readable object at path whose
// website-redirect header sends clients to location.
func (a *AWS) Redirect(path, location string) error {
	debug("Adding redirect from \"%s\" to \"%s\"", path, location)
	input := &s3.PutObjectInput{
		Bucket:                  aws.String(a.vargs.Bucket),
		Key:                     aws.String(path),
		ACL:                     aws.String("public-read"),
		WebsiteRedirectLocation: aws.String(location),
	}
	_, err := a.client.PutObject(input)
	return err
}
// Delete removes the object stored under the given remote key.
func (a *AWS) Delete(remote string) error {
	debug("Removing remote file \"%s\"", remote)
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(a.vargs.Bucket),
		Key:    aws.String(remote),
	}
	_, err := a.client.DeleteObject(input)
	return err
}
// List returns every object key in the bucket under the given prefix,
// following pagination with the marker until the listing is complete.
func (a *AWS) List(path string) ([]string, error) {
	// Start empty: the original used make([]string, 1, 1), which put a
	// spurious "" key at the head of every listing.
	remote := []string{}
	resp, err := a.client.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String(a.vargs.Bucket),
		Prefix: aws.String(path),
	})
	if err != nil {
		return remote, err
	}
	for _, item := range resp.Contents {
		remote = append(remote, *item.Key)
	}
	// Fetch further pages while the response is truncated, using the
	// last key seen as the marker. Guard the pointer deref and the
	// marker index so an empty or odd response cannot panic.
	for resp.IsTruncated != nil && *resp.IsTruncated && len(remote) > 0 {
		resp, err = a.client.ListObjects(&s3.ListObjectsInput{
			Bucket: aws.String(a.vargs.Bucket),
			Prefix: aws.String(path),
			Marker: aws.String(remote[len(remote)-1]),
		})
		if err != nil {
			return remote, err
		}
		for _, item := range resp.Contents {
			remote = append(remote, *item.Key)
		}
	}
	return remote, nil
}

238
main.go
View File

@ -3,159 +3,153 @@ package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/drone/drone-plugin-go/plugin"
"github.com/drone/drone-go/drone"
"github.com/drone/drone-go/plugin"
)
type S3 struct {
Key string `json:"access_key"`
Secret string `json:"secret_key"`
Bucket string `json:"bucket"`
const maxConcurrent = 100
// us-east-1
// us-west-1
// us-west-2
// eu-west-1
// ap-southeast-1
// ap-southeast-2
// ap-northeast-1
// sa-east-1
Region string `json:"region"`
// job describes one unit of sync work handed to a worker goroutine.
type job struct {
	local  string // local file path; "" for delete jobs
	remote string // remote key (for "redirect" jobs this holds the redirect location)
	action string // one of "upload", "redirect" or "delete"
}
// Indicates the files ACL, which should be one
// of the following:
// private
// public-read
// public-read-write
// authenticated-read
// bucket-owner-read
// bucket-owner-full-control
Access string `json:"acl"`
// Copies the files from the specified directory.
// Regexp matching will apply to match multiple
// files
//
// Examples:
// /path/to/file
// /path/to/*.txt
// /path/to/*/*.txt
// /path/to/**
Source string `json:"source"`
Target string `json:"target"`
// Include or exclude all files or objects from the command
// that matches the specified pattern.
Include string `json:"include"`
Exclude string `json:"exclude"`
// Files that exist in the destination but not in the source
// are deleted during sync.
Delete bool `json:"delete"`
// Specify an explicit content type for this operation. This
// value overrides any guessed mime types.
ContentType string `json:"content_type"`
// result pairs a completed job with the error it produced (nil on success).
type result struct {
	j   job
	err error
}
func main() {
workspace := plugin.Workspace{}
vargs := S3{}
vargs := PluginArgs{}
workspace := drone.Workspace{}
plugin.Param("workspace", &workspace)
plugin.Param("vargs", &vargs)
plugin.MustParse()
plugin.Param("workspace", &workspace)
if err := plugin.Parse(); err != nil {
fmt.Println(err)
os.Exit(1)
}
// skip if AWS key or SECRET are empty. A good example for this would
// be forks building a project. S3 might be configured in the source
// repo, but not in the fork
if len(vargs.Key) == 0 || len(vargs.Secret) == 0 {
if len(vargs.Key) == 0 || len(vargs.Secret) == 0 || len(vargs.Bucket) == 0 {
return
}
// make sure a default region is set
if len(vargs.Region) == 0 {
vargs.Region = "us-east-1"
}
// make sure a default access is set
// let's be conservative and assume private
if len(vargs.Access) == 0 {
vargs.Access = "private"
}
// make sure a default source is set
if len(vargs.Source) == 0 {
vargs.Source = "."
}
vargs.Source = filepath.Join(workspace.Path, vargs.Source)
// if the target starts with a "/" we need
// to remove it, otherwise we might adding
// a 3rd slash to s3://
if strings.HasPrefix(vargs.Target, "/") {
vargs.Target = vargs.Target[1:]
}
vargs.Target = fmt.Sprintf("s3://%s/%s", vargs.Bucket, vargs.Target)
cmd := command(vargs)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "AWS_ACCESS_KEY_ID="+vargs.Key)
cmd.Env = append(cmd.Env, "AWS_SECRET_ACCESS_KEY="+vargs.Secret)
cmd.Dir = workspace.Path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
trace(cmd)
// run the command and exit if failed.
err := cmd.Run()
client := NewAWS(vargs)
remote, err := client.List(vargs.Target)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
local := make([]string, 1, 1)
jobs := make([]job, 1, 1)
err = filepath.Walk(vargs.Source, func(path string, info os.FileInfo, err error) error {
if err != nil || info.IsDir() {
return err
}
localPath := path
if vargs.Source != "." {
localPath = strings.TrimPrefix(path, vargs.Source)
if strings.HasPrefix(localPath, "/") {
localPath = localPath[1:]
}
}
local = append(local, localPath)
jobs = append(jobs, job{
local: filepath.Join(vargs.Source, localPath),
remote: filepath.Join(vargs.Target, localPath),
action: "upload",
})
return nil
})
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for path, location := range vargs.Redirects {
path = strings.TrimPrefix(path, "/")
local = append(local, path)
jobs = append(jobs, job{
local: path,
remote: location,
action: "redirect",
})
}
for _, r := range remote {
found := false
for _, l := range local {
if l == r {
found = true
break
}
}
if !found {
jobs = append(jobs, job{
local: "",
remote: r,
action: "delete",
})
}
}
jobChan := make(chan struct{}, maxConcurrent)
results := make(chan *result, len(jobs))
fmt.Printf("Synchronizing with bucket \"%s\"\n", vargs.Bucket)
for _, j := range jobs {
jobChan <- struct{}{}
go func(j job) {
if j.action == "upload" {
err = client.Upload(j.local, j.remote)
} else if j.action == "redirect" {
err = client.Redirect(j.local, j.remote)
} else if j.action == "delete" && vargs.Delete {
err = client.Delete(j.remote)
} else {
err = nil
}
results <- &result{j, err}
<-jobChan
}(j)
}
for _ = range jobs {
r := <-results
if r.err != nil {
fmt.Printf("ERROR: failed to %s %s to %s: %+v\n", r.j.action, r.j.local, r.j.remote, r.err)
os.Exit(1)
}
}
fmt.Println("done!")
}
// command is a helper function that returns the command
// and arguments to upload to aws from the command line.
func command(s S3) *exec.Cmd {
// command line args
args := []string{
"s3",
"sync",
s.Source,
s.Target,
"--acl",
s.Access,
"--region",
s.Region,
func debug(format string, args ...interface{}) {
if os.Getenv("DEBUG") != "" {
fmt.Printf(format+"\n", args...)
} else {
fmt.Printf(".")
}
// append delete flag if specified
if s.Delete {
args = append(args, "--delete")
}
// appends exclude flag if specified
if len(s.Exclude) != 0 {
args = append(args, "--exclude")
args = append(args, s.Exclude)
}
// append include flag if specified
if len(s.Include) != 0 {
args = append(args, "--include")
args = append(args, s.Include)
}
// appends content-type if specified
if len(s.ContentType) != 0 {
args = append(args, "--content-type")
args = append(args, s.ContentType)
}
return exec.Command("aws", args...)
}
// trace writes each command to standard error (preceded by a $ ) before it
// is executed. Used for debugging your build.
func trace(cmd *exec.Cmd) {
fmt.Println("$", strings.Join(cmd.Args, " "))
}

1588
mime.types Normal file

File diff suppressed because it is too large Load Diff

98
types.go Normal file
View File

@ -0,0 +1,98 @@
package main
import "encoding/json"
// PluginArgs holds the .drone.yml settings for the S3 sync plugin.
type PluginArgs struct {
	Key         string            `json:"access_key"`   // AWS access key ID
	Secret      string            `json:"secret_key"`   // AWS secret access key
	Bucket      string            `json:"bucket"`       // target S3 bucket
	Region      string            `json:"region"`       // AWS region, e.g. "us-east-1"
	Source      string            `json:"source"`       // local folder to sync from
	Target      string            `json:"target"`       // destination folder in the bucket
	Delete      bool              `json:"delete"`       // delete remote files not present locally
	Access      StringMap         `json:"acl"`          // canned ACL: single value or glob-pattern map
	ContentType StringMap         `json:"content_type"` // content type: single value or extension map
	Metadata    DeepStringMap     `json:"metadata"`     // metadata headers, flat or per glob pattern
	Redirects   map[string]string `json:"redirects"`    // path -> redirect location
}
// DeepStringMap is a two-level string map that decodes from JSON given
// either a nested object ({"pat": {"k": "v"}}) or a flat object
// ({"k": "v"}), the latter being stored under the wildcard key "*".
type DeepStringMap struct {
	parts map[string]map[string]string
}

// UnmarshalJSON decodes b, accepting both the nested and the flat
// object forms. Empty input is a no-op.
func (e *DeepStringMap) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil
	}
	nested := map[string]map[string]string{}
	if json.Unmarshal(b, &nested) != nil {
		// Not the nested shape — retry as a flat map and file it
		// under the catch-all pattern.
		flat := map[string]string{}
		if err := json.Unmarshal(b, &flat); err != nil {
			return err
		}
		nested["*"] = flat
	}
	e.parts = nested
	return nil
}

// Map returns the decoded pattern -> key/value mapping.
func (e *DeepStringMap) Map() map[string]map[string]string {
	return e.parts
}
// StringMap decodes a JSON value that is either a plain string or a
// string-to-string object. A plain string is stored under the internal
// sentinel key "_string_".
type StringMap struct {
	parts map[string]string
}

// UnmarshalJSON decodes b, accepting both the object and the bare
// string forms. Empty input is a no-op.
func (e *StringMap) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil
	}
	m := map[string]string{}
	if json.Unmarshal(b, &m) != nil {
		// Not an object — retry as a bare string.
		var single string
		if err := json.Unmarshal(b, &single); err != nil {
			return err
		}
		m["_string_"] = single
	}
	e.parts = m
	return nil
}

// IsEmpty reports whether no value was decoded.
func (e *StringMap) IsEmpty() bool {
	return e == nil || len(e.parts) == 0
}

// IsString reports whether the decoded value was a bare string.
func (e *StringMap) IsString() bool {
	if e.IsEmpty() || len(e.parts) != 1 {
		return false
	}
	_, found := e.parts["_string_"]
	return found
}

// String returns the bare string value, or "" if the value is empty or
// was an object.
func (e *StringMap) String() string {
	if !e.IsString() {
		return ""
	}
	return e.parts["_string_"]
}

// Map returns the decoded object, or an empty map if the value is
// empty or was a bare string.
func (e *StringMap) Map() map[string]string {
	if e.IsEmpty() || e.IsString() {
		return map[string]string{}
	}
	return e.parts
}