mirror of https://github.com/thegeeklab/wp-s3-action.git synced 2024-06-02 18:39:42 +02:00
This commit is contained in:
Robert Kaussow 2024-05-12 00:16:40 +02:00
parent 3531e7f604
commit b43429568d
Signed by: xoxys
GPG Key ID: 4E692A2EAECC03C0
5 changed files with 399 additions and 80 deletions

.golangci.yml

@@ -95,3 +95,9 @@ run:
linters-settings:
gofumpt:
extra-rules: true
issues:
exclude-rules:
- path: "_test.go"
linters:
- err113

aws/aws.go Normal file

@@ -0,0 +1,44 @@
package aws
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/cloudfront"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
type Client struct {
S3 *S3
Cloudfront *Cloudfront
}
// NewClient creates new S3 and CloudFront service clients from the provided configuration.
func NewClient(ctx context.Context, url, region, accessKey, secretKey string, pathStyle bool) (*Client, error) {
cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
if err != nil {
return nil, fmt.Errorf("error while loading AWS config: %w", err)
}
if url != "" {
cfg.BaseEndpoint = aws.String(url)
}
// allow using the instance role or providing an access key and secret
if accessKey != "" && secretKey != "" {
cfg.Credentials = credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")
}
c := s3.NewFromConfig(cfg, func(o *s3.Options) {
o.UsePathStyle = pathStyle
})
cf := cloudfront.NewFromConfig(cfg)
return &Client{
S3: &S3{client: c},
Cloudfront: &Cloudfront{client: cf},
}, nil
}
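
For context, a minimal usage sketch of the new constructor; the import path is taken from the test imports, and the region and bucket values are placeholders:

package main

import (
	"context"
	"log"

	"github.com/thegeeklab/wp-s3-action/aws"
)

func main() {
	ctx := context.Background()

	// An empty URL keeps the default AWS endpoint; empty access/secret keys
	// fall back to the default credential chain (e.g. an instance role). A
	// custom URL plus pathStyle=true would target an S3-compatible service.
	client, err := aws.NewClient(ctx, "", "eu-central-1", "", "", false)
	if err != nil {
		log.Fatal(err)
	}

	client.S3.Bucket = "my-bucket" // placeholder bucket name
	log.Printf("bucket %s ready", client.S3.Bucket)
}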

aws/cloudfront.go Normal file

@@ -0,0 +1,40 @@
package aws
import (
"context"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/cloudfront"
"github.com/aws/aws-sdk-go-v2/service/cloudfront/types"
"github.com/rs/zerolog/log"
)
type Cloudfront struct {
client CloudfrontAPIClient
Distribution string
}
type CloudfrontInvalidateOpt struct {
Path string
}
// Invalidate invalidates the specified path in the CloudFront distribution.
func (c *Cloudfront) Invalidate(ctx context.Context, opt CloudfrontInvalidateOpt) error {
log.Debug().Msgf("invalidating '%s'", opt.Path)
_, err := c.client.CreateInvalidation(ctx, &cloudfront.CreateInvalidationInput{
DistributionId: aws.String(c.Distribution),
InvalidationBatch: &types.InvalidationBatch{
CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
Paths: &types.Paths{
Quantity: aws.Int32(1),
Items: []string{
opt.Path,
},
},
},
})
return err
}
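
A sketch of how the relocated Invalidate method might be called; the helper name is hypothetical, and the client is assumed to come from aws.NewClient as above:

package example

import (
	"context"

	"github.com/thegeeklab/wp-s3-action/aws"
)

// invalidateAll is a hypothetical helper: it flushes every cached path of the
// given CloudFront distribution through the relocated Invalidate method.
func invalidateAll(ctx context.Context, client *aws.Client, distribution string) error {
	client.Cloudfront.Distribution = distribution

	// "/*" invalidates all objects; Invalidate sends a single-path batch.
	return client.Cloudfront.Invalidate(ctx, aws.CloudfrontInvalidateOpt{Path: "/*"})
}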

aws/s3.go

@@ -9,32 +9,13 @@ import (
"mime"
"os"
"path/filepath"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/cloudfront"
cf_types "github.com/aws/aws-sdk-go-v2/service/cloudfront/types"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3_types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/rs/zerolog/log"
)
type Client struct {
S3 *S3
Cloudfront *Cloudfront
}
type Cloudfront struct {
client CloudfrontAPIClient
Distribution string
}
type CloudfrontInvalidateOpt struct {
Path string
}
type S3 struct {
client S3APIClient
Bucket string
@@ -64,33 +45,6 @@ type S3ListOptions struct {
Path string
}
// NewClient creates a new S3 client with the provided configuration.
func NewClient(ctx context.Context, url, region, accessKey, secretKey string, pathStyle bool) (*Client, error) {
cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
if err != nil {
return nil, fmt.Errorf("error while loading AWS config: %w", err)
}
if url != "" {
cfg.BaseEndpoint = aws.String(url)
}
// allowing to use the instance role or provide a key and secret
if accessKey != "" && secretKey != "" {
cfg.Credentials = credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")
}
c := s3.NewFromConfig(cfg, func(o *s3.Options) {
o.UsePathStyle = pathStyle
})
cf := cloudfront.NewFromConfig(cfg)
return &Client{
S3: &S3{client: c},
Cloudfront: &Cloudfront{client: cf},
}, nil
}
// Upload uploads a file to an S3 bucket. It first checks if the file already exists in the bucket
// and compares the local file's content and metadata with the remote file. If the file has changed,
// it updates the remote file's metadata. If the file does not exist or has changed,
@@ -117,7 +71,7 @@ func (u *S3) Upload(ctx context.Context, opt S3UploadOptions) error {
Key: &opt.RemoteObjectKey,
})
if err != nil {
var noSuchKeyError *s3_types.NoSuchKey
var noSuchKeyError *types.NoSuchKey
if !errors.As(err, &noSuchKeyError) {
return err
}
@@ -138,7 +92,7 @@ func (u *S3) Upload(ctx context.Context, opt S3UploadOptions) error {
Key: &opt.RemoteObjectKey,
Body: file,
ContentType: &contentType,
ACL: s3_types.ObjectCannedACL(acl),
ACL: types.ObjectCannedACL(acl),
Metadata: metadata,
CacheControl: &cacheControl,
ContentEncoding: &contentEncoding,
@@ -172,10 +126,10 @@ func (u *S3) Upload(ctx context.Context, opt S3UploadOptions) error {
Bucket: &u.Bucket,
Key: &opt.RemoteObjectKey,
CopySource: aws.String(fmt.Sprintf("%s/%s", u.Bucket, opt.RemoteObjectKey)),
ACL: s3_types.ObjectCannedACL(acl),
ACL: types.ObjectCannedACL(acl),
ContentType: &contentType,
Metadata: metadata,
MetadataDirective: s3_types.MetadataDirectiveReplace,
MetadataDirective: types.MetadataDirectiveReplace,
CacheControl: &cacheControl,
ContentEncoding: &contentEncoding,
})
@@ -199,7 +153,7 @@ func (u *S3) Upload(ctx context.Context, opt S3UploadOptions) error {
Key: &opt.RemoteObjectKey,
Body: file,
ContentType: &contentType,
ACL: s3_types.ObjectCannedACL(acl),
ACL: types.ObjectCannedACL(acl),
Metadata: metadata,
CacheControl: &cacheControl,
ContentEncoding: &contentEncoding,
@@ -371,6 +325,7 @@ func getMetadata(file string, patterns map[string]map[string]string) map[string]
return metadata
}
// Redirect adds a redirect from the specified path to the specified location in the S3 bucket.
func (u *S3) Redirect(ctx context.Context, opt S3RedirectOptions) error {
log.Debug().Msgf("adding redirect from '%s' to '%s'", opt.Path, opt.Location)
@@ -381,13 +336,14 @@ func (u *S3) Redirect(ctx context.Context, opt S3RedirectOptions) error {
_, err := u.client.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(u.Bucket),
Key: aws.String(opt.Path),
ACL: s3_types.ObjectCannedACLPublicRead,
ACL: types.ObjectCannedACLPublicRead,
WebsiteRedirectLocation: aws.String(opt.Location),
})
return err
}
// Delete removes the specified object from the S3 bucket.
func (u *S3) Delete(ctx context.Context, opt S3DeleteOptions) error {
log.Debug().Msgf("removing remote file '%s'", opt.RemoteObjectKey)
@@ -403,6 +359,7 @@ func (u *S3) Delete(ctx context.Context, opt S3DeleteOptions) error {
return err
}
// List retrieves a list of object keys in the S3 bucket under the specified path.
func (u *S3) List(ctx context.Context, opt S3ListOptions) ([]string, error) {
remote := make([]string, 0)
@@ -435,22 +392,3 @@ func (u *S3) List(ctx context.Context, opt S3ListOptions) ([]string, error) {
return remote, nil
}
func (c *Cloudfront) Invalidate(ctx context.Context, opt CloudfrontInvalidateOpt) error {
log.Debug().Msgf("invalidating '%s'", opt.Path)
_, err := c.client.CreateInvalidation(ctx, &cloudfront.CreateInvalidationInput{
DistributionId: aws.String(c.Distribution),
InvalidationBatch: &cf_types.InvalidationBatch{
CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
Paths: &cf_types.Paths{
Quantity: aws.Int32(1),
Items: []string{
opt.Path,
},
},
},
})
return err
}
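
The doc comments added above cover Redirect, Delete, and List; a hypothetical helper tying them together, with placeholder prefix and target values and the same assumed import path as before:

package example

import (
	"context"
	"fmt"

	"github.com/thegeeklab/wp-s3-action/aws"
)

// pruneAndRedirect is a hypothetical helper: it lists the objects under a
// prefix, deletes each of them, and then writes a single redirect object at
// the prefix pointing to target.
func pruneAndRedirect(ctx context.Context, s3 *aws.S3, prefix, target string) error {
	keys, err := s3.List(ctx, aws.S3ListOptions{Path: prefix})
	if err != nil {
		return fmt.Errorf("list objects: %w", err)
	}

	for _, key := range keys {
		if err := s3.Delete(ctx, aws.S3DeleteOptions{RemoteObjectKey: key}); err != nil {
			return fmt.Errorf("delete %q: %w", key, err)
		}
	}

	return s3.Redirect(ctx, aws.S3RedirectOptions{Path: prefix, Location: target})
}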

aws/s3_test.go

@@ -2,13 +2,14 @@ package aws
import (
"context"
"errors"
"os"
"path/filepath"
"testing"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3_types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/thegeeklab/wp-s3-action/aws/mocks"
@@ -52,7 +53,7 @@ func TestS3Uploader_Upload(t *testing.T) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
mockS3Client.On("HeadObject", mock.Anything, mock.Anything).Return(&s3.HeadObjectOutput{}, &s3_types.NoSuchKey{})
mockS3Client.On("HeadObject", mock.Anything, mock.Anything).Return(&s3.HeadObjectOutput{}, &types.NoSuchKey{})
mockS3Client.On("PutObject", mock.Anything, mock.Anything).Return(&s3.PutObjectOutput{}, nil)
return &S3{
@@ -103,12 +104,12 @@ func TestS3Uploader_Upload(t *testing.T) {
ContentType: aws.String("text/plain; charset=utf-8"),
}, nil)
mockS3Client.On("GetObjectAcl", mock.Anything, mock.Anything).Return(&s3.GetObjectAclOutput{
Grants: []s3_types.Grant{
Grants: []types.Grant{
{
Grantee: &s3_types.Grantee{
Grantee: &types.Grantee{
URI: aws.String("http://acs.amazonaws.com/groups/global/AllUsers"),
},
Permission: s3_types.PermissionWrite,
Permission: types.PermissionWrite,
},
},
}, nil)
@@ -211,7 +212,7 @@ func TestS3Uploader_Upload(t *testing.T) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
mockS3Client.On("HeadObject", mock.Anything, mock.Anything).Return(&s3.HeadObjectOutput{}, &s3_types.NoSuchKey{})
mockS3Client.On("HeadObject", mock.Anything, mock.Anything).Return(&s3.HeadObjectOutput{}, &types.NoSuchKey{})
return &S3{
client: mockS3Client,
@@ -233,10 +234,10 @@ func TestS3Uploader_Upload(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
upload, opt, teardown := tt.setup(t)
s3, opt, teardown := tt.setup(t)
defer teardown()
err := upload.Upload(context.Background(), opt)
err := s3.Upload(context.Background(), opt)
if tt.wantErr {
assert.Error(t, err)
@@ -256,3 +257,293 @@ func createTempFile(t *testing.T, name string) string {
return name
}
func TestS3_Redirect(t *testing.T) {
t.Parallel()
tests := []struct {
name string
setup func(t *testing.T) (*S3, S3RedirectOptions, func())
wantErr bool
}{
{
name: "redirect_with_valid_options",
setup: func(t *testing.T) (*S3, S3RedirectOptions, func()) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
mockS3Client.On("PutObject", mock.Anything, mock.Anything).Return(&s3.PutObjectOutput{}, nil)
return &S3{
client: mockS3Client,
Bucket: "test-bucket",
}, S3RedirectOptions{
Path: "redirect/path",
Location: "https://example.com",
}, func() {
mockS3Client.AssertExpectations(t)
}
},
wantErr: false,
},
{
name: "skip_redirect_when_dry_run_is_true",
setup: func(t *testing.T) (*S3, S3RedirectOptions, func()) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
return &S3{
client: mockS3Client,
Bucket: "test-bucket",
DryRun: true,
}, S3RedirectOptions{
Path: "redirect/path",
Location: "https://example.com",
}, func() {
mockS3Client.AssertExpectations(t)
}
},
wantErr: false,
},
{
name: "error_when_put_object_fails",
setup: func(t *testing.T) (*S3, S3RedirectOptions, func()) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
mockS3Client.
On("PutObject", mock.Anything, mock.Anything).
Return(&s3.PutObjectOutput{}, errors.New("put object failed"))
return &S3{
client: mockS3Client,
Bucket: "test-bucket",
}, S3RedirectOptions{
Path: "redirect/path",
Location: "https://example.com",
}, func() {
mockS3Client.AssertExpectations(t)
}
},
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
s3, opt, teardown := tt.setup(t)
defer teardown()
err := s3.Redirect(context.Background(), opt)
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
})
}
}
func TestS3_Delete(t *testing.T) {
t.Parallel()
tests := []struct {
name string
setup func(t *testing.T) (*S3, S3DeleteOptions, func())
wantErr bool
}{
{
name: "delete_existing_object",
setup: func(t *testing.T) (*S3, S3DeleteOptions, func()) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
mockS3Client.On("DeleteObject", mock.Anything, mock.Anything).Return(&s3.DeleteObjectOutput{}, nil)
return &S3{
client: mockS3Client,
Bucket: "test-bucket",
}, S3DeleteOptions{
RemoteObjectKey: "path/to/file.txt",
}, func() {
mockS3Client.AssertExpectations(t)
}
},
wantErr: false,
},
{
name: "skip_delete_when_dry_run_is_true",
setup: func(t *testing.T) (*S3, S3DeleteOptions, func()) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
return &S3{
client: mockS3Client,
Bucket: "test-bucket",
DryRun: true,
}, S3DeleteOptions{
RemoteObjectKey: "path/to/file.txt",
}, func() {
mockS3Client.AssertExpectations(t)
}
},
wantErr: false,
},
{
name: "error_when_delete_object_fails",
setup: func(t *testing.T) (*S3, S3DeleteOptions, func()) {
t.Helper()
mockS3Client := mocks.NewMockS3APIClient(t)
mockS3Client.
On("DeleteObject", mock.Anything, mock.Anything).
Return(&s3.DeleteObjectOutput{}, errors.New("delete object failed"))
return &S3{
client: mockS3Client,
Bucket: "test-bucket",
}, S3DeleteOptions{
RemoteObjectKey: "path/to/file.txt",
}, func() {
mockS3Client.AssertExpectations(t)
}
},
wantErr: true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
s3, opt, teardown := tt.setup(t)
defer teardown()
err := s3.Delete(context.Background(), opt)
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
})
}
}
// func TestS3_List(t *testing.T) {
// t.Parallel()
// tests := []struct {
// name string
// setup func(t *testing.T) (*S3, S3ListOptions, func())
// wantErr bool
// want []string
// }{
// {
// name: "list_objects_in_prefix",
// setup: func(t *testing.T) (*S3, S3ListOptions, func()) {
// t.Helper()
// mockS3Client := mocks.NewMockS3APIClient(t)
// mockS3Client.On("ListObjects", mock.Anything, mock.Anything).Return(&s3.ListObjectsOutput{
// Contents: []types.Object{
// {Key: aws.String("prefix/file1.txt")},
// {Key: aws.String("prefix/file2.txt")},
// },
// }, nil)
// return &S3{
// client: mockS3Client,
// Bucket: "test-bucket",
// }, S3ListOptions{
// Path: "prefix/",
// }, func() {
// mockS3Client.AssertExpectations(t)
// }
// },
// wantErr: false,
// want: []string{"prefix/file1.txt", "prefix/file2.txt"},
// },
// {
// name: "list_objects_with_pagination",
// setup: func(t *testing.T) (*S3, S3ListOptions, func()) {
// t.Helper()
// mockS3Client := mocks.NewMockS3APIClient(t)
// mockS3Client.On("ListObjects", mock.Anything, mock.MatchedBy(func(input *s3.ListObjectsInput) bool {
// return *input.Marker == ""
// })).Return(&s3.ListObjectsOutput{
// Contents: []types.Object{{Key: aws.String("prefix/file1.txt")}, {Key: aws.String("prefix/file2.txt")}},
// IsTruncated: aws.Bool(true),
// }, nil)
// mockS3Client.On("ListObjects", mock.Anything, mock.MatchedBy(func(input *s3.ListObjectsInput) bool {
// return *input.Marker == "prefix/file2.txt"
// })).Return(&s3.ListObjectsOutput{
// Contents: []types.Object{{Key: aws.String("prefix/file3.txt")}},
// }, nil)
// return &S3{
// client: mockS3Client,
// Bucket: "test-bucket",
// }, S3ListOptions{
// Path: "prefix/",
// }, func() {
// mockS3Client.AssertExpectations(t)
// }
// },
// wantErr: false,
// want: []string{"prefix/file1.txt", "prefix/file2.txt", "prefix/file3.txt"},
// },
// {
// name: "error_when_list_objects_fails",
// setup: func(t *testing.T) (*S3, S3ListOptions, func()) {
// t.Helper()
// mockS3Client := mocks.NewMockS3APIClient(t)
// mockS3Client.
// On("ListObjects", mock.Anything, mock.Anything).
// Return(&s3.ListObjectsOutput{}, errors.New("list objects failed"))
// return &S3{
// client: mockS3Client,
// Bucket: "test-bucket",
// }, S3ListOptions{
// Path: "prefix/",
// }, func() {
// mockS3Client.AssertExpectations(t)
// }
// },
// wantErr: true,
// want: nil,
// },
// }
// for _, tt := range tests {
// tt := tt
// t.Run(tt.name, func(t *testing.T) {
// t.Parallel()
// s3, opt, teardown := tt.setup(t)
// defer teardown()
// got, err := s3.List(context.Background(), opt)
// if tt.wantErr {
// assert.Error(t, err)
// return
// }
// assert.NoError(t, err)
// assert.ElementsMatch(t, tt.want, got)
// })
// }
// }