From 3079b2af4dbf6c5a6ecb2aa4fabb56bf27db497a Mon Sep 17 00:00:00 2001 From: Claire Date: Tue, 20 Sep 2022 16:58:40 -0500 Subject: [PATCH] disable aws tests --- get.go | 11 +- get_s3.go | 684 ++++++++++++++++++++++++------------------------- get_s3_test.go | 474 +++++++++++++++++----------------- 3 files changed, 585 insertions(+), 584 deletions(-) diff --git a/get.go b/get.go index c233763c6..3a0ceac5d 100644 --- a/get.go +++ b/get.go @@ -65,11 +65,12 @@ func init() { } Getters = map[string]Getter{ - "file": new(FileGetter), - "git": new(GitGetter), - "gcs": new(GCSGetter), - "hg": new(HgGetter), - "s3": new(S3Getter), + "file": new(FileGetter), + "git": new(GitGetter), + "gcs": new(GCSGetter), + "hg": new(HgGetter), + // disabling s3 for now + // "s3": new(S3Getter), "http": httpGetter, "https": httpGetter, } diff --git a/get_s3.go b/get_s3.go index 7e0d853ba..640cc31b1 100644 --- a/get_s3.go +++ b/get_s3.go @@ -1,344 +1,344 @@ package getter -import ( - "context" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" -) - -// S3Getter is a Getter implementation that will download a module from -// a S3 bucket. -type S3Getter struct { - getter - - // Timeout sets a deadline which all S3 operations should - // complete within. Zero value means no timeout. - Timeout time.Duration -} - -func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { - // Parse URL - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - region, bucket, path, _, creds, err := g.parseUrl(u) - if err != nil { - return 0, err - } - - // Create client config - client, err := g.newS3Client(region, u, creds) - if err != nil { - return 0, err - } - - // List the object(s) at the given prefix - req := &s3.ListObjectsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(path), - } - resp, err := client.ListObjectsWithContext(ctx, req) - if err != nil { - return 0, err - } - - for _, o := range resp.Contents { - // Use file mode on exact match. - if *o.Key == path { - return ClientModeFile, nil - } - - // Use dir mode if child keys are found. - if strings.HasPrefix(*o.Key, path+"/") { - return ClientModeDir, nil - } - } - - // There was no match, so just return file mode. The download is going - // to fail but we will let S3 return the proper error later. 
- return ClientModeFile, nil -} - -func (g *S3Getter) Get(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - // Parse URL - region, bucket, path, _, creds, err := g.parseUrl(u) - if err != nil { - return err - } - - // Remove destination if it already exists - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - - if err == nil { - // Remove the destination - if err := os.RemoveAll(dst); err != nil { - return err - } - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - client, err := g.newS3Client(region, u, creds) - if err != nil { - return err - } - - // List files in path, keep listing until no more objects are found - lastMarker := "" - hasMore := true - for hasMore { - req := &s3.ListObjectsInput{ - Bucket: aws.String(bucket), - Prefix: aws.String(path), - } - if lastMarker != "" { - req.Marker = aws.String(lastMarker) - } - - resp, err := client.ListObjectsWithContext(ctx, req) - if err != nil { - return err - } - - hasMore = aws.BoolValue(resp.IsTruncated) - - // Get each object storing each file relative to the destination path - for _, object := range resp.Contents { - lastMarker = aws.StringValue(object.Key) - objPath := aws.StringValue(object.Key) - - // If the key ends with a backslash assume it is a directory and ignore - if strings.HasSuffix(objPath, "/") { - continue - } - - // Get the object destination path - objDst, err := filepath.Rel(path, objPath) - if err != nil { - return err - } - objDst = filepath.Join(dst, objDst) - - if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { - return err - } - } - } - - return nil -} - -func (g *S3Getter) GetFile(dst string, u *url.URL) error { - ctx := g.Context() - - if g.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, g.Timeout) - defer cancel() - } - - region, bucket, path, version, creds, err := g.parseUrl(u) - if err != nil { - return err - } - - client, err := g.newS3Client(region, u, creds) - if err != nil { - return err - } - - return g.getObject(ctx, client, dst, bucket, path, version) -} - -func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { - req := &s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - if version != "" { - req.VersionId = aws.String(version) - } - - resp, err := client.GetObjectWithContext(ctx, req) - if err != nil { - return err - } - - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { - return err - } - - body := resp.Body - - if g.client != nil && g.client.ProgressListener != nil { - fn := filepath.Base(key) - body = g.client.ProgressListener.TrackProgress(fn, 0, *resp.ContentLength, resp.Body) - } - defer body.Close() - - return copyReader(dst, body, 0666, g.client.umask()) -} - -func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { - conf := &aws.Config{} - metadataURLOverride := os.Getenv("AWS_METADATA_URL") - if creds == nil && metadataURLOverride != "" { - creds = credentials.NewChainCredentials( - []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &ec2rolecreds.EC2RoleProvider{ - Client: 
ec2metadata.New(session.New(&aws.Config{ - Endpoint: aws.String(metadataURLOverride), - })), - }, - }) - } - - if creds != nil { - conf.Endpoint = &url.Host - conf.S3ForcePathStyle = aws.Bool(true) - if url.Scheme == "http" { - conf.DisableSSL = aws.Bool(true) - } - } - - conf.Credentials = creds - if region != "" { - conf.Region = aws.String(region) - } - - return conf.WithCredentialsChainVerboseErrors(true) -} - -func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { - // This just check whether we are dealing with S3 or - // any other S3 compliant service. S3 has a predictable - // url as others do not - if strings.Contains(u.Host, "amazonaws.com") { - // Amazon S3 supports both virtual-hosted–style and path-style URLs to access a bucket, although path-style is deprecated - // In both cases few older regions supports dash-style region indication (s3-Region) even if AWS discourages their use. - // The same bucket could be reached with: - // bucket.s3.region.amazonaws.com/path - // bucket.s3-region.amazonaws.com/path - // s3.amazonaws.com/bucket/path - // s3-region.amazonaws.com/bucket/path - - hostParts := strings.Split(u.Host, ".") - switch len(hostParts) { - // path-style - case 3: - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") - if region == "" { - region = "us-east-1" - } - pathParts := strings.SplitN(u.Path, "/", 3) - bucket = pathParts[1] - path = pathParts[2] - // vhost-style, dash region indication - case 4: - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[1], "s3-"), "s3") - if region == "" { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } - pathParts := strings.SplitN(u.Path, "/", 2) - bucket = hostParts[0] - path = pathParts[1] - //vhost-style, dot region indication - case 5: - region = hostParts[2] - pathParts := strings.SplitN(u.Path, "/", 2) - bucket = hostParts[0] - path = pathParts[1] - - } - if len(hostParts) < 3 && len(hostParts) > 5 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } - version = u.Query().Get("version") - - } else { - pathParts := strings.SplitN(u.Path, "/", 3) - if len(pathParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 compliant URL") - return - } - bucket = pathParts[1] - path = pathParts[2] - version = u.Query().Get("version") - region = u.Query().Get("region") - if region == "" { - region = "us-east-1" - } - } - - _, hasAwsId := u.Query()["aws_access_key_id"] - _, hasAwsSecret := u.Query()["aws_access_key_secret"] - _, hasAwsToken := u.Query()["aws_access_token"] - if hasAwsId || hasAwsSecret || hasAwsToken { - creds = credentials.NewStaticCredentials( - u.Query().Get("aws_access_key_id"), - u.Query().Get("aws_access_key_secret"), - u.Query().Get("aws_access_token"), - ) - } - - return -} - -func (g *S3Getter) newS3Client( - region string, url *url.URL, creds *credentials.Credentials, -) (*s3.S3, error) { - var sess *session.Session - - if profile := url.Query().Get("aws_profile"); profile != "" { - var err error - sess, err = session.NewSessionWithOptions(session.Options{ - Profile: profile, - SharedConfigState: session.SharedConfigEnable, - }) - if err != nil { - return nil, err - } - } else { - config := g.getAWSConfig(region, url, creds) - sess = session.New(config) - } - - return s3.New(sess), nil -} +// import ( +// "context" +// "fmt" +// "net/url" +// "os" +// "path/filepath" +// "strings" 
+// "time" + +// "github.com/aws/aws-sdk-go/aws" +// "github.com/aws/aws-sdk-go/aws/credentials" +// "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" +// "github.com/aws/aws-sdk-go/aws/ec2metadata" +// "github.com/aws/aws-sdk-go/aws/session" +// "github.com/aws/aws-sdk-go/service/s3" +// ) + +// // S3Getter is a Getter implementation that will download a module from +// // a S3 bucket. +// type S3Getter struct { +// getter + +// // Timeout sets a deadline which all S3 operations should +// // complete within. Zero value means no timeout. +// Timeout time.Duration +// } + +// func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { +// // Parse URL +// ctx := g.Context() + +// if g.Timeout > 0 { +// var cancel context.CancelFunc +// ctx, cancel = context.WithTimeout(ctx, g.Timeout) +// defer cancel() +// } + +// region, bucket, path, _, creds, err := g.parseUrl(u) +// if err != nil { +// return 0, err +// } + +// // Create client config +// client, err := g.newS3Client(region, u, creds) +// if err != nil { +// return 0, err +// } + +// // List the object(s) at the given prefix +// req := &s3.ListObjectsInput{ +// Bucket: aws.String(bucket), +// Prefix: aws.String(path), +// } +// resp, err := client.ListObjectsWithContext(ctx, req) +// if err != nil { +// return 0, err +// } + +// for _, o := range resp.Contents { +// // Use file mode on exact match. +// if *o.Key == path { +// return ClientModeFile, nil +// } + +// // Use dir mode if child keys are found. +// if strings.HasPrefix(*o.Key, path+"/") { +// return ClientModeDir, nil +// } +// } + +// // There was no match, so just return file mode. The download is going +// // to fail but we will let S3 return the proper error later. +// return ClientModeFile, nil +// } + +// func (g *S3Getter) Get(dst string, u *url.URL) error { +// ctx := g.Context() + +// if g.Timeout > 0 { +// var cancel context.CancelFunc +// ctx, cancel = context.WithTimeout(ctx, g.Timeout) +// defer cancel() +// } + +// // Parse URL +// region, bucket, path, _, creds, err := g.parseUrl(u) +// if err != nil { +// return err +// } + +// // Remove destination if it already exists +// _, err = os.Stat(dst) +// if err != nil && !os.IsNotExist(err) { +// return err +// } + +// if err == nil { +// // Remove the destination +// if err := os.RemoveAll(dst); err != nil { +// return err +// } +// } + +// // Create all the parent directories +// if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { +// return err +// } + +// client, err := g.newS3Client(region, u, creds) +// if err != nil { +// return err +// } + +// // List files in path, keep listing until no more objects are found +// lastMarker := "" +// hasMore := true +// for hasMore { +// req := &s3.ListObjectsInput{ +// Bucket: aws.String(bucket), +// Prefix: aws.String(path), +// } +// if lastMarker != "" { +// req.Marker = aws.String(lastMarker) +// } + +// resp, err := client.ListObjectsWithContext(ctx, req) +// if err != nil { +// return err +// } + +// hasMore = aws.BoolValue(resp.IsTruncated) + +// // Get each object storing each file relative to the destination path +// for _, object := range resp.Contents { +// lastMarker = aws.StringValue(object.Key) +// objPath := aws.StringValue(object.Key) + +// // If the key ends with a backslash assume it is a directory and ignore +// if strings.HasSuffix(objPath, "/") { +// continue +// } + +// // Get the object destination path +// objDst, err := filepath.Rel(path, objPath) +// if err != nil { +// return err +// } +// objDst = 
filepath.Join(dst, objDst) + +// if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { +// return err +// } +// } +// } + +// return nil +// } + +// func (g *S3Getter) GetFile(dst string, u *url.URL) error { +// ctx := g.Context() + +// if g.Timeout > 0 { +// var cancel context.CancelFunc +// ctx, cancel = context.WithTimeout(ctx, g.Timeout) +// defer cancel() +// } + +// region, bucket, path, version, creds, err := g.parseUrl(u) +// if err != nil { +// return err +// } + +// client, err := g.newS3Client(region, u, creds) +// if err != nil { +// return err +// } + +// return g.getObject(ctx, client, dst, bucket, path, version) +// } + +// func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { +// req := &s3.GetObjectInput{ +// Bucket: aws.String(bucket), +// Key: aws.String(key), +// } +// if version != "" { +// req.VersionId = aws.String(version) +// } + +// resp, err := client.GetObjectWithContext(ctx, req) +// if err != nil { +// return err +// } + +// // Create all the parent directories +// if err := os.MkdirAll(filepath.Dir(dst), g.client.mode(0755)); err != nil { +// return err +// } + +// body := resp.Body + +// if g.client != nil && g.client.ProgressListener != nil { +// fn := filepath.Base(key) +// body = g.client.ProgressListener.TrackProgress(fn, 0, *resp.ContentLength, resp.Body) +// } +// defer body.Close() + +// return copyReader(dst, body, 0666, g.client.umask()) +// } + +// func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { +// conf := &aws.Config{} +// metadataURLOverride := os.Getenv("AWS_METADATA_URL") +// if creds == nil && metadataURLOverride != "" { +// creds = credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(session.New(&aws.Config{ +// Endpoint: aws.String(metadataURLOverride), +// })), +// }, +// }) +// } + +// if creds != nil { +// conf.Endpoint = &url.Host +// conf.S3ForcePathStyle = aws.Bool(true) +// if url.Scheme == "http" { +// conf.DisableSSL = aws.Bool(true) +// } +// } + +// conf.Credentials = creds +// if region != "" { +// conf.Region = aws.String(region) +// } + +// return conf.WithCredentialsChainVerboseErrors(true) +// } + +// func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { +// // This just check whether we are dealing with S3 or +// // any other S3 compliant service. S3 has a predictable +// // url as others do not +// if strings.Contains(u.Host, "amazonaws.com") { +// // Amazon S3 supports both virtual-hosted–style and path-style URLs to access a bucket, although path-style is deprecated +// // In both cases few older regions supports dash-style region indication (s3-Region) even if AWS discourages their use. 
+// // The same bucket could be reached with: +// // bucket.s3.region.amazonaws.com/path +// // bucket.s3-region.amazonaws.com/path +// // s3.amazonaws.com/bucket/path +// // s3-region.amazonaws.com/bucket/path + +// hostParts := strings.Split(u.Host, ".") +// switch len(hostParts) { +// // path-style +// case 3: +// // Parse the region out of the first part of the host +// region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") +// if region == "" { +// region = "us-east-1" +// } +// pathParts := strings.SplitN(u.Path, "/", 3) +// bucket = pathParts[1] +// path = pathParts[2] +// // vhost-style, dash region indication +// case 4: +// // Parse the region out of the first part of the host +// region = strings.TrimPrefix(strings.TrimPrefix(hostParts[1], "s3-"), "s3") +// if region == "" { +// err = fmt.Errorf("URL is not a valid S3 URL") +// return +// } +// pathParts := strings.SplitN(u.Path, "/", 2) +// bucket = hostParts[0] +// path = pathParts[1] +// //vhost-style, dot region indication +// case 5: +// region = hostParts[2] +// pathParts := strings.SplitN(u.Path, "/", 2) +// bucket = hostParts[0] +// path = pathParts[1] + +// } +// if len(hostParts) < 3 && len(hostParts) > 5 { +// err = fmt.Errorf("URL is not a valid S3 URL") +// return +// } +// version = u.Query().Get("version") + +// } else { +// pathParts := strings.SplitN(u.Path, "/", 3) +// if len(pathParts) != 3 { +// err = fmt.Errorf("URL is not a valid S3 compliant URL") +// return +// } +// bucket = pathParts[1] +// path = pathParts[2] +// version = u.Query().Get("version") +// region = u.Query().Get("region") +// if region == "" { +// region = "us-east-1" +// } +// } + +// _, hasAwsId := u.Query()["aws_access_key_id"] +// _, hasAwsSecret := u.Query()["aws_access_key_secret"] +// _, hasAwsToken := u.Query()["aws_access_token"] +// if hasAwsId || hasAwsSecret || hasAwsToken { +// creds = credentials.NewStaticCredentials( +// u.Query().Get("aws_access_key_id"), +// u.Query().Get("aws_access_key_secret"), +// u.Query().Get("aws_access_token"), +// ) +// } + +// return +// } + +// func (g *S3Getter) newS3Client( +// region string, url *url.URL, creds *credentials.Credentials, +// ) (*s3.S3, error) { +// var sess *session.Session + +// if profile := url.Query().Get("aws_profile"); profile != "" { +// var err error +// sess, err = session.NewSessionWithOptions(session.Options{ +// Profile: profile, +// SharedConfigState: session.SharedConfigEnable, +// }) +// if err != nil { +// return nil, err +// } +// } else { +// config := g.getAWSConfig(region, url, creds) +// sess = session.New(config) +// } + +// return s3.New(sess), nil +// } diff --git a/get_s3_test.go b/get_s3_test.go index 25ce48367..5b781f43b 100644 --- a/get_s3_test.go +++ b/get_s3_test.go @@ -1,269 +1,269 @@ package getter -import ( - "net/url" - "os" - "path/filepath" - "testing" +// import ( +// "net/url" +// "os" +// "path/filepath" +// "testing" - "github.com/aws/aws-sdk-go/aws/awserr" -) +// "github.com/aws/aws-sdk-go/aws/awserr" +// ) -func init() { - // These are well known restricted IAM keys to a HashiCorp-managed bucket - // in a private AWS account that only has access to the open source test - // resources. - // - // We do the string concat below to avoid AWS autodetection of a key. This - // key is locked down an IAM policy that is read-only so we're purposely - // exposing it. 
- os.Setenv("AWS_ACCESS_KEY", "AKIAITTDR"+"WY2STXOZE2A") - os.Setenv("AWS_SECRET_KEY", "oMwSyqdass2kPF"+"/7ORZA9dlb/iegz+89B0Cy01Ea") -} +// func init() { +// // These are well known restricted IAM keys to a HashiCorp-managed bucket +// // in a private AWS account that only has access to the open source test +// // resources. +// // +// // We do the string concat below to avoid AWS autodetection of a key. This +// // key is locked down an IAM policy that is read-only so we're purposely +// // exposing it. +// os.Setenv("AWS_ACCESS_KEY", "AKIAITTDR"+"WY2STXOZE2A") +// os.Setenv("AWS_SECRET_KEY", "oMwSyqdass2kPF"+"/7ORZA9dlb/iegz+89B0Cy01Ea") +// } -func TestS3Getter_impl(t *testing.T) { - var _ Getter = new(S3Getter) -} +// func TestS3Getter_impl(t *testing.T) { +// var _ Getter = new(S3Getter) +// } -func TestS3Getter(t *testing.T) { - g := new(S3Getter) - dst := tempDir(t) +// func TestS3Getter(t *testing.T) { +// g := new(S3Getter) +// dst := tempDir(t) - // With a dir that doesn't exist - err := g.Get( - dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder")) - if err != nil { - t.Fatalf("err: %s", err) - } +// // With a dir that doesn't exist +// err := g.Get( +// dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder")) +// if err != nil { +// t.Fatalf("err: %s", err) +// } - // Verify the main file exists - mainPath := filepath.Join(dst, "main.tf") - if _, err := os.Stat(mainPath); err != nil { - t.Fatalf("err: %s", err) - } -} +// // Verify the main file exists +// mainPath := filepath.Join(dst, "main.tf") +// if _, err := os.Stat(mainPath); err != nil { +// t.Fatalf("err: %s", err) +// } +// } -func TestS3Getter_subdir(t *testing.T) { - g := new(S3Getter) - dst := tempDir(t) +// func TestS3Getter_subdir(t *testing.T) { +// g := new(S3Getter) +// dst := tempDir(t) - // With a dir that doesn't exist - err := g.Get( - dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/subfolder")) - if err != nil { - t.Fatalf("err: %s", err) - } +// // With a dir that doesn't exist +// err := g.Get( +// dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/subfolder")) +// if err != nil { +// t.Fatalf("err: %s", err) +// } - // Verify the main file exists - subPath := filepath.Join(dst, "sub.tf") - if _, err := os.Stat(subPath); err != nil { - t.Fatalf("err: %s", err) - } -} +// // Verify the main file exists +// subPath := filepath.Join(dst, "sub.tf") +// if _, err := os.Stat(subPath); err != nil { +// t.Fatalf("err: %s", err) +// } +// } -func TestS3Getter_GetFile(t *testing.T) { - g := new(S3Getter) - dst := tempTestFile(t) - defer os.RemoveAll(filepath.Dir(dst)) +// func TestS3Getter_GetFile(t *testing.T) { +// g := new(S3Getter) +// dst := tempTestFile(t) +// defer os.RemoveAll(filepath.Dir(dst)) - // Download - err := g.GetFile( - dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf")) - if err != nil { - t.Fatalf("err: %s", err) - } +// // Download +// err := g.GetFile( +// dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf")) +// if err != nil { +// t.Fatalf("err: %s", err) +// } - // Verify the main file exists - if _, err := os.Stat(dst); err != nil { - t.Fatalf("err: %s", err) - } - assertContents(t, dst, "# Main\n") -} +// // Verify the main file exists +// if _, err := os.Stat(dst); err != nil { +// t.Fatalf("err: %s", err) +// } +// assertContents(t, dst, "# Main\n") +// } -func TestS3Getter_GetFile_badParams(t *testing.T) { - g := new(S3Getter) - dst := tempTestFile(t) - defer 
os.RemoveAll(filepath.Dir(dst)) +// func TestS3Getter_GetFile_badParams(t *testing.T) { +// g := new(S3Getter) +// dst := tempTestFile(t) +// defer os.RemoveAll(filepath.Dir(dst)) - // Download - err := g.GetFile( - dst, - testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf?aws_access_key_id=foo&aws_access_key_secret=bar&aws_access_token=baz")) - if err == nil { - t.Fatalf("expected error, got none") - } +// // Download +// err := g.GetFile( +// dst, +// testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf?aws_access_key_id=foo&aws_access_key_secret=bar&aws_access_token=baz")) +// if err == nil { +// t.Fatalf("expected error, got none") +// } - if reqerr, ok := err.(awserr.RequestFailure); !ok || reqerr.StatusCode() != 403 { - t.Fatalf("expected InvalidAccessKeyId error") - } -} +// if reqerr, ok := err.(awserr.RequestFailure); !ok || reqerr.StatusCode() != 403 { +// t.Fatalf("expected InvalidAccessKeyId error") +// } +// } -func TestS3Getter_GetFile_notfound(t *testing.T) { - g := new(S3Getter) - dst := tempTestFile(t) - defer os.RemoveAll(filepath.Dir(dst)) +// func TestS3Getter_GetFile_notfound(t *testing.T) { +// g := new(S3Getter) +// dst := tempTestFile(t) +// defer os.RemoveAll(filepath.Dir(dst)) - // Download - err := g.GetFile( - dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/404.tf")) - if err == nil { - t.Fatalf("expected error, got none") - } -} +// // Download +// err := g.GetFile( +// dst, testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/404.tf")) +// if err == nil { +// t.Fatalf("expected error, got none") +// } +// } -func TestS3Getter_ClientMode_dir(t *testing.T) { - g := new(S3Getter) +// func TestS3Getter_ClientMode_dir(t *testing.T) { +// g := new(S3Getter) - // Check client mode on a key prefix with only a single key. - mode, err := g.ClientMode( - testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder")) - if err != nil { - t.Fatalf("err: %s", err) - } - if mode != ClientModeDir { - t.Fatal("expect ClientModeDir") - } -} +// // Check client mode on a key prefix with only a single key. +// mode, err := g.ClientMode( +// testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder")) +// if err != nil { +// t.Fatalf("err: %s", err) +// } +// if mode != ClientModeDir { +// t.Fatal("expect ClientModeDir") +// } +// } -func TestS3Getter_ClientMode_file(t *testing.T) { - g := new(S3Getter) +// func TestS3Getter_ClientMode_file(t *testing.T) { +// g := new(S3Getter) - // Check client mode on a key prefix which contains sub-keys. - mode, err := g.ClientMode( - testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf")) - if err != nil { - t.Fatalf("err: %s", err) - } - if mode != ClientModeFile { - t.Fatal("expect ClientModeFile") - } -} +// // Check client mode on a key prefix which contains sub-keys. +// mode, err := g.ClientMode( +// testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/folder/main.tf")) +// if err != nil { +// t.Fatalf("err: %s", err) +// } +// if mode != ClientModeFile { +// t.Fatal("expect ClientModeFile") +// } +// } -func TestS3Getter_ClientMode_notfound(t *testing.T) { - g := new(S3Getter) +// func TestS3Getter_ClientMode_notfound(t *testing.T) { +// g := new(S3Getter) - // Check the client mode when a non-existent key is looked up. This does not - // return an error, but rather should just return the file mode so that S3 - // can return an appropriate error later on. 
This also checks that the
-	// prefix is handled properly (e.g., "/fold" and "/folder" don't put the
-	// client mode into "dir".
-	mode, err := g.ClientMode(
-		testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/fold"))
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if mode != ClientModeFile {
-		t.Fatal("expect ClientModeFile")
-	}
-}
+// 	// Check the client mode when a non-existent key is looked up. This does not
+// 	// return an error, but rather should just return the file mode so that S3
+// 	// can return an appropriate error later on. This also checks that the
+// 	// prefix is handled properly (e.g., "/fold" and "/folder" don't put the
+// 	// client mode into "dir".
+// 	mode, err := g.ClientMode(
+// 		testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/fold"))
+// 	if err != nil {
+// 		t.Fatalf("err: %s", err)
+// 	}
+// 	if mode != ClientModeFile {
+// 		t.Fatal("expect ClientModeFile")
+// 	}
+// }

-func TestS3Getter_ClientMode_collision(t *testing.T) {
-	g := new(S3Getter)
+// func TestS3Getter_ClientMode_collision(t *testing.T) {
+// 	g := new(S3Getter)

-	// Check that the client mode is "file" if there is both an object and a
-	// folder with a common prefix (i.e., a "collision" in the namespace).
-	mode, err := g.ClientMode(
-		testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/collision/foo"))
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if mode != ClientModeFile {
-		t.Fatal("expect ClientModeFile")
-	}
-}
+// 	// Check that the client mode is "file" if there is both an object and a
+// 	// folder with a common prefix (i.e., a "collision" in the namespace).
+// 	mode, err := g.ClientMode(
+// 		testURL("https://s3.amazonaws.com/hc-oss-test/go-getter/collision/foo"))
+// 	if err != nil {
+// 		t.Fatalf("err: %s", err)
+// 	}
+// 	if mode != ClientModeFile {
+// 		t.Fatal("expect ClientModeFile")
+// 	}
+// }

-func TestS3Getter_Url(t *testing.T) {
-	var s3tests = []struct {
-		name    string
-		url     string
-		region  string
-		bucket  string
-		path    string
-		version string
-	}{
-		{
-			name:    "AWSv1234",
-			url:     "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz?version=1234",
-			region:  "eu-west-1",
-			bucket:  "bucket",
-			path:    "foo/bar.baz",
-			version: "1234",
-		},
-		{
-			name:    "AWSVhostDot",
-			url:     "s3::https://bucket.s3.eu-west-1.amazonaws.com/foo/bar.baz?version=1234",
-			region:  "eu-west-1",
-			bucket:  "bucket",
-			path:    "foo/bar.baz",
-			version: "1234",
-		},
-		{
-			name:    "AWSVhostDash",
-			url:     "s3::https://bucket.s3-eu-west-1.amazonaws.com/foo/bar.baz?version=1234",
-			region:  "eu-west-1",
-			bucket:  "bucket",
-			path:    "foo/bar.baz",
-			version: "1234",
-		},
-		{
-			name:    "localhost-1",
-			url:     "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=TESTID&aws_access_key_secret=TestSecret&region=us-east-2&version=1",
-			region:  "us-east-2",
-			bucket:  "test-bucket",
-			path:    "hello.txt",
-			version: "1",
-		},
-		{
-			name:    "localhost-2",
-			url:     "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=TESTID&aws_access_key_secret=TestSecret&version=1",
-			region:  "us-east-1",
-			bucket:  "test-bucket",
-			path:    "hello.txt",
-			version: "1",
-		},
-		{
-			name:    "localhost-3",
-			url:     "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=TESTID&aws_access_key_secret=TestSecret",
-			region:  "us-east-1",
-			bucket:  "test-bucket",
-			path:    "hello.txt",
-			version: "",
-		},
-	}
+// func TestS3Getter_Url(t *testing.T) {
+// 	var s3tests = []struct {
+// 		name    string
+// 		url     string
+// 		region  string
+// 		bucket  string
+// 		path    string
+// 		version string
+// 	}{
+// 		{
+// 			name:    "AWSv1234",
+// 			url:     "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo/bar.baz?version=1234",
+// 			region:  "eu-west-1",
+// 			bucket:  "bucket",
+// 			path:    "foo/bar.baz",
+// 			version: "1234",
+// 		},
+// 		{
+// 			name:    "AWSVhostDot",
+// 			url:     "s3::https://bucket.s3.eu-west-1.amazonaws.com/foo/bar.baz?version=1234",
+// 			region:  "eu-west-1",
+// 			bucket:  "bucket",
+// 			path:    "foo/bar.baz",
+// 			version: "1234",
+// 		},
+// 		{
+// 			name:    "AWSVhostDash",
+// 			url:     "s3::https://bucket.s3-eu-west-1.amazonaws.com/foo/bar.baz?version=1234",
+// 			region:  "eu-west-1",
+// 			bucket:  "bucket",
+// 			path:    "foo/bar.baz",
+// 			version: "1234",
+// 		},
+// 		{
+// 			name:    "localhost-1",
+// 			url:     "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=TESTID&aws_access_key_secret=TestSecret&region=us-east-2&version=1",
+// 			region:  "us-east-2",
+// 			bucket:  "test-bucket",
+// 			path:    "hello.txt",
+// 			version: "1",
+// 		},
+// 		{
+// 			name:    "localhost-2",
+// 			url:     "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=TESTID&aws_access_key_secret=TestSecret&version=1",
+// 			region:  "us-east-1",
+// 			bucket:  "test-bucket",
+// 			path:    "hello.txt",
+// 			version: "1",
+// 		},
+// 		{
+// 			name:    "localhost-3",
+// 			url:     "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=TESTID&aws_access_key_secret=TestSecret",
+// 			region:  "us-east-1",
+// 			bucket:  "test-bucket",
+// 			path:    "hello.txt",
+// 			version: "",
+// 		},
+// 	}

-	for i, pt := range s3tests {
-		t.Run(pt.name, func(t *testing.T) {
-			g := new(S3Getter)
-			forced, src := getForcedGetter(pt.url)
-			u, err := url.Parse(src)
+// 	for i, pt := range s3tests {
+// 		t.Run(pt.name, func(t *testing.T) {
+// 			g := new(S3Getter)
+// 			forced, src := getForcedGetter(pt.url)
+// 			u, err := url.Parse(src)

-			if err != nil {
-				t.Errorf("test %d: unexpected error: %s", i, err)
-			}
-			if forced != "s3" {
-				t.Fatalf("expected forced protocol to be s3")
-			}
+// 			if err != nil {
+// 				t.Errorf("test %d: unexpected error: %s", i, err)
+// 			}
+// 			if forced != "s3" {
+// 				t.Fatalf("expected forced protocol to be s3")
+// 			}

-			region, bucket, path, version, creds, err := g.parseUrl(u)
+// 			region, bucket, path, version, creds, err := g.parseUrl(u)

-			if err != nil {
-				t.Fatalf("err: %s", err)
-			}
-			if region != pt.region {
-				t.Fatalf("expected %s, got %s", pt.region, region)
-			}
-			if bucket != pt.bucket {
-				t.Fatalf("expected %s, got %s", pt.bucket, bucket)
-			}
-			if path != pt.path {
-				t.Fatalf("expected %s, got %s", pt.path, path)
-			}
-			if version != pt.version {
-				t.Fatalf("expected %s, got %s", pt.version, version)
-			}
-			if &creds == nil {
-				t.Fatalf("expected to not be nil")
-			}
-		})
-	}
-}
+// 			if err != nil {
+// 				t.Fatalf("err: %s", err)
+// 			}
+// 			if region != pt.region {
+// 				t.Fatalf("expected %s, got %s", pt.region, region)
+// 			}
+// 			if bucket != pt.bucket {
+// 				t.Fatalf("expected %s, got %s", pt.bucket, bucket)
+// 			}
+// 			if path != pt.path {
+// 				t.Fatalf("expected %s, got %s", pt.path, path)
+// 			}
+// 			if version != pt.version {
+// 				t.Fatalf("expected %s, got %s", pt.version, version)
+// 			}
+// 			if &creds == nil {
+// 				t.Fatalf("expected to not be nil")
+// 			}
+// 		})
+// 	}
+// }