Merge pull request go-spatial#213 from terranodo/issue-211
Issue 211
ARolek authored Dec 11, 2017
2 parents b14c8a5 + 179b924 commit 0817b4b
Showing 134 changed files with 47,421 additions and 2 deletions.
2 changes: 2 additions & 0 deletions atlas/atlas.go
@@ -6,6 +6,8 @@ import (

"github.com/terranodo/tegola"
"github.com/terranodo/tegola/cache"
_ "github.com/terranodo/tegola/cache/filecache"
_ "github.com/terranodo/tegola/cache/s3cache"
)

// DefaultAtlas is instantiated for convenience
27 changes: 27 additions & 0 deletions cache/errors.go
@@ -20,3 +20,30 @@ type ErrInvalidFileKey struct {
func (e ErrInvalidFileKey) Error() string {
return fmt.Sprintf("cache: invalid fileKey (%v). unable to parse (%v) value (%v) into int", e.path, e.key, e.val)
}

type ErrGettingFromCache struct {
Err error
CacheType string
}

func (e ErrGettingFromCache) Error() string {
return fmt.Sprintf("cache: error getting from (%v) cache: %v", e.CacheType, e.Err)
}

type ErrSettingToCache struct {
Err error
CacheType string
}

func (e ErrSettingToCache) Error() string {
return fmt.Sprintf("cache: error setting to (%v) cache: %v", e.CacheType, e.Err)
}

type ErrPurgingCache struct {
Err error
CacheType string
}

func (e ErrPurgingCache) Error() string {
return fmt.Sprintf("cache: error purging (%v) cache: %v", e.CacheType, e.Err)
}
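
These wrapper types attach the backend's CacheType to the underlying error so callers can tell which cache failed. A minimal sketch of their intended use, assuming the Get signature the backends below implement (the doGet helper is hypothetical):

```go
package example

import (
	"github.com/terranodo/tegola/cache"
)

// doGet is a hypothetical helper that annotates a backend failure
// with the cache type that produced it.
func doGet(c cache.Interface, key *cache.Key) ([]byte, bool, error) {
	val, hit, err := c.Get(key)
	if err != nil {
		return nil, false, cache.ErrGettingFromCache{
			CacheType: "s3",
			Err:       err,
		}
	}
	return val, hit, nil
}
```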
1 change: 0 additions & 1 deletion cache/filecache/filecache.go
@@ -12,7 +12,6 @@ import (

var (
ErrMissingBasepath = errors.New("filecache: missing required param 'basepath'")
ErrCacheMiss = errors.New("filecache: cache miss")
)

const CacheType = "file"
39 changes: 39 additions & 0 deletions cache/s3cache/README.md
@@ -0,0 +1,39 @@
# S3Cache

s3cache is an abstraction on top of Amazon Web Services (AWS) Simple Storage Service (S3) that implements the tegola cache interface. To use it, add the following minimal config to your tegola config file:

```toml
[cache]
type="s3"
bucket="tegola-test-data"
```

## Properties
The s3cache config supports the following properties (a complete example follows the list):

- `bucket` (string): [Required] the name of the S3 bucket to use.
- `basepath` (string): [Optional] a path prefix added to all cache operations inside the S3 bucket. Helpful when the bucket is not dedicated solely to this cache.
- `region` (string): [Optional] the region the bucket is in. Defaults to 'us-east-1'.
- `aws_access_key_id` (string): [Optional] the AWS access key id to use.
- `aws_secret_access_key` (string): [Optional] the AWS secret access key to use.
- `max_zoom` (int): [Optional] the maximum zoom the cache will persist. Beyond this zoom, Set() calls return without doing any work.
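
For reference, a config exercising every property might look like the following sketch; the bucket name, prefix and credentials are placeholders:

```toml
[cache]
type="s3"
bucket="my-tile-cache"                  # placeholder bucket name
basepath="tegola-cache"                 # optional key prefix inside the bucket
region="us-west-2"
aws_access_key_id="YOUR_AKID"           # omit both keys to use the credential provider chain
aws_secret_access_key="YOUR_SECRET_KEY"
max_zoom=14
```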

## Credential chain
If the `aws_access_key_id` and `aws_secret_access_key` are not set, then the [credential provider chain](http://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) will be used. The provider chain supports multiple methods for passing credentials, one of which is setting environment variables. For example:

```bash
$ export AWS_REGION=us-west-2
$ export AWS_ACCESS_KEY_ID=YOUR_AKID
$ export AWS_SECRET_ACCESS_KEY=YOUR_SECRET_KEY
```

## Testing
The tests are designed to run against a live S3 bucket. To run the s3cache tests, the following environment variables need to be set:

```bash
$ export RUN_S3_TESTS=yes
$ export AWS_TEST_BUCKET=YOUR_TEST_BUCKET_NAME
$ export AWS_REGION=TEST_BUCKET_REGION
$ export AWS_ACCESS_KEY_ID=YOUR_AKID
$ export AWS_SECRET_ACCESS_KEY=YOUR_SECRET_KEY
```
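
With those variables exported, the tests run with the standard Go tooling, e.g.:

```bash
$ go test github.com/terranodo/tegola/cache/s3cache
```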
268 changes: 268 additions & 0 deletions cache/s3cache/s3cache.go
@@ -0,0 +1,268 @@
package s3cache

import (
"bytes"
"errors"
"io"
"os"
"path/filepath"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"

"github.com/terranodo/tegola/cache"
"github.com/terranodo/tegola/util/dict"
)

var (
ErrMissingBucket = errors.New("s3cache: missing required param 'bucket'")
)

const CacheType = "s3"

const (
// required
ConfigKeyBucket = "bucket"
// optional
ConfigKeyBasepath = "basepath"
ConfigKeyMaxZoom = "max_zoom"
ConfigKeyRegion = "region" // defaults to "us-east-1"
ConfigKeyAWSAccessKeyID = "aws_access_key_id"
ConfigKeyAWSSecretKey = "aws_secret_access_key"
)

const (
DefaultRegion = "us-east-1"
)

func init() {
cache.Register(CacheType, New)
}

// New instantiates an S3 cache. The config expects the following params:
//
// required:
// bucket (string): the name of the s3 bucket to write to
//
// optional:
// region (string): the AWS region the bucket is located in. defaults to 'us-east-1'
// aws_access_key_id (string): an AWS access key id
// aws_secret_access_key (string): an AWS secret access key
// basepath (string): a path prefix added to all cache operations inside of the S3 bucket
// max_zoom (int): max zoom to use the cache. beyond this zoom cache Set() calls will be ignored
func New(config map[string]interface{}) (cache.Interface, error) {
var err error

s3cache := S3Cache{}

// parse the config
c := dict.M(config)

// TODO: this could be cleaner
defaultMaxZoom := 0
maxZoom, err := c.Int(ConfigKeyMaxZoom, &defaultMaxZoom)
if err != nil {
return nil, err
}
if maxZoom != 0 {
mz := uint(maxZoom)
s3cache.MaxZoom = &mz
}

s3cache.Bucket, err = c.String(ConfigKeyBucket, nil)
if err != nil {
return nil, ErrMissingBucket
}
if s3cache.Bucket == "" {
return nil, ErrMissingBucket
}

// basepath
basepath := ""
s3cache.Basepath, err = c.String(ConfigKeyBasepath, &basepath)
if err != nil {
return nil, err
}

// check for region env var
region := os.Getenv("AWS_REGION")
if region == "" {
region = DefaultRegion
}
region, err = c.String(ConfigKeyRegion, &region)
if err != nil {
return nil, err
}

accessKey := ""
accessKey, err = c.String(ConfigKeyAWSAccessKeyID, &accessKey)
if err != nil {
return nil, err
}
secretKey := ""
secretKey, err = c.String(ConfigKeyAWSSecretKey, &secretKey)
if err != nil {
return nil, err
}

awsConfig := aws.Config{
Region: aws.String(region),
}

// support for static credentials. this is not recommended by AWS but
// necessary for some environments
if accessKey != "" && secretKey != "" {
awsConfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, "")
}

// set up the s3 session.
// if accessKey and secretKey are not provided (static creds), the default provider chain is used
// http://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
s3cache.Client = s3.New(
session.New(&awsConfig),
)

// to confirm we have the correct permissions on the bucket, create a small object
// and test a PUT, GET and DELETE against the bucket
key := cache.Key{
MapName: "tegola-test-map",
LayerName: "test-layer",
Z: 0,
X: 0,
Y: 0,
}
// write a test file
if err := s3cache.Set(&key, []byte("\x53\x69\x6c\x61\x73")); err != nil {
e := cache.ErrSettingToCache{
CacheType: CacheType,
Err: err,
}

return nil, e
}

// read the test file
_, hit, err := s3cache.Get(&key)
if err != nil {
e := cache.ErrGettingFromCache{
CacheType: CacheType,
Err: err,
}

return nil, e
}
if !hit {
// TODO: should a miss on the object we just wrote be returned as an error?
}

// purge the test file
if err := s3cache.Purge(&key); err != nil {
e := cache.ErrPurgingCache{
CacheType: CacheType,
Err: err,
}

return nil, e
}

return &s3cache, nil
}

type S3Cache struct {
// Bucket is the name of the s3 bucket to operate on
Bucket string

// Basepath is a path prefix added to all cache operations inside of the S3 bucket
// helpful so a bucket does not need to be dedicated to only this cache
Basepath string

// MaxZoom determines the max zoom the cache will persist. Beyond this
// zoom, cache Set() calls will be ignored. This is useful if the cache
// should not be leveraged for higher zooms when data changes often.
MaxZoom *uint

// Client holds a reference to the s3 client. It's expected the client
// has an active session and that read, write and delete permissions have been verified
Client *s3.S3
}

func (s3c *S3Cache) Set(key *cache.Key, val []byte) error {
var err error

// skip the write if the tile is beyond the configured max zoom
if s3c.MaxZoom != nil && key.Z > int(*s3c.MaxZoom) {
return nil
}

// add our basepath
k := filepath.Join(s3c.Basepath, key.String())

input := s3.PutObjectInput{
Body: aws.ReadSeekCloser(bytes.NewReader(val)),
Bucket: aws.String(s3c.Bucket),
Key: aws.String(k),
}

_, err = s3c.Client.PutObject(&input)
if err != nil {
return err
}

return nil
}

func (s3c *S3Cache) Get(key *cache.Key) ([]byte, bool, error) {
var err error

// add our basepath
k := filepath.Join(s3c.Basepath, key.String())

input := s3.GetObjectInput{
Bucket: aws.String(s3c.Bucket),
Key: aws.String(k),
}

result, err := s3c.Client.GetObject(&input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchKey:
return nil, false, nil
default:
return nil, false, aerr
}
}
return nil, false, err
}

var buf bytes.Buffer
_, err = io.Copy(&buf, result.Body)
if err != nil {
return nil, false, err
}

return buf.Bytes(), true, nil
}

func (s3c *S3Cache) Purge(key *cache.Key) error {
var err error

// add our basepath
k := filepath.Join(s3c.Basepath, key.String())

input := s3.DeleteObjectInput{
Bucket: aws.String(s3c.Bucket),
Key: aws.String(k),
}

_, err = s3c.Client.DeleteObject(&input)
if err != nil {
return err
}

return nil
}
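
Putting the pieces together, here is a sketch of end-to-end use. It builds the cache through New (which performs the PUT/GET/DELETE permission check above) and round-trips one tile; the bucket name and tile key values are placeholders:

```go
package main

import (
	"log"

	"github.com/terranodo/tegola/cache"
	"github.com/terranodo/tegola/cache/s3cache"
)

func main() {
	// the config keys mirror the ConfigKey* constants above; the bucket is a placeholder
	c, err := s3cache.New(map[string]interface{}{
		"bucket": "my-tile-cache",
		"region": "us-west-2",
	})
	if err != nil {
		log.Fatal(err)
	}

	// a placeholder tile key
	key := cache.Key{MapName: "osm", LayerName: "roads", Z: 4, X: 3, Y: 5}

	// round-trip a tile: write, read back, then purge
	if err := c.Set(&key, []byte("tile bytes")); err != nil {
		log.Fatal(err)
	}

	val, hit, err := c.Get(&key)
	if err != nil {
		log.Fatal(err)
	}
	if !hit {
		log.Fatal("expected a cache hit")
	}
	log.Printf("read %d bytes from the cache", len(val))

	if err := c.Purge(&key); err != nil {
		log.Fatal(err)
	}
}
```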