digitalocean_spaces_bucket: add lifecycle_rule support (#411)

* adapt lifecycle_rule code from aws_s3_bucket

commented out tags code

* copy in acceptance tests w/ minimal adaptation

* fix rebasing conflict

* more renaming and removal of unsupported features

* remove tags from tests

* remove more storage classes

* import did not work since default region not set during import

* remove storage class related attributes

* add warning about keys starting with /

Co-Authored-By: Andrew Starr-Bochicchio <andrewsomething@users.noreply.github.com>

* remove duplicate definition of isAWSError due to merge conflict

* add abort_incomplete_multipart_upload_days test plus some cleanups

* refactor setup of S3 client in tests

* another region fix

* document lifecycle_rule

* remove debugging log statements

* remove moot method (since transitions were removed)

* remove TestAccDigitalOceanSpacesBucket_LifecycleRule_Expiration_EmptyConfigurationBlock

* add AtLeastOneOf config for expiration arguments

* Revert "add AtleastOneOf config for expiration arguments"

This reverts commit c70557be53de67ec9a4063e4fa858e375c9fe5a5.

Doesn't seem to work in sub-arguments

Co-authored-by: Andrew Starr-Bochicchio <andrewsomething@users.noreply.github.com>
Tom Dyas 2020-04-14 07:17:52 -07:00 committed by GitHub
parent a80ab4b5f9
commit 61c7bea9cb
4 changed files with 599 additions and 60 deletions

View File

@@ -1,6 +1,7 @@
package digitalocean
import (
"bytes"
"fmt"
"log"
"strings"
@@ -9,8 +10,11 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/hashicorp/terraform-plugin-sdk/helper/hashcode"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)
func resourceDigitalOceanBucket() *schema.Resource {
@@ -106,6 +110,79 @@ func resourceDigitalOceanBucket() *schema.Resource {
Description: "The FQDN of the bucket",
Computed: true,
},
"lifecycle_rule": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ValidateFunc: validation.StringLenBetween(0, 255),
},
"prefix": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
if strings.HasPrefix(v.(string), "/") {
ws = append(ws, "prefix begins with `/`. In most cases, this should be excluded.")
}
return
},
},
"enabled": {
Type: schema.TypeBool,
Required: true,
},
"abort_incomplete_multipart_upload_days": {
Type: schema.TypeInt,
Optional: true,
},
"expiration": {
Type: schema.TypeSet,
Optional: true,
Set: expirationHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"date": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateS3BucketLifecycleTimestamp,
},
"days": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(0),
},
"expired_object_delete_marker": {
Type: schema.TypeBool,
Optional: true,
},
},
},
},
"noncurrent_version_expiration": {
Type: schema.TypeSet,
MaxItems: 1,
Optional: true,
Set: expirationHash,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"days": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(1),
},
},
},
},
},
},
},
"force_destroy": {
Type: schema.TypeBool,
Description: "Unless true, the bucket will only be destroyed if empty",
@@ -186,6 +263,12 @@ func resourceDigitalOceanBucketUpdate(d *schema.ResourceData, meta interface{})
}
}
if d.HasChange("lifecycle_rule") {
if err := resourceDigitalOceanBucketLifecycleUpdate(svc, d); err != nil {
return err
}
}
return resourceDigitalOceanBucketRead(d, meta)
}
@@ -266,6 +349,95 @@ func resourceDigitalOceanBucketRead(d *schema.ResourceData, meta interface{}) er
return fmt.Errorf("error setting versioning: %s", err)
}
// Read the lifecycle configuration
lifecycleResponse, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
return svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
Bucket: aws.String(d.Id()),
})
})
if err != nil && !isAWSErr(err, "NoSuchLifecycleConfiguration", "") {
return err
}
lifecycleRules := make([]map[string]interface{}, 0)
if lifecycle, ok := lifecycleResponse.(*s3.GetBucketLifecycleConfigurationOutput); ok && len(lifecycle.Rules) > 0 {
lifecycleRules = make([]map[string]interface{}, 0, len(lifecycle.Rules))
for _, lifecycleRule := range lifecycle.Rules {
log.Printf("[DEBUG] Spaces bucket: %s, read lifecycle rule: %v", d.Id(), lifecycleRule)
rule := make(map[string]interface{})
// ID
if lifecycleRule.ID != nil && *lifecycleRule.ID != "" {
rule["id"] = *lifecycleRule.ID
}
filter := lifecycleRule.Filter
if filter != nil {
if filter.And != nil {
// Prefix
if filter.And.Prefix != nil && *filter.And.Prefix != "" {
rule["prefix"] = *filter.And.Prefix
}
} else {
// Prefix
if filter.Prefix != nil && *filter.Prefix != "" {
rule["prefix"] = *filter.Prefix
}
}
} else {
if lifecycleRule.Prefix != nil {
rule["prefix"] = *lifecycleRule.Prefix
}
}
// Enabled
if lifecycleRule.Status != nil {
if *lifecycleRule.Status == s3.ExpirationStatusEnabled {
rule["enabled"] = true
} else {
rule["enabled"] = false
}
}
// AbortIncompleteMultipartUploadDays
if lifecycleRule.AbortIncompleteMultipartUpload != nil {
if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil {
rule["abort_incomplete_multipart_upload_days"] = int(*lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
}
}
// expiration
if lifecycleRule.Expiration != nil {
e := make(map[string]interface{})
if lifecycleRule.Expiration.Date != nil {
e["date"] = (*lifecycleRule.Expiration.Date).Format("2006-01-02")
}
if lifecycleRule.Expiration.Days != nil {
e["days"] = int(*lifecycleRule.Expiration.Days)
}
if lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil {
e["expired_object_delete_marker"] = *lifecycleRule.Expiration.ExpiredObjectDeleteMarker
}
rule["expiration"] = schema.NewSet(expirationHash, []interface{}{e})
}
// noncurrent_version_expiration
if lifecycleRule.NoncurrentVersionExpiration != nil {
e := make(map[string]interface{})
if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil {
e["days"] = int(*lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)
}
rule["noncurrent_version_expiration"] = schema.NewSet(expirationHash, []interface{}{e})
}
lifecycleRules = append(lifecycleRules, rule)
}
}
if err := d.Set("lifecycle_rule", lifecycleRules); err != nil {
return fmt.Errorf("error setting lifecycle_rule: %s", err)
}
// Set the bucket's name.
d.Set("name", d.Get("name").(string))
@@ -464,6 +636,108 @@ func resourceDigitalOceanSpacesBucketVersioningUpdate(s3conn *s3.S3, d *schema.R
return nil
}
func resourceDigitalOceanBucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
bucket := d.Get("name").(string)
lifecycleRules := d.Get("lifecycle_rule").([]interface{})
if len(lifecycleRules) == 0 {
i := &s3.DeleteBucketLifecycleInput{
Bucket: aws.String(bucket),
}
_, err := s3conn.DeleteBucketLifecycle(i)
if err != nil {
return fmt.Errorf("Error removing S3 lifecycle: %s", err)
}
return nil
}
rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))
for i, lifecycleRule := range lifecycleRules {
r := lifecycleRule.(map[string]interface{})
rule := &s3.LifecycleRule{}
// Filter
filter := &s3.LifecycleRuleFilter{}
filter.SetPrefix(r["prefix"].(string))
rule.SetFilter(filter)
// ID
if val, ok := r["id"].(string); ok && val != "" {
rule.ID = aws.String(val)
} else {
rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
}
// Enabled
if val, ok := r["enabled"].(bool); ok && val {
rule.Status = aws.String(s3.ExpirationStatusEnabled)
} else {
rule.Status = aws.String(s3.ExpirationStatusDisabled)
}
// AbortIncompleteMultipartUpload
if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
DaysAfterInitiation: aws.Int64(int64(val)),
}
}
// Expiration
expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
if len(expiration) > 0 {
e := expiration[0].(map[string]interface{})
i := &s3.LifecycleExpiration{}
if val, ok := e["date"].(string); ok && val != "" {
t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
if err != nil {
return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
}
i.Date = aws.Time(t)
} else if val, ok := e["days"].(int); ok && val > 0 {
i.Days = aws.Int64(int64(val))
} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
i.ExpiredObjectDeleteMarker = aws.Bool(val)
}
rule.Expiration = i
}
// NoncurrentVersionExpiration
nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
if len(nc_expiration) > 0 {
e := nc_expiration[0].(map[string]interface{})
if val, ok := e["days"].(int); ok && val > 0 {
rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
NoncurrentDays: aws.Int64(int64(val)),
}
}
}
rules = append(rules, rule)
}
i := &s3.PutBucketLifecycleConfigurationInput{
Bucket: aws.String(bucket),
LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
Rules: rules,
},
}
_, err := retryOnAwsCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
return s3conn.PutBucketLifecycleConfiguration(i)
})
if err != nil {
return fmt.Errorf("Error putting S3 lifecycle: %s", err)
}
return nil
}
func resourceDigitalOceanBucketImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
if strings.Contains(d.Id(), ",") {
s := strings.Split(d.Id(), ",")
@@ -504,3 +778,41 @@ func normalizeRegion(region string) string {
return region
}
func expirationHash(v interface{}) int {
var buf bytes.Buffer
m, ok := v.(map[string]interface{})
if !ok {
return 0
}
if v, ok := m["date"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["days"]; ok {
buf.WriteString(fmt.Sprintf("%d-", v.(int)))
}
if v, ok := m["expired_object_delete_marker"]; ok {
buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
}
return hashcode.String(buf.String())
}
func validateS3BucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
_, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value))
if err != nil {
errors = append(errors, fmt.Errorf(
"%q cannot be parsed as RFC3339 Timestamp Format", value))
}
return
}
func isAWSErr(err error, code string, message string) bool {
if err, ok := err.(awserr.Error); ok {
return err.Code() == code && strings.Contains(err.Message(), message)
}
return false
}

View File

@@ -541,17 +541,6 @@ func deleteS3ObjectVersion(conn *s3.S3, b, k, v string, force bool) error {
return err
}
// Returns true if the error matches all these conditions:
// * err is of type awserr.Error
// * Error.Code() matches code
// * Error.Message() contains message
func isAWSErr(err error, code string, message string) bool {
if err, ok := err.(awserr.Error); ok {
return err.Code() == code && strings.Contains(err.Message(), message)
}
return false
}
func stringMapToPointers(m map[string]interface{}) map[string]*string {
list := make(map[string]*string, len(m))
for i, v := range m {

View File

@@ -2,7 +2,6 @@ package digitalocean
import (
"fmt"
"log"
"os"
"reflect"
"testing"
@@ -299,29 +298,175 @@ resource "digitalocean_spaces_bucket" "bucket" {
})
}
func TestAccDigitalOceanSpacesBucket_LifecycleBasic(t *testing.T) {
rInt := acctest.RandInt()
resourceName := "digitalocean_spaces_bucket.bucket"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanBucketDestroy,
Steps: []resource.TestStep{
{
Config: testAccDigitalOceanSpacesBucketConfigWithLifecycle(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanBucketExists(resourceName),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.id", "id1"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.prefix", "path1/"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.expiration.2613713285.days", "365"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.expiration.2613713285.date", ""),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.expiration.2613713285.expired_object_delete_marker", "false"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.id", "id2"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.prefix", "path2/"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.expiration.2855832418.date", "2016-01-12"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.expiration.2855832418.days", "0"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.expiration.2855832418.expired_object_delete_marker", "false"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.2.id", "id3"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.2.prefix", "path3/"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.2.abort_incomplete_multipart_upload_days", "30"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateId: fmt.Sprintf("ams3,tf-test-bucket-%d", rInt),
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"force_destroy", "acl"},
},
{
Config: testAccDigitalOceanSpacesBucketConfigWithVersioningLifecycle(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanBucketExists(resourceName),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.id", "id1"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.prefix", "path1/"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.enabled", "true"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.id", "id2"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.prefix", "path2/"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.enabled", "false"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.1.noncurrent_version_expiration.80908210.days", "365"),
),
},
{
Config: testAccDigitalOceanBucketConfig(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanBucketExists(resourceName),
),
},
},
})
}
func TestAccDigitalOceanSpacesBucket_LifecycleExpireMarkerOnly(t *testing.T) {
rInt := acctest.RandInt()
resourceName := "digitalocean_spaces_bucket.bucket"
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanBucketDestroy,
Steps: []resource.TestStep{
{
Config: testAccDigitalOceanSpacesBucketConfigWithLifecycleExpireMarker(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanBucketExists(resourceName),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.id", "id1"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.prefix", "path1/"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.expiration.3591068768.days", "0"),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.expiration.3591068768.date", ""),
resource.TestCheckResourceAttr(
resourceName, "lifecycle_rule.0.expiration.3591068768.expired_object_delete_marker", "true"),
),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateId: fmt.Sprintf("ams3,tf-test-bucket-%d", rInt),
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{
"force_destroy", "acl"},
},
{
Config: testAccDigitalOceanBucketConfig(rInt),
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanBucketExists(resourceName),
),
},
},
})
}
func testAccGetS3ConnForSpacesBucket(rs *terraform.ResourceState) (*s3.S3, error) {
rawRegion := ""
if actualRegion, ok := rs.Primary.Attributes["region"]; ok {
rawRegion = actualRegion
}
region := normalizeRegion(rawRegion)
spacesAccessKeyId := os.Getenv("SPACES_ACCESS_KEY_ID")
if spacesAccessKeyId == "" {
return nil, fmt.Errorf("SPACES_ACCESS_KEY_ID must be set")
}
spacesSecretAccessKey := os.Getenv("SPACES_SECRET_ACCESS_KEY")
if spacesSecretAccessKey == "" {
return nil, fmt.Errorf("SPACES_SECRET_ACCESS_KEY must be set")
}
sesh, err := session.NewSession(&aws.Config{
Region: aws.String(region),
Credentials: credentials.NewStaticCredentials(spacesAccessKeyId, spacesSecretAccessKey, "")},
)
if err != nil {
return nil, fmt.Errorf("Unable to create S3 session (region=%s): %v", region, err)
}
svc := s3.New(sesh, &aws.Config{
Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", region))},
)
return svc, nil
}
func testAccCheckDigitalOceanBucketDestroy(s *terraform.State) error {
return testAccCheckDigitalOceanBucketDestroyWithProvider(s, testAccProvider)
}
func testAccCheckDigitalOceanBucketDestroyWithProvider(s *terraform.State, provider *schema.Provider) error {
for _, rs := range s.RootModule().Resources {
sesh, err := session.NewSession(&aws.Config{
Region: aws.String(rs.Primary.Attributes["region"]),
Credentials: credentials.NewStaticCredentials(os.Getenv("SPACES_ACCESS_KEY_ID"), os.Getenv("SPACES_SECRET_ACCESS_KEY"), "")},
)
svc := s3.New(sesh, &aws.Config{
Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", rs.Primary.Attributes["region"]))},
)
if err != nil {
log.Fatal(err)
}
if rs.Type != "digitalocean_spaces_bucket" {
continue
}
svc, err := testAccGetS3ConnForSpacesBucket(rs)
if err != nil {
return fmt.Errorf("Unable to create S3 client: %v", err)
}
_, err = svc.DeleteBucket(&s3.DeleteBucketInput{
Bucket: aws.String(rs.Primary.ID),
})
@@ -350,16 +495,9 @@ func testAccCheckDigitalOceanBucketExistsWithProvider(n string, providerF func()
return fmt.Errorf("No ID is set")
}
sesh, err := session.NewSession(&aws.Config{
Region: aws.String(rs.Primary.Attributes["region"]),
Credentials: credentials.NewStaticCredentials(os.Getenv("SPACES_ACCESS_KEY_ID"), os.Getenv("SPACES_SECRET_ACCESS_KEY"), "")},
)
svc := s3.New(sesh, &aws.Config{
Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", rs.Primary.Attributes["region"]))},
)
svc, err := testAccGetS3ConnForSpacesBucket(rs)
if err != nil {
log.Fatal(err)
return fmt.Errorf("Unable to create S3 client: %v", err)
}
_, err = svc.HeadBucket(&s3.HeadBucketInput{
@@ -388,16 +526,9 @@ func testAccCheckDigitalOceanDestroyBucket(n string) resource.TestCheckFunc {
return fmt.Errorf("No Spaces Bucket ID is set")
}
sesh, err := session.NewSession(&aws.Config{
Region: aws.String(rs.Primary.Attributes["region"]),
Credentials: credentials.NewStaticCredentials(os.Getenv("SPACES_ACCESS_KEY_ID"), os.Getenv("SPACES_SECRET_ACCESS_KEY"), "")},
)
svc := s3.New(sesh, &aws.Config{
Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", rs.Primary.Attributes["region"]))},
)
svc, err := testAccGetS3ConnForSpacesBucket(rs)
if err != nil {
log.Fatal(err)
return fmt.Errorf("Unable to create S3 client: %v", err)
}
_, err = svc.DeleteBucket(&s3.DeleteBucketInput{
@@ -422,13 +553,10 @@ func testAccCheckDigitalOceanBucketCors(n string, corsRules []*s3.CORSRule) reso
return fmt.Errorf("No Spaces Bucket ID is set")
}
sesh, err := session.NewSession(&aws.Config{
Region: aws.String(rs.Primary.Attributes["region"]),
Credentials: credentials.NewStaticCredentials(os.Getenv("SPACES_ACCESS_KEY_ID"), os.Getenv("SPACES_SECRET_ACCESS_KEY"), "")},
)
svc := s3.New(sesh, &aws.Config{
Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", rs.Primary.Attributes["region"]))},
)
svc, err := testAccGetS3ConnForSpacesBucket(rs)
if err != nil {
return fmt.Errorf("Unable to create S3 client: %v", err)
}
out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
Bucket: aws.String(rs.Primary.ID),
@@ -450,16 +578,9 @@ func testAccCheckDigitalOceanBucketVersioning(n string, versioningStatus string)
return func(s *terraform.State) error {
rs := s.RootModule().Resources[n]
sesh, err := session.NewSession(&aws.Config{
Region: aws.String(rs.Primary.Attributes["region"]),
Credentials: credentials.NewStaticCredentials(os.Getenv("SPACES_ACCESS_KEY_ID"), os.Getenv("SPACES_SECRET_ACCESS_KEY"), "")},
)
svc := s3.New(sesh, &aws.Config{
Endpoint: aws.String(fmt.Sprintf("https://%s.digitaloceanspaces.com", rs.Primary.Attributes["region"]))},
)
svc, err := testAccGetS3ConnForSpacesBucket(rs)
if err != nil {
log.Fatal(err)
return fmt.Errorf("Unable to create S3 client: %v", err)
}
out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
@@ -577,3 +698,95 @@ resource "digitalocean_spaces_bucket" "bucket" {
}
}
`
func testAccDigitalOceanSpacesBucketConfigWithLifecycle(randInt int) string {
return fmt.Sprintf(`
resource "digitalocean_spaces_bucket" "bucket" {
name = "tf-test-bucket-%d"
acl = "private"
region = "ams3"
lifecycle_rule {
id = "id1"
prefix = "path1/"
enabled = true
expiration {
days = 365
}
}
lifecycle_rule {
id = "id2"
prefix = "path2/"
enabled = true
expiration {
date = "2016-01-12"
}
}
lifecycle_rule {
id = "id3"
prefix = "path3/"
enabled = true
abort_incomplete_multipart_upload_days = 30
}
}
`, randInt)
}
func testAccDigitalOceanSpacesBucketConfigWithLifecycleExpireMarker(randInt int) string {
return fmt.Sprintf(`
resource "digitalocean_spaces_bucket" "bucket" {
name = "tf-test-bucket-%d"
acl = "private"
region = "ams3"
lifecycle_rule {
id = "id1"
prefix = "path1/"
enabled = true
expiration {
expired_object_delete_marker = "true"
}
}
}
`, randInt)
}
func testAccDigitalOceanSpacesBucketConfigWithVersioningLifecycle(randInt int) string {
return fmt.Sprintf(`
resource "digitalocean_spaces_bucket" "bucket" {
name = "tf-test-bucket-%d"
acl = "private"
region = "ams3"
versioning {
enabled = false
}
lifecycle_rule {
id = "id1"
prefix = "path1/"
enabled = true
noncurrent_version_expiration {
days = 365
}
}
lifecycle_rule {
id = "id2"
prefix = "path2/"
enabled = false
noncurrent_version_expiration {
days = 365
}
}
}
`, randInt)
}

View File

@@ -79,6 +79,7 @@ The following arguments are supported:
* `region` - The region where the bucket resides (Defaults to `nyc3`)
* `acl` - Canned ACL applied on bucket creation (`private` or `public-read`)
* `cors_rule` - (Optional) A rule of Cross-Origin Resource Sharing (documented below).
* `lifecycle_rule` - (Optional) A configuration of object lifecycle management (documented below).
* `versioning` - (Optional) A state of versioning (documented below)
* `force_destroy` - Unless `true`, the bucket will only be destroyed if empty (Defaults to `false`)
@@ -89,6 +90,30 @@ The `cors_rule` object supports the following:
* `allowed_origins` - (Required) A list of hosts from which requests using the specified methods are allowed. A host may contain one wildcard (e.g. http://*.example.com).
* `max_age_seconds` - (Optional) The time in seconds that browser can cache the response for a preflight request.
The `lifecycle_rule` object supports the following:
* `id` - (Optional) Unique identifier for the rule.
* `prefix` - (Optional) Object key prefix identifying one or more objects to which the rule applies.
* `enabled` - (Required) Specifies lifecycle rule status.
* `abort_incomplete_multipart_upload_days` - (Optional) Specifies the number of days after initiating a multipart
upload when the multipart upload must be completed or else Spaces will abort the upload.
* `expiration` - (Optional) Specifies a time period after which applicable objects expire (documented below).
* `noncurrent_version_expiration` - (Optional) Specifies when non-current object versions expire (documented below).
At least one of `expiration` or `noncurrent_version_expiration` must be specified.
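For example, a configuration sketch combining these arguments (the bucket name, prefixes, and rule IDs below are placeholders):

```hcl
resource "digitalocean_spaces_bucket" "example" {
  name   = "example-bucket"
  region = "nyc3"
  acl    = "private"

  # Expire log objects 90 days after creation and clean up
  # multipart uploads that are never completed.
  lifecycle_rule {
    id      = "expire-logs"
    prefix  = "logs/"
    enabled = true

    abort_incomplete_multipart_upload_days = 7

    expiration {
      days = 90
    }
  }

  # Expire everything under a prefix on a fixed date.
  lifecycle_rule {
    id      = "expire-archive"
    prefix  = "archive/"
    enabled = true

    expiration {
      date = "2021-01-01"
    }
  }
}
```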
The `expiration` object supports the following:
* `date` - (Optional) Specifies the date after which you want applicable objects to expire. The argument uses
the RFC3339 calendar-date format, e.g. "2019-02-28".
* `days` - (Optional) Specifies the number of days after object creation when the applicable objects will expire.
* `expired_object_delete_marker` - (Optional) On a versioned bucket (versioning-enabled or versioning-suspended
bucket), setting this to true directs Spaces to delete expired object delete markers.
The `noncurrent_version_expiration` object supports the following:
* `days` - (Required) Specifies the number of days after which an object's non-current versions expire.
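A sketch of a versioned bucket that expires non-current versions and removes leftover delete markers (again with placeholder names):

```hcl
resource "digitalocean_spaces_bucket" "versioned_example" {
  name   = "versioned-example-bucket"
  region = "nyc3"
  acl    = "private"

  versioning {
    enabled = true
  }

  lifecycle_rule {
    id      = "retire-old-versions"
    prefix  = "data/"
    enabled = true

    # Remove non-current versions 30 days after they are superseded.
    noncurrent_version_expiration {
      days = 30
    }

    # Remove delete markers once no non-current versions remain.
    expiration {
      expired_object_delete_marker = true
    }
  }
}
```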
The `versioning` object supports the following:
* `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned