func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	bucket := d.Get("bucket").(string)

	lifecycleRules := d.Get("lifecycle_rule").([]interface{})

	// No lifecycle_rule blocks left in the configuration: delete the bucket's
	// lifecycle configuration instead of putting an empty one.
	if len(lifecycleRules) == 0 {
		i := &s3.DeleteBucketLifecycleInput{
			Bucket: aws.String(bucket),
		}

		err := resource.Retry(1*time.Minute, func() *resource.RetryError {
			if _, err := s3conn.DeleteBucketLifecycle(i); err != nil {
				return resource.NonRetryableError(err)
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("Error removing S3 lifecycle: %s", err)
		}
		return nil
	}

	rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules))

	for i, lifecycleRule := range lifecycleRules {
		r := lifecycleRule.(map[string]interface{})

		rule := &s3.LifecycleRule{
			Prefix: aws.String(r["prefix"].(string)),
		}

		// ID
		if val, ok := r["id"].(string); ok && val != "" {
			rule.ID = aws.String(val)
		} else {
			rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-"))
		}

		// Enabled
		if val, ok := r["enabled"].(bool); ok && val {
			rule.Status = aws.String(s3.ExpirationStatusEnabled)
		} else {
			rule.Status = aws.String(s3.ExpirationStatusDisabled)
		}

		// AbortIncompleteMultipartUpload
		if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 {
			rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{
				DaysAfterInitiation: aws.Int64(int64(val)),
			}
		}

		// Expiration
		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).(*schema.Set).List()
		if len(expiration) > 0 {
			e := expiration[0].(map[string]interface{})
			i := &s3.LifecycleExpiration{}

			if val, ok := e["date"].(string); ok && val != "" {
				// Dates are configured as YYYY-MM-DD; pad to midnight UTC so they parse as RFC3339.
				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
				if err != nil {
					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
				}
				i.Date = aws.Time(t)
			} else if val, ok := e["days"].(int); ok && val > 0 {
				i.Days = aws.Int64(int64(val))
			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
				i.ExpiredObjectDeleteMarker = aws.Bool(val)
			}
			rule.Expiration = i
		}

		// NoncurrentVersionExpiration
		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).(*schema.Set).List()
		if len(nc_expiration) > 0 {
			e := nc_expiration[0].(map[string]interface{})

			if val, ok := e["days"].(int); ok && val > 0 {
				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(int64(val)),
				}
			}
		}

		// Transitions
		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
		if len(transitions) > 0 {
			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
			for _, transition := range transitions {
				transition := transition.(map[string]interface{})
				i := &s3.Transition{}
				if val, ok := transition["date"].(string); ok && val != "" {
					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
					if err != nil {
						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error())
					}
					i.Date = aws.Time(t)
				} else if val, ok := transition["days"].(int); ok && val > 0 {
					i.Days = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.Transitions = append(rule.Transitions, i)
			}
		}

		// NoncurrentVersionTransitions
		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
		if len(nc_transitions) > 0 {
			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
			for _, transition := range nc_transitions {
				transition := transition.(map[string]interface{})
				i := &s3.NoncurrentVersionTransition{}
				if val, ok := transition["days"].(int); ok && val > 0 {
					i.NoncurrentDays = aws.Int64(int64(val))
				}
				if val, ok := transition["storage_class"].(string); ok && val != "" {
					i.StorageClass = aws.String(val)
				}

				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
			}
		}

		rules = append(rules, rule)
	}

	i := &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: rules,
		},
	}

	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		if _, err := s3conn.PutBucketLifecycleConfiguration(i); err != nil {
			return resource.NonRetryableError(err)
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("Error putting S3 lifecycle: %s", err)
	}

	return nil
}
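// The sketch below is an editorial addition, not part of the provider source:
// it isolates the date handling used in the Expiration and Transition blocks
// above, where a "YYYY-MM-DD" value from the configuration is padded to
// midnight UTC so it satisfies the RFC3339 layout expected by time.Parse.
// It is a self-contained program; the sample date is arbitrary.
package main

import (
	"fmt"
	"time"
)

func main() {
	val := "2021-06-01" // date string as it would appear in the Terraform configuration

	// Same construction as in resourceAwsS3BucketLifecycleUpdate: append a
	// midnight-UTC time component so the plain date parses as RFC3339.
	t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	fmt.Println(t.UTC()) // 2021-06-01 00:00:00 +0000 UTC
}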