// SetConfig replaces the environ's configuration. It rebuilds the EC2 and
// S3 clients and the storage handles from the new settings while holding
// the config mutex; storage instances created earlier keep referencing
// their old configuration.
func (e *environ) SetConfig(cfg *config.Config) error {
	ecfg, err := providerInstance.newConfig(cfg)
	if err != nil {
		return err
	}
	e.ecfgMutex.Lock()
	defer e.ecfgMutex.Unlock()
	e.ecfgUnlocked = ecfg
	// NOTE(review): unkeyed aws.Auth literal — assumes the field order is
	// (AccessKey, SecretKey); confirm against the aws package version in use.
	auth := aws.Auth{ecfg.accessKey(), ecfg.secretKey()}
	region := aws.Regions[ecfg.region()]
	publicBucketRegion := aws.Regions[ecfg.publicBucketRegion()]
	e.ec2Unlocked = ec2.New(auth, region)
	e.s3Unlocked = s3.New(auth, region)
	// create new storage instances, existing instances continue
	// to reference their existing configuration.
	e.storageUnlocked = &storage{
		bucket: e.s3Unlocked.Bucket(ecfg.controlBucket()),
	}
	// The public bucket is optional; when unset, public storage is disabled.
	if ecfg.publicBucket() != "" {
		e.publicStorageUnlocked = &storage{
			bucket: s3.New(auth, publicBucketRegion).Bucket(ecfg.publicBucket()),
		}
	} else {
		e.publicStorageUnlocked = nil
	}
	return nil
}
// Communicate with all endpoints to see if they are alive. func (s *ClientTests) TestRegions(c *C) { errs := make(chan error, len(aws.Regions)) for _, region := range aws.Regions { go func(r aws.Region) { s := s3.New(s.s3.Auth, r) b := s.Bucket("goamz-" + s.Auth.AccessKey) _, err := b.Get("non-existent") errs <- err }(region) } for _ = range aws.Regions { err := <-errs if err != nil { s3_err, ok := err.(*s3.Error) if ok { c.Check(s3_err.Code, Matches, "NoSuchBucket") } else if _, ok = err.(*net.DNSError); ok { // Okay as well. } else { c.Errorf("Non-S3 error: %s", err) } } else { c.Errorf("Test should have errored but it seems to have succeeded") } } }
// Puts an `ExportResult` struct to an S3 bucket within the specified directory func (x *S3) Store(result *ExportResult, directory string) error { if result.Error != nil { return result.Error } file, err := os.Open(result.Path) if err != nil { return err } defer file.Close() buffy := bufio.NewReader(file) stat, err := file.Stat() if err != nil { return err } size := stat.Size() auth := aws.Auth{ AccessKey: x.AccessKey, SecretKey: x.ClientSecret, } s := s3.New(auth, aws.Regions[x.Region]) bucket := s.Bucket(x.Bucket) err = bucket.PutReader(directory+result.Filename(), buffy, size, result.MIME, s3.BucketOwnerFull) return err }
// TestCreateBucketBackward verifies the backward (rollback) step of
// createBucketAction: after creating the app bucket and running
// Backward, listing the bucket must fail because it has been removed.
func (s *S) TestCreateBucketBackward(c *gocheck.C) {
	patchRandomReader()
	defer unpatchRandomReader()
	auth := aws.Auth{AccessKey: "access", SecretKey: "s3cr3t"}
	region := aws.Region{
		Name:                 "myregion",
		S3Endpoint:           s.t.S3Server.URL(),
		S3LocationConstraint: true,
		S3LowercaseBucket:    true,
	}
	s3Client := s3.New(auth, region)
	app := App{Name: "leper"}
	// Simulate the forward step by creating the bucket directly.
	err := s3Client.Bucket(app.Name).PutBucket(s3.BucketOwnerFull)
	c.Assert(err, gocheck.IsNil)
	env := s3Env{
		Auth:               aws.Auth{AccessKey: "access", SecretKey: "s3cr3t"},
		bucket:             app.Name,
		endpoint:           s.t.S3Server.URL(),
		locationConstraint: true,
	}
	ctx := action.BWContext{Params: []interface{}{&app}, FWResult: &env}
	createBucketAction.Backward(ctx)
	// The bucket should be gone, so listing it must error.
	_, err = s3Client.Bucket(app.Name).List("", "/", "", 100)
	c.Assert(err, gocheck.NotNil)
}
func (s *S) TestBootstrapInstanceIdHealerHeal(c *gocheck.C) { ec2Server, err := ec2test.NewServer() c.Assert(err, gocheck.IsNil) defer ec2Server.Quit() s3Server, err := s3test.NewServer(nil) c.Assert(err, gocheck.IsNil) defer s3Server.Quit() h := bootstrapInstanceIdHealer{} region := aws.SAEast region.EC2Endpoint = ec2Server.URL() region.S3Endpoint = s3Server.URL() h.e = ec2.New(aws.Auth{AccessKey: "some", SecretKey: "thing"}, region) sg, err := h.ec2().CreateSecurityGroup("juju-delta-0", "") c.Assert(err, gocheck.IsNil) h.s = s3.New(aws.Auth{AccessKey: "some", SecretKey: "thing"}, region) jujuBucket := "ble" config.Set("juju:bucket", jujuBucket) bucket := h.s3().Bucket(jujuBucket) err = bucket.PutBucket(s3.PublicReadWrite) c.Assert(err, gocheck.IsNil) resp, err := h.ec2().RunInstances(&ec2.RunInstances{MaxCount: 1, SecurityGroups: []ec2.SecurityGroup{sg.SecurityGroup}}) c.Assert(err, gocheck.IsNil) err = bucket.Put("provider-state", []byte("doesnotexist"), "binary/octet-stream", s3.PublicReadWrite) c.Assert(err, gocheck.IsNil) c.Assert(h.needsHeal(), gocheck.Equals, true) err = h.Heal() c.Assert(err, gocheck.IsNil) data, err := bucket.Get("provider-state") expected := "zookeeper-instances: [" + resp.Instances[0].InstanceId + "]" c.Assert(string(data), gocheck.Equals, expected) }
func PostSignup(w *rest.ResponseWriter, req *rest.Request) { signup := Signup{} err := req.DecodeJsonPayload(&signup) if err != nil { rest.Error(w, err.Error(), http.StatusInternalServerError) return } if signup.Email == "" { rest.Error(w, "email is required", 400) return } auth, err := aws.EnvAuth() if err != nil { panic(err.Error()) } s := s3.New(auth, aws.EUWest) bucket := s.Bucket("seq-signup") data := []byte(signup.Email) err = bucket.Put(signup.Email, data, "text/plain", s3.BucketOwnerFull) if err != nil { panic(err.Error()) } fmt.Println(signup) w.Header().Set("Access-Control-Allow-Origin", "http://sequenceiq.com") w.WriteJson(&signup) }
// Create the named bucket in the named region -- with the supplied creds // and return the bucket *object func CreateBucket(creds aws.Auth, region aws.Region, bucket_name string) *s3.Bucket { sss := s3.New(creds, region) bucket := sss.Bucket(bucket_name) log.Println("Creating s3 bucket") bucket.PutBucket(s3.Private) return bucket }
func main() { flag.Parse() cmds := flag.Args() if len(cmds) <= 0 { fmt.Println("Command required") os.Exit(1) } cmd := Cmd{} cmd.name = cmds[0] cmd.modifiers = cmds[1:] auth, err := aws.EnvAuth() if err != nil { panic(err) } cmd.conn = s3.New(auth, getRegion(region)) s3url, err := url.Parse(cmd.modifiers[0]) cmd.url = s3url cmd.bucket = s3.Bucket{cmd.conn, cmd.url.Host} RunCommand(cmd) }
func main() { flag.Parse() if *toHash == "" || *pkgDirs == "" || *cmd == "" || *bucketName == "" { log.Print("You are missing one or more mandatory command line arguments.") flag.Usage() os.Exit(1) } cred, err := aws.EnvAuth() if err != nil { log.Fatalf("Couldn't auth into s3. Did you set up ENV? Error: %s", err) } s3client := s3.New(cred, aws.USWest) bucket := s3client.Bucket(*bucketName) // extract. checksum := fmt.Sprintf("%x", hashFiles(*toHash).Sum(nil)) filename := *outfile // consider bucket.GetReader to pipe directly into gunzip/untar file, err := bucket.Get(fmt.Sprintf("%s/%s", checksum, filename)) if err != nil { fmt.Printf("%s\n", err) build(*cmd) archive(*pkgDirs, filename) upload(bucket, checksum, filename) } else { extract(file) } }
// SetUpSuite wires the shared client tests to the local fake S3 server.
func (s *LocalServerSuite) SetUpSuite(c *C) {
	s.srv.SetUp(c)
	s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
	// TODO Sadly the fake server ignores auth completely right now. :-(
	s.clientTests.authIsBroken = true
}
// Read the state from S3. func readState() { state := State{} wd, err := os.Getwd() fmt.Printf("wd: %s\n", wd) // read from S3 auth, err := aws.EnvAuth() if err != nil { panic(err.Error()) } s := s3.New(auth, aws.USEast) bucket := s.Bucket(s3_bucket) data, err := bucket.Get("rss.json") if err != nil { panic(err) } fmt.Printf("Data read from S3\n") // old way /* file, err := os.Open("state.json") if err != nil { panic(err) } data, err := ioutil.ReadAll(file) if err != nil { panic(err) } */ err = json.Unmarshal(data, &state) if err != nil { panic(err) } folders = state.Folders }
// Deploys a site to S3. func (s *Site) Deploy(user, pass, url string) error { auth := aws.Auth{AccessKey: user, SecretKey: pass} b := s3.New(auth, aws.USEast).Bucket(url) // walks _site directory and uploads file to S3 walker := func(fn string, fi os.FileInfo, err error) error { if fi.IsDir() { return nil } rel, _ := filepath.Rel(s.Dest, fn) typ := mime.TypeByExtension(filepath.Ext(rel)) content, err := ioutil.ReadFile(fn) log.Printf(MsgUploadFile, rel) if err != nil { return err } // try to upload the file ... sometimes this fails due to amazon // issues. If so, we'll re-try if err := b.Put(rel, content, typ, s3.PublicRead); err != nil { time.Sleep(100 * time.Millisecond) // sleep so that we don't immediately retry return b.Put(rel, content, typ, s3.PublicRead) } // file upload was a success, return nil return nil } return filepath.Walk(s.Dest, walker) }
// Goroutine for saving the state.
// saver runs forever, waking on each tick. The upload happens only when
// the shared dirty flag is set; the flag and folders are both accessed
// under the package-level lock.
func saver(ticker *time.Ticker) {
	for {
		select {
		case <-ticker.C:
			println("Writing state")
			lock.Lock()
			if dirty {
				// Clear the flag before uploading; a concurrent update
				// cannot interleave here because the lock is still held
				// for the whole marshal+upload.
				dirty = false
				state := State{folders}
				bytes, err := json.Marshal(state)
				if err != nil {
					panic(err)
				}
				// write to S3
				auth, err := aws.EnvAuth()
				if err != nil {
					panic(err)
				}
				s := s3.New(auth, aws.USEast)
				bucket := s.Bucket(s3_bucket)
				err = bucket.Put("rss.json", bytes, "application/json", s3.ACL("private"))
				if err != nil {
					panic(err)
				}
			}
			lock.Unlock()
		}
	}
}
// Sync walks localPath and uploads/compares each file against the named
// bucket. One goroutine is spawned per file (unbounded); doneChan is a
// completion counter used to wait for all of them before returning.
func Sync(localPath, bucketName, awsRegion, acl string) {
	if localPath == "" || bucketName == "" {
		flag.PrintDefaults()
		return
	}
	auth, err := aws.EnvAuth()
	if err != nil {
		panic(err.Error())
	}
	s3Conn := s3.New(auth, aws.Regions[awsRegion])
	bucket := s3Conn.Bucket(bucketName)
	pathChan := make(chan string)
	doneChan := make(chan int)
	go doTheWalk(localPath, pathChan)
	count := 0
	for path := range pathChan {
		// NOTE(review): "" is treated as the walker's end-of-stream
		// sentinel — confirm doTheWalk sends "" (or closes pathChan)
		// when it finishes, otherwise this loop never exits.
		if path == "" {
			break
		}
		go compareAndSync(acl, localPath, path, bucket, doneChan)
		count++
	}
	// Wait for every spawned sync goroutine to report completion.
	for i := 0; i < count; i++ {
		<-doneChan
	}
}
// SetUpSuite prepares the live-AWS client suite; it is skipped unless
// the -amazon flag was supplied.
func (s *AmazonClientSuite) SetUpSuite(c *C) {
	if !*amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	s.s3 = s3.New(s.srv.auth, s.Region)
}
// Build builds the slug with the received tar as content and upload it to S3 func Build(name string, tar io.Reader) (string, error) { slug := bytes.NewBuffer([]byte{}) appBuildCache := fmt.Sprintf("/tmp/app-cache/%s", name) os.MkdirAll(appBuildCache, 0700) builder := exec.Command("docker", "run", "-i", "-a", "stdin", "-a", "stdout", "-a", "stderr", "-v", fmt.Sprintf("%s:/tmp/cache:rw", appBuildCache), "flynn/slugbuilder", "-") builder.Stderr = os.Stdout builder.Stdout = slug builder.Stdin = tar if err := builder.Run(); err != nil { return "", err } var auth = aws.Auth{AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY} var s3conn = s3.New(auth, aws.Region{S3Endpoint: S3_ENDPOINT}) var bucket = s3conn.Bucket(BUCKET_NAME) var name_sha1 = sha1.Sum([]byte(name)) var key = fmt.Sprintf("%x.tgz", name_sha1[:10]) err := bucket.PutReader(key, slug, int64(slug.Len()), "application/tar", "private") if err != nil { return "", err } return bucket.SignedURL(key, time.Now().Add(10*time.Second)), nil }
func s3Put(config *BackupConfig, local, remote string, public bool) error { localf, err := os.Open(local) if err != nil { return err } defer localf.Close() localfi, err := localf.Stat() if err != nil { return err } auth := aws.Auth{config.AwsAccess, config.AwsSecret} b := s3.New(auth, aws.USEast).Bucket(config.Bucket) acl := s3.Private if public { acl = s3.PublicRead } contType := mime.TypeByExtension(path.Ext(local)) if contType == "" { contType = "binary/octet-stream" } err = b.PutBucket(acl) if err != nil { return err } return b.PutReader(remote, localf, localfi.Size(), contType, acl) }
func UploadEntity(dir string, entity *Entity) (string, error) { spl := strings.Split(dir, "/") endDir := spl[len(spl)-1] // auth, err := aws.EnvAuth() // if err != nil { // return "", err // } // Open Bucket s := s3.New(aws.Auth{Config.AwsKey, Config.AwsSecret}, aws.USWest2) bucket := s.Bucket(Config.bucket) b, err := ioutil.ReadFile(dir) if err != nil { return "", err } err = bucket.Put("/"+endDir, b, "text/plain", s3.PublicRead) if err != nil { return "", err } awsLink := bucket.URL("/" + endDir) return awsLink, err }
// startServer boots local EC2 and S3 test servers, registers a fake
// "test" region that points at them, uploads fake tools to the
// juju-dist bucket, and seeds three availability zones in different
// states for the tests to observe.
func (srv *localServer) startServer(c *gc.C) {
	var err error
	srv.ec2srv, err = ec2test.NewServer()
	if err != nil {
		c.Fatalf("cannot start ec2 test server: %v", err)
	}
	srv.s3srv, err = s3test.NewServer(srv.config)
	if err != nil {
		c.Fatalf("cannot start s3 test server: %v", err)
	}
	// NOTE(review): mutates the global aws.Regions map; acceptable in a
	// test fixture, but the entry is not removed here on teardown.
	aws.Regions["test"] = aws.Region{
		Name:                 "test",
		EC2Endpoint:          srv.ec2srv.URL(),
		S3Endpoint:           srv.s3srv.URL(),
		S3LocationConstraint: true,
		Sign:                 aws.SignV2,
	}
	s3inst := s3.New(aws.Auth{}, aws.Regions["test"])
	storage := ec2.BucketStorage(s3inst.Bucket("juju-dist"))
	envtesting.UploadFakeTools(c, storage)
	srv.addSpice(c)
	// One zone in each state: available, impaired, unavailable.
	zones := make([]amzec2.AvailabilityZoneInfo, 3)
	zones[0].Region = "test"
	zones[0].Name = "test-available"
	zones[0].State = "available"
	zones[1].Region = "test"
	zones[1].Name = "test-impaired"
	zones[1].State = "impaired"
	zones[2].Region = "test"
	zones[2].Name = "test-unavailable"
	zones[2].State = "unavailable"
	srv.ec2srv.SetAvailabilityZones(zones)
}
// Persist a brain
// Save gob-encodes the brain in memory and uploads the blob to the
// configured S3 bucket under filename. Panics on any failure.
func (brain *Brain) Save() {
	// Encode the brain into an in-memory gob blob.
	var data bytes.Buffer
	contents := gob.NewEncoder(&data)
	err := contents.Encode(brain)
	if err != nil {
		panic(err)
	}
	// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables are used.
	auth, err := aws.EnvAuth()
	if err != nil {
		panic(err.Error())
	}
	// Open Bucket
	s := s3.New(auth, aws.USEast)
	// Store the database in the S3 bucket. (The original comment said
	// "Load the database" — this is the save path.)
	bucket := s.Bucket(bucketName)
	err = bucket.Put(filename, data.Bytes(), "text/plain", s3.BucketOwnerFull)
	if err != nil {
		panic(err.Error())
	}
}
// Load a brain func (brain *Brain) Load() { // The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables are used. auth, err := aws.EnvAuth() if err != nil { panic(err.Error()) } // Open Bucket s := s3.New(auth, aws.USEast) // Load the database from an S3 bucket bucket := s.Bucket(bucketName) // Create a bytes.Buffer n, err := bucket.Get(filename) if err != nil { panic(err) } p := bytes.NewBuffer(n) dec := gob.NewDecoder(p) err = dec.Decode(&brain) if err != nil { log.Print("There was an error loading the brain. Using a blank one.") } }
// SetUpSuite prepares the live-AWS domain-style client suite: US East
// with a per-bucket (virtual-host) endpoint. Skipped without -amazon.
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
	if !*amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	region := aws.USEast
	// ${bucket} is presumably expanded to the bucket name by the s3
	// package — TODO confirm.
	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
	s.s3 = s3.New(s.srv.auth, region)
}
// SetUpSuite prepares the live-AWS client suite and removes any
// leftovers from previously interrupted runs. Skipped unless the
// testutil -amazon flag is set.
func (s *AmazonClientSuite) SetUpSuite(c *C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	s.s3 = s3.New(s.srv.auth, s.Region)
	// In case tests were interrupted in the middle before.
	s.ClientTests.Cleanup()
}
// setupS3Connection validates the S3 command-line parameters and
// initializes the package-level auth, s3Connection and bucket values.
// Exits the process when a mandatory flag is missing.
func setupS3Connection() {
	if *s3_bucket_name == "" || *s3_access_key == "" || *s3_secret_key == "" {
		log.Printf("[init] missing S3 params")
		os.Exit(1)
	}
	// NOTE(review): unkeyed composite literals assume the field order
	// (AccessKey, SecretKey) for aws.Auth and (connection, name) for
	// s3.Bucket — confirm against the goamz version in use.
	auth = aws.Auth{*s3_access_key, *s3_secret_key}
	s3Connection = s3.New(auth, aws.USEast)
	bucket = s3.Bucket{s3Connection, *s3_bucket_name}
}
func (self *S3) getBucket() *s3.Bucket { auth := aws.Auth{ AccessKey: self.AccessKey, SecretKey: self.SecretKey, } region := aws.Regions[self.Region] connection := s3.New(auth, region) return connection.Bucket(self.Bucket) }
func New(access, secret, endpoint, bucketName string) (s3Backend *S3, err error) { s3Conn := s3.New(aws.Auth{access, secret}, aws.Region{S3Endpoint: endpoint}) bucket := s3Conn.Bucket(bucketName) err = bucket.PutBucket("") if err != nil { return nil, err } return &S3{bucket}, nil }
// SetUpSuite prepares the live-AWS domain-style client suite with a
// per-bucket endpoint and cleans leftovers from interrupted runs.
// Skipped unless the testutil -amazon flag is set.
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	region := s.Region
	// ${bucket} is presumably expanded to the bucket name by the s3
	// package — TODO confirm.
	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
	s.s3 = s3.New(s.srv.auth, region)
	s.ClientTests.Cleanup()
}
func (ss *S3Storage) Connect() error { if ss.BucketName == "" { return errors.New("S3 bucket name mandatory to establish a connection") } s3Conn := s3.New(ss.AwsAuth, ss.Region) ss.connexion = s3Conn.Bucket(ss.BucketName) return nil }
// TestWrapS3 runs the generic backend test suite against a WrapS3
// backend talking to the real EU-West S3. Requires AWS credentials in
// the environment; fails fast otherwise.
func TestWrapS3(t *testing.T) {
	auth, err := aws.EnvAuth()
	if err != nil {
		t.Fatalf("Need AWS auth for testing: %v", err)
	}
	// TODO: CL switch or similar to change the AWS zone
	backend := WrapS3(goamzs3.New(auth, aws.EUWest))
	testS3backend(backend, t)
	return
}
// SetUpSuite prepares the live-AWS domain-style suite with a per-bucket
// endpoint. Skipped without the -amazon flag.
func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
	if !*amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	region := s.Region
	// TODO(dfc) this subsitution only works for us-east-1
	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
	s.s3 = s3.New(s.srv.auth, region)
}