Example #1
func (s *S3) Download(remote_path, local_path string, uid, gid int, perms, listener string) error {
	log.Infof("S3 Downloading %s", local_path)
	conf, keys := s.GetS3Config()

	// Open the bucket to download from
	s3 := s3gof3r.New("", *keys)
	b := s3.Bucket(s.config.Listeners[s.listener].Bucket)

	r, _, err := b.GetReader(remote_path, conf)
	if err != nil {
		return err
	}
	// stream to file
	if _, err = utils.FileWrite(local_path, r, uid, gid, perms); err != nil {
		return err
	}
	err = r.Close()
	if err != nil {
		return err
	}
	basePath := s.config.Listeners[listener].BasePath
	datastore.UpdateHost(listener, strings.TrimPrefix(local_path, basePath))

	return nil
}
Example #2
func main() {

	s3gof3r.SetLogger(os.Stdout, "", log.LstdFlags, false)

	// STARTEXAMPLE OMIT
	presentation, err := os.Open(file_name) // open presentation file
	if err != nil {
		log.Fatal(err)
	}

	k, err := s3gof3r.EnvKeys() // get S3 keys from environment
	if err != nil {
		log.Fatal(err)
	}
	// Open bucket to put file into
	s3 := s3gof3r.New("", k)
	b := s3.Bucket(bucket_name)

	// Open a PutWriter for upload
	w, err := b.PutWriter(presentation.Name(), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if _, err = io.Copy(w, presentation); err != nil { // Copy into S3
		log.Fatal(err)
	}
	log.Printf("%s uploaded to %s", file_name, bucket_name)
	// STOPEXAMPLE OMIT
}
Example #3
func reloc(bucketName, oldPath, newPath string, keys s3gof3r.Keys) {
	s3 := s3gof3r.New("s3.amazonaws.com", keys)
	bucket := s3.Bucket(bucketName)
	// PutWriter initiates a multipart upload (a POST under the hood), while an S3
	// object copy must be a single PUT, so the obvious call can't be used here:
	//w, err := s3.Bucket(bucketName).PutWriter(newPath, copyInstruction, s3Conf)
	// Instead, implement the AWS copy API by hand.
	req, err := http.NewRequest("PUT", "", &bytes.Buffer{})
	if err != nil {
		panic(integrity.WarehouseConnectionError.Wrap(err))
	}
	req.URL.Scheme = s3Conf.Scheme
	req.URL.Host = fmt.Sprintf("%s.%s", bucketName, s3.Domain)
	req.URL.Path = path.Clean(fmt.Sprintf("/%s", newPath))
	// Communicate the copy source object with a header.
	// Note that if the copy source object doesn't exist, Amazon reports it as a 404, even though the request URI itself is valid.
	req.Header.Add("x-amz-copy-source", path.Join("/", bucketName, oldPath))
	bucket.Sign(req)
	resp, err := s3Conf.Client.Do(req)
	if err != nil {
		panic(integrity.WarehouseConnectionError.Wrap(err))
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		panic(integrity.WarehouseConnectionError.Wrap(newRespError(resp)))
	}
	// delete previous location
	if err := bucket.Delete(oldPath); err != nil {
		panic(integrity.WarehouseConnectionError.Wrap(err))
	}
}
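For reference, a minimal way one might invoke reloc, assuming credentials come from the environment; the bucket and key names below are made up for illustration:
func relocExample() {
	keys, err := s3gof3r.EnvKeys() // read AWS credentials from the environment
	if err != nil {
		panic(err)
	}
	// server-side copy followed by delete of the old key, as implemented above
	reloc("my-bucket", "old/path/blob.tar", "new/path/blob.tar", keys)
}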
Example #4
func (s *S3) Upload(local_path, remote_path string) error {
	conf, keys := s.GetS3Config()

	// Open bucket to put file into
	s3 := s3gof3r.New("", *keys)
	b := s3.Bucket(s.config.Listeners[s.listener].Bucket)

	// open file to upload
	file, err := os.Open(local_path)
	if err != nil {
		return err
	}
	defer file.Close()

	// Open a PutWriter for upload
	w, err := b.PutWriter(remote_path, nil, conf)
	if err != nil {
		return err
	}
	if _, err = io.Copy(w, file); err != nil { // Copy into S3
		return err
	}
	if err = w.Close(); err != nil {
		return err
	}
	return nil

}
Example #5
func DownloadFromS3(k s3gof3r.Keys, bucketName, filename string) (bytes.Buffer, error) {
	s3 := s3gof3r.New("", k)
	b := s3.Bucket(bucketName)

	r, _, err := b.GetReader(filename, nil)
	if err != nil {
		return bytes.Buffer{}, err
	}

	var output bytes.Buffer

	if _, err = io.Copy(&output, r); err != nil {
		return bytes.Buffer{}, err
	}

	err = r.Close()
	if err != nil {
		return bytes.Buffer{}, err
	}

	return output, nil
}
Example #6
func (rm *rmOpts) Execute(args []string) error {

	k, err := getAWSKeys()
	if err != nil {
		return err
	}

	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	s3 := s3gof3r.New(rm.EndPoint, k)
	s3gof3r.SetLogger(os.Stderr, "", log.Ltime, rm.Debug)

	// parse positional rm args
	if len(args) != 1 {
		return fmt.Errorf("rm: path argument required")
	}

	u, err := url.ParseRequestURI(args[0])
	if err != nil {
		return fmt.Errorf("parse error: %s", err)
	}
	if u.Host != "" && u.Scheme != "s3" {
		return fmt.Errorf("parse error: %s", u.String())
	}
	return s3.Bucket(u.Host).Delete(u.Path)
}
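Several of the CLI examples rely on a getAWSKeys helper that is not shown. A plausible sketch, assuming it simply tries the environment first and then falls back to EC2 instance-metadata credentials (imports: errors plus the s3gof3r package):
func getAWSKeys() (s3gof3r.Keys, error) {
	keys, err := s3gof3r.EnvKeys() // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
	if err == nil {
		return keys, nil
	}
	keys, err = s3gof3r.InstKeys() // EC2 instance-role credentials
	if err == nil {
		return keys, nil
	}
	return s3gof3r.Keys{}, errors.New("no AWS keys found in environment or instance metadata")
}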
Example #7
func UploadToS3(k s3gof3r.Keys, bucketName string, pathToFile string) error {
	s3 := s3gof3r.New("", k)
	b := s3.Bucket(bucketName)

	file, err := os.Open(pathToFile)
	if err != nil {
		return err
	}
	defer file.Close()

	stats, err := file.Stat()
	if err != nil {
		return err
	}

	dstPath := FormatDstPath("/backup/%hostname%/%timestamp%", stats.Name())

	w, err := b.PutWriter(dstPath, nil, nil)
	if err != nil {
		return err
	}

	if _, err = io.Copy(w, file); err != nil { // Copy into S3
		return err
	}

	if err = w.Close(); err != nil {
		return err
	}

	return nil
}
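FormatDstPath is project-specific and not defined in this example. A hypothetical sketch, assuming it expands the %hostname% and %timestamp% placeholders and appends the file name (imports: os, path, strings, time):
func FormatDstPath(template, fileName string) string {
	host, _ := os.Hostname()
	dst := strings.Replace(template, "%hostname%", host, -1)
	dst = strings.Replace(dst, "%timestamp%", time.Now().Format("20060102T150405"), -1)
	return path.Join(dst, fileName)
}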
Example #8
File: s3.go Project: kevball/cfops
// SafeCreateS3Bucket builds an S3Bucket handle for storing files in an s3-compatible blobstore (it does not create the bucket remotely)
func SafeCreateS3Bucket(domain, bucket, accessKey, secretKey string) (*S3Bucket, error) {
	s := &S3Bucket{
		Bucket:    bucket,
		Name:      "s3",
		Domain:    domain,
		AccessKey: accessKey,
		SecretKey: secretKey,
	}
	if s.Bucket == "" {
		return nil, errors.New("bucket name is undefined")
	}
	var k s3gof3r.Keys
	var err error

	if s.AccessKey == "" || s.SecretKey == "" {
		k, err = s3gof3r.EnvKeys() // get S3 keys from environment
		if err != nil {
			return nil, err
		}
	} else {
		k = s3gof3r.Keys{
			AccessKey: s.AccessKey,
			SecretKey: s.SecretKey,
		}
	}
	s3 := s3gof3r.New(s.Domain, k)
	s.bucket = s3.Bucket(s.Bucket)
	return s, nil
}
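A hypothetical call site, showing the fallback to environment keys when the access and secret key arguments are empty; the domain and bucket values are made up:
func safeCreateExample() {
	b, err := SafeCreateS3Bucket("s3.amazonaws.com", "my-backups", "", "")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("using bucket %s via %s", b.Bucket, b.Domain)
}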
Example #9
func makeS3writer(bucketName string, path string, keys s3gof3r.Keys) io.WriteCloser {
	s3 := s3gof3r.New("s3.amazonaws.com", keys)
	w, err := s3.Bucket(bucketName).PutWriter(path, nil, s3Conf)
	if err != nil {
		panic(integrity.WarehouseConnectionError.Wrap(err))
	}
	return w
}
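makeS3writer (and makeS3reader in Example #13) reference a package-level s3Conf that is not shown. A minimal sketch, assuming it is just a private copy of the library's default configuration:
var s3Conf = func() *s3gof3r.Config {
	conf := *s3gof3r.DefaultConfig // copy so local tweaks don't mutate the shared default
	return &conf
}()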
Example #10
func (cp *cpOpts) Execute(args []string) (err error) {

	k, err := getAWSKeys()
	if err != nil {
		return
	}

	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	s3 := s3gof3r.New(cp.EndPoint, k)
	conf.Concurrency = cp.Concurrency
	if cp.NoSSL {
		conf.Scheme = "http"
	}
	conf.PartSize = cp.PartSize
	conf.Md5Check = !cp.NoMd5
	s3gof3r.SetLogger(os.Stderr, "", log.LstdFlags, cp.Debug)

	src, err := func(src string) (io.ReadCloser, error) {
		u, err := url.Parse(src)
		if err != nil {
			return nil, fmt.Errorf("parse error: %s", err)
		}

		if u.Host == "" {
			return os.Open(u.Path)
		}
		r, _, err := s3.Bucket(u.Host).GetReader(u.Path, conf)
		return r, err
	}(cp.Source)
	if err != nil {
		return
	}

	dst, err := func(dst string) (io.WriteCloser, error) {
		u, err := url.Parse(dst)
		if err != nil {
			return nil, fmt.Errorf("parse error: %s", err)
		}

		if u.Host == "" {
			return os.Create(u.Path)
		}

		return s3.Bucket(u.Host).PutWriter(u.Path, ACL(cp.Header, cp.ACL), conf)
	}(cp.Dest)
	if err != nil {
		return
	}

	if _, err = io.Copy(dst, src); err != nil {
		return
	}
	if err = src.Close(); err != nil {
		return
	}
	return dst.Close()
}
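The ACL helper passed to PutWriter above is not shown. A plausible sketch, assuming it records a canned ACL in the x-amz-acl request header (import: net/http):
func ACL(h http.Header, acl string) http.Header {
	if acl == "" {
		return h
	}
	if h == nil {
		h = make(http.Header)
	}
	h.Set("x-amz-acl", acl)
	return h
}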
Example #11
func (s *S3) Remove(remote_path string) bool {
	log.Infof("Removing file %s from s3 storage", remote_path)
	_, keys := s.GetS3Config()

	// Open the bucket that holds the file
	s3 := s3gof3r.New("", *keys)
	b := s3.Bucket(s.config.Listeners[s.listener].Bucket)
	if err := b.Delete(remote_path); err != nil {
		log.Errorf("failed to remove %s: %v", remote_path, err)
		return false
	}
	return true
}
Example #12
func (cp *cpOpts) Execute(args []string) (err error) {

	k, err := getAWSKeys()
	if err != nil {
		return
	}

	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	s3 := s3gof3r.New(cp.EndPoint, k)
	conf.Concurrency = cp.Concurrency
	if cp.NoSSL {
		conf.Scheme = "http"
	}
	conf.PartSize = cp.PartSize
	conf.Md5Check = !cp.NoMd5
	conf.NTry = cp.NTry
	s3gof3r.SetLogger(os.Stderr, "", log.LstdFlags, cp.Debug)

	src, err := func(src string) (io.ReadCloser, error) {
		if !strings.HasPrefix(strings.ToLower(src), "s3") {
			return os.Open(src)
		}
		u, err := url.ParseRequestURI(src)
		if err != nil {
			return nil, fmt.Errorf("parse error: %s", err)
		}

		r, _, err := s3.Bucket(u.Host).GetReader(u.Path, conf)
		return r, err
	}(cp.Source)
	if err != nil {
		return
	}
	defer checkClose(src, err)

	dst, err := func(dst string) (io.WriteCloser, error) {
		if !strings.HasPrefix(strings.ToLower(dst), "s3") {
			return os.Create(dst)
		}
		u, err := url.ParseRequestURI(dst)
		if err != nil {
			return nil, fmt.Errorf("parse error: %s", err)
		}

		return s3.Bucket(u.Host).PutWriter(u.Path, ACL(cp.Header, cp.ACL), conf)
	}(cp.Dest)
	if err != nil {
		return
	}

	defer checkClose(dst, err)
	_, err = io.Copy(dst, src)
	return
}
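checkClose is another helper these CLI examples assume. A sketch of the likely pattern; note that the err argument is captured when the defer statement executes, so a Close failure can only be logged here, not returned (imports: io, log):
func checkClose(c io.Closer, err error) {
	if c == nil {
		return
	}
	if cerr := c.Close(); cerr != nil && err == nil {
		log.Println("close error:", cerr)
	}
}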
Example #13
func makeS3reader(bucketName string, path string, keys s3gof3r.Keys) io.ReadCloser {
	s3 := s3gof3r.New("s3.amazonaws.com", keys)
	r, _, err := s3.Bucket(bucketName).GetReader(path, s3Conf)
	if err != nil {
		if err2, ok := err.(*s3gof3r.RespError); ok && err2.Code == "NoSuchKey" {
			panic(integrity.DataDNE.New("not stored here"))
		} else {
			panic(integrity.WarehouseConnectionError.Wrap(err))
		}
	}
	return r
}
Example #14
func newS3WriterProvider(awsAccessKey string, awsSecretKey string, s3Domain string, bucketName string) *S3WriterProvider {
	s3gof3r.DefaultDomain = s3Domain

	awsKeys := s3gof3r.Keys{
		AccessKey: awsAccessKey,
		SecretKey: awsSecretKey,
	}

	s3 := s3gof3r.New("", awsKeys)
	bucket := s3.Bucket(bucketName)

	return &S3WriterProvider{bucket}
}
Example #15
func NewS3Remote(config config.Config) (*S3Remote, error) {
	s3, err := newS3Client(config)
	if err != nil {
		return &S3Remote{}, err
	}

	s3gof3rKeys := s3gof3r.Keys{AccessKey: config.AWS.AccessKeyID, SecretKey: config.AWS.SecretAccessKey}

	return &S3Remote{
		config:               config,
		BucketName:           config.AWS.S3URL.Host,
		client:               s3,
		uploadDownloadClient: s3gof3r.New("", s3gof3rKeys),
	}, nil
}
Example #16
func (get *Get) Execute(args []string) (err error) {
	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	k, err := getAWSKeys()
	if err != nil {
		return
	}
	s3 := s3gof3r.New(get.EndPoint, k)
	b := s3.Bucket(get.Bucket)
	if get.Concurrency > 0 {
		conf.Concurrency = get.Concurrency
	}
	conf.PartSize = get.PartSize
	conf.Md5Check = !get.CheckDisable
	get.Key = url.QueryEscape(get.Key)

	if get.VersionId != "" {
		get.Key = fmt.Sprintf("%s?versionId=%s", get.Key, get.VersionId)
	}
	log.Println("GET: ", get)

	w, err := os.Create(get.Path)
	if err != nil {
		if get.Path == "" {
			w = os.Stdout
		} else {
			return
		}
	}
	defer w.Close()
	r, header, err := b.GetReader(get.Key, conf)
	if err != nil {
		return
	}
	if _, err = io.Copy(w, r); err != nil {
		return
	}
	if err = r.Close(); err != nil {
		return
	}
	log.Println("Headers: ", header)
	if get.Debug {
		debug()
	}
	return
}
Example #17
func (get *getOpts) Execute(args []string) (err error) {
	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	k, err := getAWSKeys()
	if err != nil {
		return
	}
	s3 := s3gof3r.New(get.EndPoint, k)
	b := s3.Bucket(get.Bucket)
	conf.Concurrency = get.Concurrency
	if get.NoSSL {
		conf.Scheme = "http"
	}
	conf.PartSize = get.PartSize
	conf.Md5Check = !get.NoMd5

	s3gof3r.SetLogger(os.Stderr, "", log.LstdFlags, get.Debug)

	if get.VersionID != "" {
		get.Key = fmt.Sprintf("%s?versionId=%s", get.Key, get.VersionID)
	}

	w, err := os.Create(get.Path)
	if err != nil {
		if get.Path == "" {
			w = os.Stdout
		} else {
			return
		}
	}
	defer w.Close()
	r, header, err := b.GetReader(get.Key, conf)
	if err != nil {
		return
	}
	if _, err = io.Copy(w, r); err != nil {
		return
	}
	if err = r.Close(); err != nil {
		return
	}
	if get.Debug {
		log.Println("Headers: ", header)
	}
	return
}
Example #18
func NewS3Remote(config config.Config) (*S3Remote, error) {
	s3, err := newS3Client(config)
	if err != nil {
		return &S3Remote{}, err
	}

	s3gof3rKeys, err := getS3gof3rKeys(config)
	if err != nil {
		return &S3Remote{}, err
	}

	return &S3Remote{
		config:               config,
		BucketName:           config.AWS.S3URL.Host,
		client:               s3,
		uploadDownloadClient: s3gof3r.New("", s3gof3rKeys),
	}, nil
}
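getS3gof3rKeys (used here and in the next example) is not shown. A plausible sketch, assuming the config carries static credentials and the environment is used as a fallback; the field names follow Example #15:
func getS3gof3rKeys(config config.Config) (s3gof3r.Keys, error) {
	if config.AWS.AccessKeyID == "" || config.AWS.SecretAccessKey == "" {
		return s3gof3r.EnvKeys()
	}
	return s3gof3r.Keys{
		AccessKey: config.AWS.AccessKeyID,
		SecretKey: config.AWS.SecretAccessKey,
	}, nil
}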
Example #19
func newUploadDownloadClient(config config.Config) (*s3gof3r.S3, error) {
	s3gKeys, err := getS3gof3rKeys(config)
	if err != nil {
		return nil, err
	}

	var s3domain string

	// We have to do this due to a recent region related change in s3gof3r:
	// https://github.com/rlmcpherson/s3gof3r/blob/b574ee38528c51c2c8652b79e71245817c59bd61/s3gof3r.go#L28-L43
	if config.AWS.Region == "us-east-1" {
		s3domain = ""
	} else {
		s3domain = fmt.Sprintf("s3-%v.amazonaws.com", config.AWS.Region)
	}

	return s3gof3r.New(s3domain, s3gKeys), nil
}
Example #20
func newS3Output(conf map[string]string) (output, error) {
	bucketName := conf["bucket"]
	if bucketName == "" {
		return nil, errors.New("No bucket specified")
	}
	fileName := conf["filename"]
	if fileName == "" {
		return nil, errors.New("No file name specified")
	}

	keys, err := s3gof3r.EnvKeys()
	if err != nil {
		return nil, err
	}
	s3 := s3gof3r.New(conf["endpoint"], keys)
	bucket := s3.Bucket(bucketName)

	return bucket.PutWriter(fileName, nil, nil)
}
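A hypothetical call site, assuming the project's output type is satisfied by the io.WriteCloser returned by PutWriter; the bucket and file names are made up:
func s3OutputExample() {
	out, err := newS3Output(map[string]string{
		"bucket":   "my-bucket",
		"filename": "logs/app.log",
		"endpoint": "", // an empty endpoint selects the library's default S3 domain
	})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := out.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}
	if err := out.Close(); err != nil { // Close completes the upload, so its error matters
		log.Fatal(err)
	}
}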
Example #21
func (put *Put) Execute(args []string) (err error) {
	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	k, err := getAWSKeys()
	if err != nil {
		return
	}
	s3 := s3gof3r.New(put.EndPoint, k)
	b := s3.Bucket(put.Bucket)
	if put.Concurrency > 0 {
		conf.Concurrency = put.Concurrency
	}
	conf.PartSize = put.PartSize
	conf.Md5Check = !put.CheckDisable
	log.Println(put)
	if put.Header == nil {
		put.Header = make(http.Header)
	}

	r, err := os.Open(put.Path)
	if err != nil {
		if put.Path == "" {
			r = os.Stdin
		} else {
			return
		}
	}
	defer r.Close()
	w, err := b.PutWriter(put.Key, put.Header, conf)
	if err != nil {
		return
	}
	if _, err = io.Copy(w, r); err != nil {
		return
	}
	if err = w.Close(); err != nil {
		return
	}
	if put.Debug {
		debug()
	}
	return
}
Example #22
File: put.go Project: jda/s3gof3r
func (put *putOpts) Execute(args []string) (err error) {
	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	k, err := getAWSKeys()
	if err != nil {
		return
	}
	s3 := s3gof3r.New(put.EndPoint, k)
	b := s3.Bucket(put.Bucket)
	conf.Concurrency = put.Concurrency
	if put.NoSSL {
		conf.Scheme = "http"
	}
	conf.PartSize = put.PartSize
	conf.Md5Check = !put.NoMd5
	s3gof3r.SetLogger(os.Stderr, "", log.LstdFlags, put.Debug)

	if put.Header == nil {
		put.Header = make(http.Header)
	}

	r, err := os.Open(put.Path)
	if err != nil {
		if put.Path == "" {
			r = os.Stdin
		} else {
			return
		}
	}
	defer r.Close()
	w, err := b.PutWriter(put.Key, put.Header, conf)
	if err != nil {
		return
	}
	if _, err = io.Copy(w, r); err != nil {
		return
	}
	if err = w.Close(); err != nil {
		return
	}
	return
}
Example #23
func NewS3ImageFactory(bucketName string) *ImageFactory {
	factory := new(ImageFactory)
	//log.Print(imageCollections)
	k, err := s3gof3r.EnvKeys() // get S3 keys from environment
	if err != nil {
		log.Fatal("Unable to init s3", err)
	}

	// Open bucket to put file into
	s3 := s3gof3r.New("", k)

	bucket := s3.Bucket(bucketName)
	if bucket == nil {
		log.Fatal("Unable to init s3", err)
	}
	factory.NewImage = func(r, o string, b bool) ImageFile {
		return NewS3Image(s3, bucket, r, o, b)
	}
	return factory
}
Example #24
// GetLegacy ...
func (la *LegacyArguments) GetLegacy() (*Legacy, error) {
	// Create a "TEST" snapshot in order to work out which tables are active
	// Get a list of Keyspaces and Table Names (plus directories)
	// Walk through all the directories.
	auth, err := aws.GetAuth(
		la.AwsAccessKey,
		la.AwsSecret,
		"",
		time.Now().AddDate(0, 0, 1))
	if err != nil {
		return nil, err
	}

	// Check the bucket exists.
	bucket := s3.New(auth, GetAwsRegion(la.AwsRegion)).Bucket(la.S3Bucket)
	_, err = bucket.List("/", "/", "", 1)
	if err != nil {
		return nil, err
	}

	streamAccess := s3gof3r.New("", s3gof3r.Keys{
		AccessKey:     la.AwsAccessKey,
		SecretKey:     la.AwsSecret,
		SecurityToken: "",
	})

	streamBucket := streamAccess.Bucket(la.S3Bucket)

	legacy := &Legacy{
		DataDirectories: make([]string, 0),
		S3Bucket:        bucket,
		S3StreamBucket:  streamBucket,
		LogDirectory:    la.LogDirectory,
		NewSnapshot:     la.NewSnapshot,
	}

	legacy.MachineName, _ = os.Hostname()
	legacy.DataDirectories = SplitAndTrim(la.DataDirectories, ",")
	legacy.ExcludeKeyspaces = SplitAndTrim(la.ExcludeKeyspaces, ",")

	return legacy, nil
}
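SplitAndTrim is a small project helper that is not shown. A hypothetical sketch, assuming it splits on the separator, trims whitespace, and drops empty entries (import: strings):
func SplitAndTrim(s, sep string) []string {
	parts := strings.Split(s, sep)
	out := make([]string, 0, len(parts))
	for _, p := range parts {
		if trimmed := strings.TrimSpace(p); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}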
Example #25
func UploadToS3(file multipart.File) (id string, err error) {
	k, err := s3gof3r.EnvKeys()
	if err != nil {
		log.Println(err.Error())
		return
	}

	// Open bucket to put file into
	s3 := s3gof3r.New(S3Domain, k)
	s3.Region()
	b := s3.Bucket(S3Bucket)

	//Generate unique name
	u4, err := uuid.NewV4()
	if err != nil {
		log.Println(err.Error())
		return
	}

	id = u4.String()

	// Open a PutWriter for upload
	w, err := b.PutWriter("challenge/"+id, nil, nil)
	if err != nil {
		log.Println(err.Error())
		return
	}

	if _, err = io.Copy(w, file); err != nil {
		log.Println(err.Error())
		return
	}
	if err = w.Close(); err != nil {
		log.Println(err.Error())
		return
	}

	return
}
Example #26
func GetStats(id string) (p Parser, err error) {
	k, err := s3gof3r.EnvKeys()
	if err != nil {
		return
	}
	s3 := s3gof3r.New(S3Domain, k)
	s3.Region()
	b := s3.Bucket(S3Bucket)

	r, _, err := b.GetReader("challenge/"+id, nil)
	if err != nil {
		return
	}
	defer r.Close()

	buf := bytes.NewBuffer(nil)
	if _, err = io.Copy(buf, r); err != nil {
		return
	}

	p = WordStats(buf.String())
	p.Name = id
	return
}
Example #27
func (cp *cpOpts) Execute(args []string) (err error) {

	k, err := getAWSKeys()
	if err != nil {
		return
	}

	conf := new(s3gof3r.Config)
	*conf = *s3gof3r.DefaultConfig
	s3 := s3gof3r.New(cp.EndPoint, k)
	conf.Concurrency = cp.Concurrency
	if cp.NoSSL {
		conf.Scheme = "http"
	}
	conf.PartSize = cp.PartSize
	conf.Md5Check = !cp.NoMd5
	s3gof3r.SetLogger(os.Stderr, "", log.LstdFlags, cp.Debug)

	// parse positional cp args
	if len(args) != 2 {
		return fmt.Errorf("cp: source and destination arguments required")
	}

	var urls [2]*url.URL
	for i, a := range args {
		urls[i], err = url.Parse(a)
		if err != nil {
			return fmt.Errorf("parse error: %s", err)
		}
		if urls[i].Host != "" && urls[i].Scheme != "s3" {
			return fmt.Errorf("parse error: %s", urls[i].String())
		}
	}

	src, err := func(src *url.URL) (io.ReadCloser, error) {
		if src.Host == "" {
			return os.Open(src.Path)
		}
		r, _, err := s3.Bucket(src.Host).GetReader(src.Path, conf)
		return r, err
	}(urls[0])
	if err != nil {
		return
	}

	dst, err := func(dst *url.URL) (io.WriteCloser, error) {
		if dst.Host == "" {
			return os.Create(dst.Path)
		}
		return s3.Bucket(dst.Host).PutWriter(dst.Path, cp.Header, conf)
	}(urls[1])
	if err != nil {
		return
	}

	if _, err = io.Copy(dst, src); err != nil {
		return
	}
	if err = src.Close(); err != nil {
		return
	}
	if err = dst.Close(); err != nil {
		return
	}
	return
}