Ejemplo n.º 1
1
// upload streams the file at fileLocation to the configured S3 bucket under
// fileName, attaching fingerPrint as the "fingerprint" object metadata entry.
// It returns the uploaded object's S3 location.
func (su *StorageUtil) upload(fileName string, fileLocation string, fingerPrint string) (string, error) {
	f, err := os.Open(fileLocation)
	if err != nil {
		return "", err
	}
	// Defer only after a successful Open; the original deferred Close before
	// checking err, which would call Close on an invalid handle.
	defer f.Close()

	metadata := map[string]*string{
		"fingerprint": &fingerPrint,
	}

	uploader := s3manager.NewUploader(su.ssn)

	result, err := uploader.Upload(&s3manager.UploadInput{
		Bucket:   &su.Bucket,
		Key:      &fileName,
		Body:     f,
		Metadata: metadata,
	})
	if err != nil {
		return "", err
	}
	return result.Location, nil
}
Ejemplo n.º 2
0
// Entry point: validates the -path and -bucket flags, uploads the named file
// to the given S3 bucket, and prints the resulting object location.
func main() {
	flag.Parse()
	if *filepath == "" || *bucket == "" {
		fmt.Println("Please specify correct parameters!")
		fmt.Println("You should specify:")
		fmt.Println("-path with path to file you want to upload")
		fmt.Println("-bucket name of bucket in S3 where you want to upload")
		os.Exit(1)
	}

	file, err := os.Open(*filepath)
	if err != nil {
		fmt.Println("Failed to open a file.", err)
		os.Exit(1)
	}
	// Release the handle on exit (the original leaked it).
	defer file.Close()

	// Use a distinct name so the session package is not shadowed.
	sess := session.New(createConfig())
	uploader := s3manager.NewUploader(sess)

	resp, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(*bucket),
		Key:    aws.String("/" + getFileName(*filepath)),
		Body:   file,
	})
	if err != nil {
		fmt.Println("Failed to upload a file.", err)
		os.Exit(1)
	}

	fmt.Println("---------------------")
	fmt.Println("File was successfully uploaded!")
	fmt.Println("Location:", resp.Location)
}
Ejemplo n.º 3
0
// Entry point: validates the -path/-bucket flags, stats the given path, and
// dispatches to either a directory upload or a single-file upload.
func main() {
	flag.Parse()
	if *filesPath == "" || *bucket == "" {
		fmt.Println("Please specify correct parameters!")
		fmt.Println("You should specify:")
		fmt.Println("-path with path to file you want to upload")
		fmt.Println("-bucket name of bucket in S3 where you want to upload")
		os.Exit(1)
	}

	file, err := os.Open(*filesPath)
	if err != nil {
		log.Fatal("Failed to open a file with an error: ", err)
	}
	defer file.Close()
	// Stat decides below whether the path is a directory or a regular file.
	info, err := file.Stat()
	if err != nil {
		log.Fatal("Failed to get info about file with an error: ", err)
	}

	session := session.New(createConfig())
	service := s3manager.NewUploader(session)

	// NOTE(review): the helpers receive dereferenced copies of the uploader
	// (and file) — confirm uploadDirectory/uploadFile signatures expect values.
	switch mode := info.Mode(); {
	case mode.IsDir():
		uploadDirectory(*service, *file)
	case mode.IsRegular():
		uploadFile(*service, *uploadPath+getFileName(*filesPath), file)
	}
}
Ejemplo n.º 4
0
// Save will upload the file at filepath to S3 under filename, setting the
// given MIME type. When expire is true the object is served with max-age=0
// (effectively uncached); otherwise it is cacheable for one year. Objects are
// always stored with AES-256 server-side encryption.
func (a *Amazon) Save(filepath, filename, mime string, expire bool) (err error) {
	file, err := os.Open(filepath)
	if err != nil {
		// Keep the underlying cause instead of discarding it.
		return errors.New("problem opening file for s3: " + err.Error())
	}
	defer file.Close()

	uploader := s3manager.NewUploader(a.session)

	// Default: publicly cacheable for one year; expire disables caching.
	cache := "public, max-age=31536000"
	if expire {
		cache = "public, max-age=0"
	}

	params := &s3manager.UploadInput{
		Bucket:               aws.String(config.Settings.Amazon.Bucket),
		Key:                  aws.String(filename),
		Body:                 file,
		ContentType:          aws.String(mime),
		CacheControl:         aws.String(cache),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
	}

	_, err = uploader.Upload(params)
	return
}
Ejemplo n.º 5
0
// UploadFile uploads the file at localPath to remotePath in bucketName while
// rendering a progress bar sized to the file. It returns the new object's
// version ID when the bucket is versioned, or "" otherwise.
func (client *s3client) UploadFile(bucketName string, remotePath string, localPath string) (string, error) {
	uploader := s3manager.NewUploader(client.session)

	stat, err := os.Stat(localPath)
	if err != nil {
		return "", err
	}

	source, err := os.Open(localPath)
	if err != nil {
		return "", err
	}
	defer source.Close()

	// Progress bar tracks bytes read through the proxy reader below.
	bar := client.newProgressBar(stat.Size())
	bar.Start()
	defer bar.Finish()

	output, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(remotePath),
		Body:   bar.NewProxyReader(source),
	})
	if err != nil {
		return "", err
	}

	if output.VersionID == nil {
		return "", nil
	}
	return *output.VersionID, nil
}
Ejemplo n.º 6
0
// Update replaces the object's content with the data read from in, storing
// modTime in the object's metadata, then refreshes the cached metadata from
// the newly written object.
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
		u.Concurrency = 2
		u.LeavePartsOnError = false
		u.S3 = o.fs.c
	})

	// The modification time travels as a metadata entry.
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	// Content type is guessed from the object itself.
	contentType := fs.MimeType(o)

	key := o.fs.root + o.remote
	input := s3manager.UploadInput{
		Bucket:      &o.fs.bucket,
		ACL:         &o.fs.perm,
		Key:         &key,
		Body:        in,
		ContentType: &contentType,
		Metadata:    metadata,
		//ContentLength: &size,
	}
	if _, err := uploader.Upload(&input); err != nil {
		return err
	}

	// Drop the stale metadata and re-read it from the fresh object.
	o.meta = nil
	return o.readMetaData()
}
Ejemplo n.º 7
0
// uploadPackage uploads the local "package.zip" to the configured S3 bucket.
// Errors are returned to the caller rather than panicking.
func uploadPackage(config *Config) error {
	key := "package.zip"
	uploadfile, err := os.Open(key)
	if err != nil {
		return err
	}
	// Close the archive on every exit path (the original leaked it).
	defer uploadfile.Close()

	// A rate-limited uploader (rateLimitUploader) was prototyped here; the
	// plain file body is used for now.
	fmt.Println("Uploading package.zip to", config.S3Bucket, "bucket...")
	uploader := s3manager.NewUploader(session.New())
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: &config.S3Bucket,
		Key:    &key,
		Body:   uploadfile,
	})
	if err != nil {
		// Propagate instead of panicking: the signature promises an error.
		return err
	}

	fmt.Println("Uploaded package.zip to", config.S3Bucket, "bucket.")

	return nil
}
Ejemplo n.º 8
0
// UploadFile copies the local file at localPath into bucketName under
// remotePath. This client does not surface the uploaded object's version ID,
// so the first return value is always "".
func (client *s3client) UploadFile(bucketName string, remotePath string, localPath string) (string, error) {
	uploader := s3manager.NewUploader(&s3manager.UploadOptions{
		S3: client.client,
	})

	source, err := os.Open(localPath)
	if err != nil {
		return "", err
	}
	defer source.Close()

	// The upload output (which would carry VersionID) is intentionally
	// discarded here.
	if _, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(remotePath),
		Body:   source,
	}); err != nil {
		return "", err
	}

	return "", nil
}
Ejemplo n.º 9
0
// TestUploadOrderMultiFailureLeaveParts verifies that when an UploadPart call
// fails mid-multipart-upload and LeavePartsOnError is set, the uploader does
// NOT issue AbortMultipartUpload — already-uploaded parts are kept.
func TestUploadOrderMultiFailureLeaveParts(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	// Fail the second part (ETAG2) with an HTTP 400 to trigger the error path.
	s.Handlers.Send.PushBack(func(r *service.Request) {
		switch data := r.Data.(type) {
		case *s3.UploadPartOutput:
			if *data.ETag == "ETAG2" {
				r.HTTPResponse.StatusCode = 400
			}
		}
	})

	mgr := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:                s,
		Concurrency:       1, // serialize parts so the failure point is deterministic
		LeavePartsOnError: true,
	})
	// A 12 MB body forces the multipart code path (multiple parts).
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   bytes.NewReader(make([]byte, 1024*1024*12)),
	})

	assert.Error(t, err)
	// Crucially, no AbortMultipartUpload appears: parts were left in place.
	assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops)
}
Ejemplo n.º 10
0
// Write the local backup file to S3.
// There are no tests for this remote operation
func (b *Backup) writeBackupRemote() {
	s3Conn := session.New(&aws.Config{Region: aws.String(string(b.Config.S3Region))})

	t := time.Unix(b.StartTime, 0)

	// Key layout: <prefix>/<year>/<month>/<day>/<basename>.
	b.RemoteFilePath = fmt.Sprintf("%s/%v/%d/%v/%v", b.Config.ObjectPrefix, t.Year(), t.Month(), t.Day(), filepath.Base(b.FullFilename))

	// re-read the compressed file.  There is probably a better way to do this
	// NOTE(review): this buffers the whole backup in memory; streaming an
	// *os.File as the Body would keep memory flat for large backups.
	localFileContents, err := ioutil.ReadFile(b.FullFilename)
	if err != nil {
		log.Fatalf("[ERR] Could not read compressed file!: %v", err)
	}

	// Create the params to pass into the actual uploader
	params := &s3manager.UploadInput{
		Bucket: &b.Config.S3Bucket,
		Key:    &b.RemoteFilePath,
		Body:   bytes.NewReader(localFileContents),
	}

	// Optional server-side encryption / KMS settings, only when configured.
	if b.Config.S3ServerSideEncryption != "" {
		params.ServerSideEncryption = &b.Config.S3ServerSideEncryption
	}

	if b.Config.S3KmsKeyID != "" {
		params.SSEKMSKeyId = &b.Config.S3KmsKeyID
	}

	log.Printf("[INFO] Uploading %v/%v to S3 in %v", string(b.Config.S3Bucket), b.RemoteFilePath, string(b.Config.S3Region))
	uploader := s3manager.NewUploader(s3Conn)
	_, err = uploader.Upload(params)
	if err != nil {
		log.Fatalf("[ERR] Could not upload to S3!: %v", err)
	}
}
Ejemplo n.º 11
0
// fileToBucket uploads the named file to the given S3 bucket, keyed by the
// file's basename. The returned size is the file's byte count when Stat
// succeeds; size determination is best-effort and never fails the upload.
func fileToBucket(filename, bucket string) (size int64, err error) {
	defer Track("fileToBucket", Now(), debugOut)

	f, err := os.Open(filename)
	if err != nil {
		return
	}
	defer f.Close()

	// Best-effort: a Stat failure simply leaves size at zero.
	if fi, statErr := f.Stat(); statErr == nil {
		size = fi.Size()
	}

	// Hand the open file straight to the uploader, keyed by basename.
	uploader := s3manager.NewUploader(AWSSession)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(filepath.Base(filename)),
		Body:   f,
	})

	return
}
Ejemplo n.º 12
0
// TestUploadFailCleanup corrupts the checksum of the second part so the
// multipart upload fails, then verifies that with LeavePartsOnError=false the
// uploader aborted the upload: ListParts on the reported upload ID must fail.
func TestUploadFailCleanup(t *testing.T) {
	svc := s3.New(nil)

	// Break checksum on 2nd part so it fails
	part := 0
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		if r.Operation.Name == "UploadPart" {
			if part == 1 {
				r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "000")
			}
			part++
		}
	})

	key := "12mb-leave"
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{
		S3:                svc,
		LeavePartsOnError: false,
	})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: bucketName,
		Key:    &key,
		Body:   bytes.NewReader(integBuf12MB),
	})
	assert.Error(t, err)
	// The multipart failure error carries the upload ID needed for ListParts.
	uploadID := ""
	if merr, ok := err.(s3manager.MultiUploadFailure); ok {
		uploadID = merr.UploadID()
	}
	assert.NotEmpty(t, uploadID)

	// After cleanup the upload ID must be unknown to S3.
	_, err = svc.ListParts(&s3.ListPartsInput{
		Bucket: bucketName, Key: &key, UploadId: &uploadID})
	assert.Error(t, err)
}
Ejemplo n.º 13
0
func handleMultipartForm(req *http.Request, folderName string) (err error) {
	// 10MB allocated for files
	const _10mb = (1 << 22) * 24
	if err = req.ParseMultipartForm(_10); err != nil {
		return
	}

	uploader := s3manager.NewUploader(sess)
	for _, fileHeaders := range req.MultipartForm.File {
		for _, header := range fileHeaders {
			var file multipart.File
			if file, err = header.Open(); err != nil {
				return
			}

			_, err = uploader.Upload(&s3manager.UploadInput{
				Bucket: aws.String(conf.S3.BucketName),
				Key:    aws.String(fmt.Sprintf("%s/%s", folderName, header.Filename)),
				Body:   file,
			})
			if err != nil {
				return
			}
		}
	}
	return
}
Ejemplo n.º 14
0
Archivo: s3.go Proyecto: get3w/get3w
// Upload sends the file at filePath to the service's source bucket under key.
// The object is made publicly readable and its content type is derived from
// the key's extension. Both arguments must be non-empty.
func (service Service) Upload(key string, filePath string) error {
	switch {
	case key == "":
		return fmt.Errorf("key must be a nonempty string")
	case filePath == "":
		return fmt.Errorf("filePath must be a nonempty string")
	}

	f, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = s3manager.NewUploader(session.New()).Upload(&s3manager.UploadInput{
		Bucket:      aws.String(service.bucketSource),
		Key:         aws.String(key),
		ACL:         aws.String(s3.ObjectCannedACLPublicRead),
		ContentType: aws.String(mime.TypeByExtension(path.Ext(key))),
		Body:        f,
	})
	return err
}
Ejemplo n.º 15
0
// WriteS3Object joins data with lineSeparator and writes it to the given key
// in bucket, optionally gzip-compressing it first (which appends ".gz" to the
// key). It returns the uploaded object's location.
func WriteS3Object(data []string, config *aws.Config, bucket string, key string, lineSeparator string, compress bool) (string, error) {
	var reader io.Reader

	byteReader := strings.NewReader(strings.Join(data, lineSeparator))

	if compress {
		key = fmt.Sprintf("%v.gz", key)
		pipeReader, pipeWriter := io.Pipe()
		reader = pipeReader

		// Compress on the fly. Any compression failure is propagated to the
		// uploader through the pipe instead of being silently dropped.
		go func() {
			gw := gzip.NewWriter(pipeWriter)
			if _, err := io.Copy(gw, byteReader); err != nil {
				gw.Close()
				pipeWriter.CloseWithError(err)
				return
			}
			if err := gw.Close(); err != nil {
				pipeWriter.CloseWithError(err)
				return
			}
			pipeWriter.Close()
		}()
	} else {
		reader = byteReader
	}

	uploader := s3manager.NewUploader(session.New(config))

	result, err := uploader.Upload(&s3manager.UploadInput{
		Body:   reader,
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		// result is nil on failure; the original dereferenced it anyway,
		// which panics on any upload error.
		return "", err
	}
	return result.Location, nil
}
Ejemplo n.º 16
0
// Entry point: watches the directory tree given as the first argument and
// uploads newly created files to S3 using credentials from the environment.
func main() {
	if len(os.Args) != 3 {
		log.Fatal("wrong number of arguments")
	}
	// NOTE(review): the second argument is required but discarded — confirm
	// whether a destination prefix was meant to be used here.
	src, _ := os.Args[1], os.Args[2]

	creds := credentials.NewEnvCredentials()
	if _, err := creds.Get(); err != nil {
		log.Fatal(err)
	}

	svc := s3.New(&aws.Config{
		Credentials:      creds,
		Region:           "us-east-2",
		Endpoint:         "s3.amazonaws.com",
		S3ForcePathStyle: true,
	})

	uploader := s3manager.NewUploader(&s3manager.UploadOptions{S3: svc})

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// done is never written to, so the receive at the bottom blocks forever —
	// this process is intended to run until killed.
	done := make(chan bool)
	go func() {
		for {
			select {
			case event := <-watcher.Events:
				log.Println("event:", event)
				switch event.Op {
				case fsnotify.Create:
					handleCreate(watcher, uploader, event.Name)
				case fsnotify.Write:
					log.Println("modified file:", event.Name)
				}

			case err := <-watcher.Errors:
				log.Println("error:", err)
			}
		}
	}()

	// Register every directory under src with the watcher.
	err = filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		if info.IsDir() {
			log.Println(path)
			return watcher.Add(path)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	<-done

}
Ejemplo n.º 17
0
// uploadSegmentsToS3 uploads every file in the scratch folder to the
// "smick-media-output" bucket under media/<s3id>/, at most 10 concurrently,
// reporting progress through the scanned/uploaded callbacks. Successfully
// uploaded files are removed locally.
func uploadSegmentsToS3(inputFolder string, s3id string, scanned func(string, bool), uploaded func(int, int)) error {
	inputPath := scratchPath(inputFolder)
	sess := session.New(&aws.Config{Region: aws.String("us-west-2")})
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 5 * 1024 * 1024
		u.Concurrency = 5
	})

	files, err := ioutil.ReadDir(inputPath)
	if err != nil {
		return err
	}

	sort.Sort(ForPlayback(files))

	totalFiles := len(files)
	finishedFiles := make(chan string, len(files))
	limiter := make(chan int, 10) // bounds concurrent uploads

	go func() {
		for i := range files {
			file := files[i]
			log.Println("doing file", file.Name())
			limiter <- 0
			go func(fname string) {
				// Always signal completion and release the limiter slot —
				// even on error — so the counting loop below cannot deadlock.
				// (The original returned early on open errors without
				// signaling, hanging the function forever.)
				defer func() {
					finishedFiles <- fname
					<-limiter
				}()

				fullpath := filepath.Join(inputPath, fname)
				outpath := filepath.Join("media", s3id, fname)

				file, err := os.Open(fullpath)
				if err != nil {
					scanned("Error opening file for upload", true)
					log.Println("Error opening file for upload", err)
					return
				}

				_, err = uploader.Upload(&s3manager.UploadInput{
					Bucket: aws.String("smick-media-output"),
					Key:    aws.String(outpath),
					Body:   file,
				})
				file.Close()
				if err != nil {
					// Keep the local file when the upload failed; the
					// original removed it unconditionally.
					scanned("Error uploading file", true)
					log.Println("Error uploading file", err)
					return
				}
				os.Remove(fullpath)
			}(file.Name())
		}
	}()

	for j := 1; j <= totalFiles; j++ {
		fname := <-finishedFiles
		logLine := fmt.Sprintf("Uploaded %s: %d of %d", fname, j, totalFiles)
		scanned(logLine, false)
		uploaded(j, totalFiles)
	}

	return nil
}
Ejemplo n.º 18
0
// newS3Client builds an s3Client for the given AWS config, pairing a raw S3
// service client with an uploader configured for 64 MiB parts.
func newS3Client(cfg *aws.Config) *s3Client {
	uploader := s3manager.NewUploader(session.New(cfg), func(u *s3manager.Uploader) {
		u.PartSize = 64 * 1024 * 1024 // 64 MiB parts
	})
	return &s3Client{
		region:   *cfg.Region,
		client:   s3.New(session.New(cfg)),
		uploader: uploader,
	}
}
Ejemplo n.º 19
0
// NewUploader wraps an s3manager uploader built from the given config
// provider and pins it to bucketName, returning a ready-to-use S3Uploader.
func NewUploader(c client.ConfigProvider, bucketName string) *S3Uploader {
	return &S3Uploader{
		uploader:   s3manager.NewUploader(c),
		bucketName: bucketName,
	}
}
Ejemplo n.º 20
0
// InitS3Uploader constructs an S3Uploader targeting the bucket and region
// named in the repository configuration.
func InitS3Uploader(config RepoConfig) *S3Uploader {
	cfg := aws.Config{Region: aws.String(config.S3Region)}
	return &S3Uploader{
		BucketName: aws.String(config.S3Bucket),
		S3Uploader: s3manager.NewUploader(session.New(&cfg)),
	}
}
Ejemplo n.º 21
0
// submitAuditionHandler accepts a multipart "auditionFile" upload of less
// than 6 MiB, stores it in the "coaud" bucket under
// /auditions/{roleID}/{email}/{filename}, and records the audition against
// the role. Larger files are rejected.
func submitAuditionHandler(w http.ResponseWriter, r *http.Request) {
	s := redis_session.Session(w, r)
	fmt.Println(r.Form)
	err := r.ParseMultipartForm(32 << 20)
	if err != nil {
		log.Printf("%s", err)
	}

	roleID := r.FormValue("id")
	fmt.Println(r.Form)
	file, handler, err := r.FormFile("auditionFile")
	if err != nil {
		// Check the error before touching file: on failure file is a nil
		// interface, and the original's deferred Close would panic.
		fmt.Printf("err opening audition file: %s", err)
		return
	}
	defer file.Close()

	// Measure the upload by seeking to the end...
	bytes, err := file.Seek(0, 2)
	if err != nil {
		panic(err)
	}
	// ...then rewind, so the S3 upload reads from the start instead of EOF
	// (the original uploaded an empty body).
	if _, err = file.Seek(0, 0); err != nil {
		panic(err)
	}

	// Equivalent to the original's two truncating divisions for the < 6 check.
	megabytes := float64(bytes) / (1024 * 1024)

	if megabytes < 6 {
		attachmentURL := "/auditions/" + roleID + "/" + s.Get("Email") + "/" + handler.Filename

		uploader := s3manager.NewUploader(session.New())
		result, err := uploader.Upload(&s3manager.UploadInput{
			Body:   file,
			Bucket: aws.String("coaud"),
			Key:    aws.String(attachmentURL),
		})

		if err != nil {
			// Don't take the whole server down over one failed upload.
			log.Println("Failed to upload", err)
			http.Error(w, "upload failed", http.StatusInternalServerError)
			return
		}

		log.Println("Successfully uploaded to", result.Location)

		//create a new audition and add the link
		auditionID := bson.NewObjectId()
		audition := role.NewAudition(user.FindUser(s.Get("Email")), handler.Filename, result.Location, auditionID)
		curRole := role.FindRole(roleID)
		role.InsertAudition(audition, curRole)

		w.Write([]byte("uploaded"))
	} else {
		w.Write([]byte("rejected"))
	}
}
Ejemplo n.º 22
0
// UploadFile will upload file to specific S3 bucket
func UploadFile(session *session.Session, bucket, key string, file *os.File) {
	service := s3manager.NewUploader(session)
	resp, err := service.Upload(&s3manager.UploadInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		Body:        file,
		ContentType: aws.String(getContentType(file)),
	})
	// NOTE(review): resp is nil when err != nil, so the Location print below
	// is only safe if logger.Process terminates the program on error — confirm
	// that contract.
	logger.Process(err, "Can't upload file")
	fmt.Println("File was successfully uploaded! Location:", resp.Location)
}
Ejemplo n.º 23
0
// submitContestHandler creates a new contest from the posted form. If a
// "photo" file was attached it is uploaded to the "coaud" bucket and its S3
// location stored on the contest; otherwise a default picture is used. Either
// way the client is redirected to /contest/.
func submitContestHandler(w http.ResponseWriter, r *http.Request) {
	s := redis_session.Session(w, r)
	contestId := bson.NewObjectId()
	//s := redis_session.Session(w, r)
	layout := "2006-01-02"
	var UTC *time.Location = time.UTC

	// Deadline arrives as yyyy-mm-dd and is interpreted as UTC.
	deadline, err := time.ParseInLocation(layout, r.FormValue("deadline"), UTC)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(r.FormFile("photo"))
	file, handler, err := r.FormFile("photo")
	if err == nil {
		fmt.Println("success photo is in there")
		defer file.Close()
		//bytes, err := file.Seek(0,2)
		// NOTE(review): err is nil inside this branch, so this check is dead
		// code left over from the commented-out size probe above.
		if err != nil {
			panic(err)
		}
		//var kilobytes int64
		//kilobytes = (bytes / 1024)

		//var megabytes float64
		//megabytes = (float64)(kilobytes / 1024)
		attachmentURL := "/contests/" + contestId.Hex() + "/" + s.Get("Email") + "/" + handler.Filename

		uploader := s3manager.NewUploader(session.New())
		result, err := uploader.Upload(&s3manager.UploadInput{
			Body:   file,
			Bucket: aws.String("coaud"),
			Key:    aws.String(attachmentURL),
		})

		// NOTE(review): log.Fatalln exits the whole process from inside an
		// HTTP handler — consider returning a 500 instead.
		if err != nil {
			log.Fatalln("Failed to upload", err)
		}

		newContest := role.NewContest(r.FormValue("title"), r.FormValue("description"), result.Location, deadline, contestId)
		fmt.Println("Below this")
		fmt.Println(result.Location)
		role.InsertContest(newContest)
		http.Redirect(w, r, "/contest/", http.StatusTemporaryRedirect)
	} else {
		// No photo attached: fall back to the bundled default picture.
		fmt.Println("Default picture time")
		newContest := role.NewContest(r.FormValue("title"), r.FormValue("description"), "/public/img/default_role_pic.png", deadline, contestId)

		role.InsertContest(newContest)

		http.Redirect(w, r, "/contest/", http.StatusTemporaryRedirect)
	}
}
// TestBenchmarkBbs configures logging and the optional DataDog and S3 metric
// reporters from package-level flags, then runs the benchmark suite with
// those reporters attached.
func TestBenchmarkBbs(t *testing.T) {
	// Map the textual log level onto lager's enum; unknown levels are fatal.
	var lagerLogLevel lager.LogLevel
	switch logLevel {
	case DEBUG:
		lagerLogLevel = lager.DEBUG
	case INFO:
		lagerLogLevel = lager.INFO
	case ERROR:
		lagerLogLevel = lager.ERROR
	case FATAL:
		lagerLogLevel = lager.FATAL
	default:
		panic(fmt.Errorf("unknown log level: %s", logLevel))
	}

	// Log either to Ginkgo's writer or to the file named by -logFilename.
	var logWriter io.Writer
	if logFilename == "" {
		logWriter = GinkgoWriter
	} else {
		logFile, err := os.Create(logFilename)
		if err != nil {
			panic(fmt.Errorf("Error opening file '%s': %s", logFilename, err.Error()))
		}
		defer logFile.Close()

		logWriter = logFile
	}

	logger = lager.NewLogger("bbs-benchmarks-test")
	logger.RegisterSink(lager.NewWriterSink(logWriter, lagerLogLevel))

	reporters = []Reporter{}

	// DataDog reporting is enabled only when both keys are supplied.
	if dataDogAPIKey != "" && dataDogAppKey != "" {
		dataDogClient = datadog.NewClient(dataDogAPIKey, dataDogAppKey)
		dataDogReporter = reporter.NewDataDogReporter(logger, metricPrefix, dataDogClient)
		reporters = append(reporters, &dataDogReporter)
	}

	// S3 result reporting requires static credentials and a bucket name.
	if awsAccessKeyID != "" && awsSecretAccessKey != "" && awsBucketName != "" {
		creds := credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, "")
		s3Client := s3.New(&aws.Config{
			Region:      &awsRegion,
			Credentials: creds,
		})
		uploader := s3manager.NewUploader(&s3manager.UploadOptions{S3: s3Client})
		reporter := reporter.NewS3Reporter(logger, awsBucketName, uploader)
		reporters = append(reporters, &reporter)
	}

	RegisterFailHandler(Fail)
	RunSpecsWithDefaultAndCustomReporters(t, "Benchmark BBS Suite", reporters)
}
Ejemplo n.º 25
0
// UploadVideo downloads the video at Url into a local MP4 file, extracts its
// metadata, uploads the file to the video bucket, and removes the local copy.
// It returns (bucket, key, duration, timescale, durationString); zero values
// signal a download failure.
func UploadVideo(Url string) (string, string, uint32, uint32, string) {
	bucket := VideoBucket
	defaults.DefaultConfig.Region = aws.String("us-east-1")

	fileName := GenerateObjectId() + ".mp4"
	fmt.Println("Downloading", Url, "to", fileName)
	file, err := os.Create(fileName)
	if err != nil {
		fmt.Println("Error while creating", fileName, "-", err)
		return "", "", 0, 0, ""
	}
	defer file.Close()

	response, err := http.Get(Url)
	if err != nil {
		fmt.Println("Error while downloading", Url, "-", err)
		return "", "", 0, 0, ""
	}
	defer response.Body.Close()

	n, err := io.Copy(file, response.Body)
	if err != nil {
		fmt.Println("Error while downloading", Url, "-", err)
		return "", "", 0, 0, ""
	}
	// The original passed format verbs to Println; use Printf so the output
	// is actually formatted.
	fmt.Printf("n:%d\n", n)

	dir, err := filepath.Abs(fileName)
	if err != nil {
		fmt.Printf("error file Abs:%s\n", err)
	}
	fmt.Printf(".......file path:%s\n", dir)

	duration, timescale, durationstr, width, height := ExtractMp4Meta(dir)
	fmt.Printf("duration:%d, timescale:%d, durationstr:%s, width:%d, height:%d\n", duration, timescale, durationstr, width, height)

	// Rewind before uploading: io.Copy left the offset at EOF, so the
	// original uploaded a zero-byte body.
	if _, err = file.Seek(0, 0); err != nil {
		fmt.Println("Error rewinding", fileName, "-", err)
		return "", "", 0, 0, ""
	}

	fmt.Printf("Start upload file to S3:%s\n", fileName)
	uploader := s3manager.NewUploader(nil)
	result, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: &bucket,
		Key:    &fileName,
		Body:   file,
	})
	if err != nil {
		fmt.Println("Failed to upload", err)
	} else {
		fmt.Println("Uploaded....", result.Location)
	}

	// Best-effort cleanup of the local copy.
	if err = os.Remove(dir); err != nil {
		fmt.Println("Failed to remove local file", dir, "-", err)
	}
	// durstr := GetDuration(duration/timescale)
	return VideoBucket, fileName, duration, timescale, durationstr
}
Ejemplo n.º 26
0
// Upload a local file to S3.  Returns the s3 keyname of the uploaded item, or
// an error. The local archive is deleted (and its handle closed) on every
// exit path once it has been opened.
func uploadLocalFileToS3(packagePath string, awsSession *session.Session, S3Bucket string, noop bool, logger *logrus.Logger) (string, error) {
	// Query the S3 bucket for the bucket policies.  The bucket _should_ have
	// ObjectExpiration, otherwise we're just going to orphan our binaries...
	err := ensureExpirationPolicy(awsSession, S3Bucket, noop, logger)
	if nil != err {
		return "", fmt.Errorf("Failed to ensure bucket policies: %s", err.Error())
	}

	// Open the archive once. The original opened it twice and never closed
	// the second handle, leaking a file descriptor per upload.
	body, err := os.Open(packagePath)
	if nil != err {
		return "", fmt.Errorf("Failed to open local archive for S3 upload: %s", err.Error())
	}
	defer func() {
		body.Close()
		os.Remove(packagePath)
	}()

	// Cache it in case there was an error & we need to cleanup
	keyName := filepath.Base(packagePath)

	uploadInput := &s3manager.UploadInput{
		Bucket:      &S3Bucket,
		Key:         &keyName,
		ContentType: aws.String("application/zip"),
		Body:        body,
	}

	if noop {
		logger.WithFields(logrus.Fields{
			"Bucket": S3Bucket,
			"Key":    keyName,
		}).Info("Bypassing S3 ZIP upload due to -n/-noop command line argument")
	} else {
		logger.WithFields(logrus.Fields{
			"Source": packagePath,
		}).Info("Uploading local file to S3")
		uploader := s3manager.NewUploader(awsSession)
		result, err := uploader.Upload(uploadInput)
		if nil != err {
			return "", err
		}
		logger.WithFields(logrus.Fields{
			"URL": result.Location,
		}).Info("Upload complete")
	}
	return keyName, nil
}
Ejemplo n.º 27
0
// TestUploadOrderReadFail2 exercises a body reader that fails on its second
// read: the multipart upload starts, then must be aborted, and the reader's
// error surfaces as a ReadRequestBody failure.
func TestUploadOrderReadFail2(t *testing.T) {
	s, ops, _ := loggingSvc([]string{"UploadPart"})
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &failreader{times: 2},
	})

	assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
	assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
	// The started multipart upload is cleaned up via AbortMultipartUpload.
	assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops)
}
Ejemplo n.º 28
0
// TestUploadOrderReadFail1 exercises a body reader that fails on the very
// first read: the upload must fail with ReadRequestBody before any S3 API
// call is made (the ops log stays empty).
func TestUploadOrderReadFail1(t *testing.T) {
	s, ops, _ := loggingSvc(emptyList)
	mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s})
	_, err := mgr.Upload(&s3manager.UploadInput{
		Bucket: aws.String("Bucket"),
		Key:    aws.String("Key"),
		Body:   &failreader{times: 1},
	})

	assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code())
	assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure")
	// No request ever left the client.
	assert.Equal(t, []string{}, *ops)
}
Ejemplo n.º 29
0
/*
Upload uploads a file to S3.

This is merely a wrapper around the aws-sdk-go uploader. It allows us to isolate
the aws-sdk-go dependencies and unify error handling.
*/
func Upload(file *os.File, bucket, key string) error {
	uploader := s3manager.NewUploader(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	result, err := uploader.Upload(&s3manager.UploadInput{
		Body:   file,
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	log.Println("Successfully uploaded to", result.Location)
	return nil
}
// Upload pushes fileName to the configured bucket with a public-read ACL and
// returns the resulting object location. When isPNG is set the content type
// is forced to image/png.
func Upload(conf *Config, file io.Reader, fileName string, isPNG bool) (string, error) {
	creds := credentials.NewStaticCredentials(conf.AccessKeyID, conf.SecretAccessKey, "")
	s3Config := aws.NewConfig().WithCredentials(creds)

	pathStyle := true
	s3Config.S3ForcePathStyle = &pathStyle

	// With no AWS region configured, route to the custom endpoint (the
	// placeholder " " region keeps the SDK happy); otherwise the region
	// alone selects the endpoint.
	if conf.AwsRegion == "" {
		s3Config = s3Config.WithRegion(" ").WithEndpoint(conf.Endpoint)
	} else {
		s3Config = s3Config.WithRegion(conf.AwsRegion)
	}

	input := &s3manager.UploadInput{
		ACL:    aws.String("public-read"),
		Bucket: &conf.BucketName,
		Key:    &fileName,
		Body:   file,
	}
	if isPNG {
		input.ContentType = aws.String("image/png")
	}

	uploader := s3manager.NewUploader(session.New(s3Config))
	result, err := uploader.Upload(input)
	if err != nil {
		return "", fmt.Errorf("Failed to upload file, err: %s", err.Error())
	}

	return result.Location, nil
}