Example 1
func (s *S3) init() error {
	if s.Bucket == "" {
		return errors.New("Please Specify an S3 Bucket")
	}
	if s.Region == "" {
		return errors.New("Please Specify an S3 Region")
	}
	if s.Root == "" {
		return errors.New("Please Specify an S3 Root Path")
	}
	if s.BufferDir == "" {
		return errors.New("Please Specify a Buffer Directory to use for Uploads")
	}

	var ok bool
	if s.region, ok = aws.Regions[s.Region]; !ok {
		return errors.New("Invalid Region: " + s.Region)
	}
	err := s.getAuth()
	if err != nil {
		return err
	}
	s.s3 = s3.New(s.auth, s.region)
	s.bucket = s.s3.Bucket(s.Bucket)
	if err := os.MkdirAll(s.BufferDir, 0755); err != nil && !os.IsExist(err) {
		// there was an error and it wasn't that the directory already exists
		return err
	}
	s.bufferDir = &BufferDir{Mutex: sync.Mutex{}, root: s.BufferDir}
	s.root = strings.TrimPrefix(s.Root, "/")
	go s.updateAuthLoop()
	return nil
}
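
Note: init launches s.updateAuthLoop in the background, but that method is not part of this excerpt. A minimal sketch of what such a refresh loop might look like, assuming getAuth can simply be re-called to pick up rotated IAM credentials and that the interval is arbitrary:

// Hypothetical sketch, not the project's actual implementation.
// Periodically re-resolve credentials so expiring IAM role tokens keep working.
func (s *S3) updateAuthLoop() {
	for range time.Tick(15 * time.Minute) { // interval is an assumption
		if err := s.getAuth(); err != nil {
			log.Printf("refreshing AWS auth failed: %v", err)
			continue
		}
		// s3.New captures auth by value, so rebuild the client and bucket.
		// A real implementation would need locking around these fields.
		s.s3 = s3.New(s.auth, s.region)
		s.bucket = s.s3.Bucket(s.Bucket)
	}
}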
Example 2
func (app *AppContext) setupS3Logger() (err error) {
	auth, err := aws.GetAuth("", "", "", time.Now())

	if err != nil {
		log.Fatalln("Failed to find AWS credentials in env")
	}

	awsConnection := s3.New(
		auth,
		getAWSRegion(app.config.aws_region),
	)
	bucket := awsConnection.Bucket(app.config.bucket)

	instanceInfo := keygen.BuildInstanceInfo(
		&keygen.EnvInstanceFetcher{},
		serviceName,
		app.config.logging_dir,
	)

	rotateCoordinator := gologging.NewRotateCoordinator(
		app.config.max_log_lines,
		app.config.max_log_age,
	)

	metricsLogger := MetricsLogger{app.metrics}

	app.s3log, err = gologging.StartS3Logger(
		rotateCoordinator,
		instanceInfo,
		&metricsLogger,
		&uploader.S3UploaderBuilder{
			Bucket: bucket,
			KeyNameGenerator: &KeyNameGenerator{
				Info:   instanceInfo,
				Prefix: app.config.key_prefix,
			},
		},
		&metricsLogger,
		app.config.num_workers,
	)
	if err != nil {
		return
	}

	// Make sure logger is flushed when shutdown signal is received
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		<-sigc
		log.Println("interrupted, closing logger...")
		app.s3log.Close()
		os.Exit(0)
	}()

	return nil
}
Example 3
File: site.go Project: bazil/jkl
// Deploys a site to S3.
func (s *Site) Deploy(user, pass, url string) error {

	auth := aws.Auth{AccessKey: user, SecretKey: pass}
	b := s3.New(auth, aws.USEast).Bucket(url)

	// walks the _site directory and uploads each file to S3
	walker := func(fn string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.IsDir() {
			return nil
		}

		rel, err := filepath.Rel(s.Dest, fn)
		if err != nil {
			return err
		}
		typ := mime.TypeByExtension(filepath.Ext(rel))
		content, err := ioutil.ReadFile(fn)
		if err != nil {
			return err
		}
		logf(MsgUploadFile, rel)

		// try to upload the file; transient Amazon errors happen,
		// so retry once on failure
		if err := b.Put(rel, content, typ, s3.PublicRead, s3.Options{}); err != nil {
			time.Sleep(100 * time.Millisecond) // sleep so that we don't immediately retry
			return b.Put(rel, content, typ, s3.PublicRead, s3.Options{})
		}

		// file upload was a success, return nil
		return nil
	}

	return filepath.Walk(s.Dest, walker)
}
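
Note: the walker above retries a failed Put exactly once after a fixed 100ms pause. A more general pattern, sketched here as an assumption rather than anything jkl itself provides, retries with exponential backoff:

// Hypothetical helper: retry a put with exponential backoff.
func putWithRetry(b *s3.Bucket, key string, data []byte, contType string, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = b.Put(key, data, contType, s3.PublicRead, s3.Options{}); err == nil {
			return nil
		}
		// back off: 100ms, 200ms, 400ms, ...
		time.Sleep(time.Duration(1<<uint(i)) * 100 * time.Millisecond)
	}
	return err
}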
Example 4
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *check.C) {
	errs := make(chan error, len(aws.Regions))
	for _, region := range aws.Regions {
		go func(r aws.Region) {
			s := s3.New(s.s3.Auth, r)
			b := s.Bucket("goamz-" + s.Auth.AccessKey)
			_, err := b.Get("non-existent")
			errs <- err
		}(region)
	}
	for range aws.Regions {
		err := <-errs
		if err != nil {
			s3_err, ok := err.(*s3.Error)
			if ok {
				c.Check(s3_err.Code, check.Matches, "NoSuchBucket")
			} else if _, ok = err.(*net.DNSError); ok {
				// Okay as well.
			} else {
				c.Errorf("Non-S3 error: %s", err)
			}
		} else {
			c.Errorf("Test should have errored but it seems to have succeeded")
		}
	}
}
Example 5
func s3Setup(bucketName string, path string, opts sequinsOptions) *sequins {
	auth, err := aws.GetAuth(*s3AccessKey, *s3SecretKey, "", time.Time{})
	if err != nil {
		log.Fatal(err)
	}

	regionName := *s3Region
	if regionName == "" {
		regionName = aws.InstanceRegion()
		if regionName == "" {
			log.Fatal("Unspecified --s3-region, and no instance region found.")
		}
	}

	region, exists := aws.Regions[regionName]
	if !exists {
		log.Fatalf("Invalid AWS region: %s", regionName)
	}

	bucket := s3.New(auth, region).Bucket(bucketName)
	backend := backend.NewS3Backend(bucket, path)
	if opts.LocalPath == "" {
		tmpDir, err := ioutil.TempDir("", "sequins-")
		if err != nil {
			log.Fatal(err)
		}

		opts.LocalPath = tmpDir
	}

	return newSequins(backend, opts)
}
Example 6
func setupS3() *backend.S3Backend {
	fakeS3, _ := s3test.NewServer(&s3test.Config{})

	// cargo-culted from s3test
	fakeRegion := aws.Region{
		Name:                 "faux-region-1",
		S3Endpoint:           fakeS3.URL(),
		S3LocationConstraint: true,
	}

	auth, _ := aws.GetAuth("foo", "bar", "", time.Time{})
	bucket := s3.New(auth, fakeRegion).Bucket("sequinstest")
	bucket.PutBucket("")

	putFile(bucket, "test_data/0/part-00000")
	putFile(bucket, "test_data/0/part-00001")
	putFile(bucket, "test_data/0/_SUCCESS")

	putFile(bucket, "test_data/1/part-00000")
	putFile(bucket, "test_data/1/part-00001")

	bucket.Put("test_data/foo", []byte("nothing"), "", "", s3.Options{})

	return backend.NewS3Backend(bucket, "test_data")
}
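
Note: the putFile helper is not part of this excerpt. A plausible sketch, assuming it just seeds the fake bucket with a placeholder object under the given key:

// Hypothetical sketch of the helper used above.
func putFile(bucket *s3.Bucket, key string) {
	if err := bucket.Put(key, []byte("contents"), "", "", s3.Options{}); err != nil {
		panic(err)
	}
}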
Example 7
func s3Put(config *BackupConfig, local, remote string, public bool) error {
	localf, err := os.Open(local)
	if err != nil {
		return err
	}
	defer localf.Close()
	localfi, err := localf.Stat()
	if err != nil {
		return err
	}

	auth := aws.Auth{AccessKey: config.AwsAccess, SecretKey: config.AwsSecret}
	b := s3.New(auth, aws.USEast).Bucket(config.Bucket)

	acl := s3.Private
	if public {
		acl = s3.PublicRead
	}

	contType := mime.TypeByExtension(path.Ext(local))
	if contType == "" {
		contType = "binary/octet-stream"
	}

	err = b.PutBucket(acl)
	if err != nil {
		return err
	}
	opts := s3.Options{}
	return b.PutReader(remote, localf, localfi.Size(), contType, acl, opts)
}
Example 8
func SyncFiles(filesToSync []FileToSync) ([]FileProcessed, error) {

	filesProcessedChan := make(chan FileProcessed, len(filesToSync))

	// From IAM or env
	auth, err := GetAWSAuth()
	if err != nil {
		return []FileProcessed{}, err
	}

	region := aws.USWest

	s3_conn := s3.New(auth, region)

	for _, file := range filesToSync {

		go func(file FileToSync) {
			updated := syncFileFromS3(s3_conn, file)
			fileProcessed := FileProcessed{updated, file}
			filesProcessedChan <- fileProcessed
		}(file)

	}

	var filesProcessed []FileProcessed
	for i := 0; i < len(filesToSync); i++ {
		fileProcessed := <-filesProcessedChan
		filesProcessed = append(filesProcessed, fileProcessed)
	}

	return filesProcessed, nil

}
Example 9
func (s *LocalServerSuite) SetUpSuite(c *gocheck.C) {
	s.srv.SetUp(c)
	s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)

	// TODO Sadly the fake server ignores auth completely right now. :-(
	s.clientTests.authIsBroken = true
	s.clientTests.Cleanup()
}
Example 10
func (s *AmazonClientSuite) SetUpSuite(c *check.C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	s.s3 = s3.New(s.srv.auth, s.Region)
	// In case tests were interrupted in the middle before.
	s.ClientTests.Cleanup()
}
Example 11
File: main.go Project: kaiinui/mofu
func GetBucket() *s3.Bucket {
	auth, err := aws.EnvAuth()
	if err != nil {
		panic(err.Error())
	}

	s := s3.New(auth, aws.USEast)
	return s.Bucket("filmapp-development")
}
Example 12
func (s *AmazonDomainClientSuite) SetUpSuite(c *check.C) {
	if !testutil.Amazon {
		c.Skip("live tests against AWS disabled (no -amazon)")
	}
	s.srv.SetUp(c)
	region := s.Region
	region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
	s.s3 = s3.New(s.srv.auth, region)
	s.ClientTests.Cleanup()
}
Example 13
func (ss *S3Storage) Connect() error {
	if ss.BucketName == "" {
		return errors.New("S3 bucket name mandatory to establish a connection")
	}

	s3Conn := s3.New(ss.AwsAuth, ss.Region)
	ss.connexion = s3Conn.Bucket(ss.BucketName)

	return nil
}
Example 14
func main() {
	flag.Parse()
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Fatalln("Failed to recieve auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)
	bucket := awsConnection.Bucket(targetBucket)
	info := gen.BuildInstanceInfo(&localInstanceFetcher{}, "basic_example", ".")
	rotateCoordinator := gologging.NewRotateCoordinator(adjustedMaxLines, time.Hour*1)
	logger, err := gologging.StartS3Logger(
		rotateCoordinator,
		info,
		&stdoutNotifier{},
		&uploader.S3UploaderBuilder{
			Bucket:           bucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{info},
		},
		&stderrNotifier{},
		5,
	)
	if err != nil {
		log.Fatalf("Error building uploader: %s\n ", err)
	}

	i := 0
	now := time.Now()
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		<-sigc
		logger.Close()
		fmt.Printf("Produced %f rps\n", float64(i)/(float64(time.Now().Sub(now))/float64(time.Second)))
		os.Exit(0)
	}()
	x := int(time.Second) / *rps
	for ; i < MAX_LINES_PER_LOG*4; i++ {
		// throttle - there is a better solution to this
		defer func() {
			if x := recover(); x != nil { // means we caught a signal
				time.Sleep(120 * time.Second)
			}
		}()
		time.Sleep(time.Duration(int(0.8 * float64(x))))
		logger.Log("MOAR! %d", i)
	}
	logger.Close()
}
Example 15
// create a new s3 client from the url
func newS3Client(config config.Config) (*s3.S3, error) {
	auth, err := aws.GetAuth(config.AWS.AccessKeyID, config.AWS.SecretAccessKey, "", time.Now())
	if err != nil {
		return &s3.S3{}, err
	}

	if config.AWS.Region == "" {
		return nil, errors.New("Region not set for S3 client lib (missing SetS3URL?)")
	}

	return s3.New(auth, aws.Regions[config.AWS.Region]), nil
}
Example 16
func getS3(u *url.URL) (io.ReadCloser, error) {
	if AWSAuth.AccessKey == "" || AWSRegion.Name == "" {
		return nil, fmt.Errorf("Invalid AWS Auth or Region. Please check env AWS_CONFIG_FILE.")
	}
	client := s3.New(AWSAuth, AWSRegion)
	bucket := client.Bucket(u.Host)
	rc, err := bucket.GetReader(u.Path)
	if err != nil {
		return nil, err
	}
	return rc, nil
}
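
Note: getS3 hands back the bucket reader unread, so the caller owns the ReadCloser. A hypothetical caller (the fetchS3 name is invented for illustration) might look like:

// Hypothetical caller: fetch an s3:// URL into memory and close the reader.
func fetchS3(raw string) ([]byte, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	rc, err := getS3(u)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}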
Example 17
func GetBucket(bucketPrefix string) (*s3.Bucket, error) {
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return nil, err
	}

	s := s3.New(auth, aws.USWest2)
	s.ConnectTimeout = time.Second * 30
	s.ReadTimeout = time.Second * 30

	bucketName := strings.TrimPrefix(bucketPrefix, "s3://") + "-" + environment.GetCloudEnv()
	return s.Bucket(bucketName), nil
}
Example 18
func (so *s3Options) Execute(args []string) error {
	if err := app.CheckArity(1, 1, args); err != nil {
		return err
	}

	repo := args[0]

	s3 := s3.New(awsAuth, awsRegion)
	buk := s3.Bucket(defBucket)

	data, err := buk.Get("/binary/repos/repositories")

	if err != nil {
		return err
	}

	ts := &env.TagStore{}

	err = json.Unmarshal(data, &ts)
	if err != nil {
		return err
	}

	id, err := ts.Lookup(repo)

	if err != nil {
		return err
	}

	dts, err := env.DefaultTagStore()

	if err != nil {
		return err
	}

	i := &Importer{tags: ts, sysTags: dts}

	if !so.Force {
		if i.alreadyExists(id) {
			return fmt.Errorf("Already have %s, skipping download\n", utils.TruncateID(id))
		}
	}

	fmt.Printf("Downloading %s (%s)\n", repo, utils.TruncateID(id))

	err = i.download(buk, id)

	dts.Flush()

	return err
}
Example 19
func (d *S3Downstream) Init() error {
	u, err := url.Parse(d.downstreamURI)
	if err != nil || u.Scheme != "s3" || u.User == nil {
		log.Panic("Bad URL scheme ", d.downstreamURI)
	}

	username := u.User.Username()
	password, _ := u.User.Password()

	auth := aws.Auth{AccessKey: username, SecretKey: password}

	log.Println("Init s3 connection using key ", auth.AccessKey, u.Host)
	connection := s3.New(auth, aws.APSoutheast)
	d.bucket = connection.Bucket(u.Host)
	return nil
}
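
Note: as the parsing above implies, the expected URI shape is s3://ACCESSKEY:SECRET@bucket-name, with the bucket name carried in the host portion of the URL.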
Example 20
// create a new s3 client from the url
func newS3Client(config config.Config) (*s3.S3, error) {
	auth, err := aws.GetAuth(config.AWS.AccessKeyID, config.AWS.SecretAccessKey, "", time.Now())
	if err != nil {
		return &s3.S3{}, err
	}

	var regionName string

	regQuery := config.AWS.S3URL.Query()["region"]

	if len(regQuery) > 0 && regQuery[0] != "" {
		regionName = regQuery[0]
	} else {
		regionName = S3DefaultRegion
	}

	region := aws.Regions[regionName]

	return s3.New(auth, region), nil
}
Example 21
func listBackups() {
	bucketName := *config.S3BackupBucket
	dir := sanitizeDirForList(*config.S3BackupDir, bucketDelim)
	auth := aws.Auth{AccessKey: *config.AwsAccess, SecretKey: *config.AwsSecret}
	b := s3.New(auth, aws.USEast).Bucket(bucketName)
	fmt.Printf("Listing files in %s\n", fullUrl(bucketName))
	rsp, err := b.List(dir, bucketDelim, "", 1000)
	if err != nil {
		log.Fatalf("Invalid s3 backup: bucket.List failed %s\n", err)
	}
	//fmt.Printf("rsp: %v\n", rsp)
	if len(rsp.Contents) == 0 {
		fmt.Printf("There are no files in %s\n", fullUrl(*config.S3BackupBucket))
		return
	}
	//fmt.Printf("Backup files in %s:\n", fullUrl(*config.S3BackupBucket))
	for _, key := range rsp.Contents {
		fmt.Printf("  %s %d\n", key.Key, key.Size)
	}
}
Example 22
func main() {
	auth := aws.Auth{
		AccessKey: "abc",
		SecretKey: "123",
	}
	fakeRegion := aws.Region{
		Name:       "fakes3",
		S3Endpoint: fmt.Sprintf("http://%s:%s", fakes3host, fakes3port),
	}
	s := s3.New(auth, fakeRegion)
	bucket := s.Bucket(bucketname)
	err := bucket.PutBucket(s3.BucketOwnerFull)
	if err != nil {
		panic(err.Error())
	}
	_, err = bucket.List("", "/", "", 20)
	if err != nil {
		panic(err.Error())
	}
}
Example 23
func listBlobFiles(config *BackupConfig, dir string) ([]s3.Key, error) {
	auth := aws.Auth{AccessKey: config.AwsAccess, SecretKey: config.AwsSecret}
	b := s3.New(auth, aws.USEast).Bucket(config.Bucket)
	ret := make([]s3.Key, 0)
	dir = sanitizeDirForList(dir, bucketDelim)
	marker := ""
	for {
		// note: according to my tests, 1000 is max
		if res, err := b.List(dir, "", marker, 1000); err != nil {
			return nil, err
		} else {
			for _, k := range res.Contents {
				ret = append(ret, k)
			}
			if !res.IsTruncated {
				break
			}
			marker = res.Contents[len(res.Contents)-1].Key
		}
	}
	return ret, nil
}
Example 24
func (u *S3Uploader) Setup(destination string) error {
	u.Destination = destination

	// Setup the AWS authentication
	auth, err := aws.EnvAuth()
	if err != nil {
		return errors.New("Error loading AWS credentials: " + err.Error())
	}

	// Decide what region to use
	// https://github.com/crowdmob/goamz/blob/master/aws/regions.go
	// I think S3 defaults to us-east-1
	regionName := "us-east-1"
	if os.Getenv("AWS_DEFAULT_REGION") != "" {
		regionName = os.Getenv("AWS_DEFAULT_REGION")
	}

	// Check to make sure the region exists
	region, ok := aws.Regions[regionName]
	if !ok {
		return errors.New("Unknown AWS Region `" + regionName + "`")
	}

	// Find the bucket
	s3 := s3.New(auth, region)
	bucket := s3.Bucket(u.bucketName())

	// If the list doesn't return an error, then we've got our
	// bucket
	_, err = bucket.List("", "", "", 0)
	if err != nil {
		return errors.New("Could not find bucket `" + u.bucketName() + " in region `" + region.Name + "` (" + err.Error() + ")")
	}

	u.Bucket = bucket

	return nil
}
Example 25
func (s *S) SetUpSuite(c *C) {
	testServer.Start()

	auth, _ := aws.GetAuth("abc", "123", "", time.Time{})
	client := s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})

	tempDir, err := ioutil.TempDir("", "dogestry-test")
	if err != nil {
		c.Fatalf("couldn't get tempdir. Error: %s", err)
	}

	s.TempDir = tempDir

	baseConfig, err := config.NewConfig(false)
	if err != nil {
		c.Fatalf("couldn't initialize config. Error: %s", err)
	}

	s.remote = &S3Remote{
		config:     baseConfig,
		BucketName: "bucket",
		client:     client,
	}
}
Example 26
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	if AWS_ACCESS_KEY_ID == "" || AWS_SECRET_ACCESS_KEY == "" {
		log.Fatal("AWS Credentials Required")
	}

	os.Setenv("AWS_ACCESS_KEY_ID", AWS_ACCESS_KEY_ID)
	os.Setenv("AWS_SECRET_ACCESS_KEY", AWS_SECRET_ACCESS_KEY)

	// Since we're not messing with cacheAge, it's just easier to treat it as a string.
	if cacheAge == "" {
		cacheAge = "0"
	}

	if maxObjs != 0 || stopMarker != "" {
		// Set the flag so the loops know to check for an early stop
		doStop = true
	}

	log.Println("Starting Cache Alterations:")

	//  Connect to AWS using goamz
	auth, err := aws.EnvAuth()
	if err != nil {
		log.Panic(err.Error())
	}

	// Instantiate S3 Object
	s := s3.New(auth, aws.USEast)

	// Set the Bucket
	Bucket := s.Bucket(bucketName)

	// Initial Request - Outside Loop
	Response, err := Bucket.List("", "", lastMarker, 1000)
	if err != nil {
		log.Panic(err.Error())
	}

	// Set up the header for iterating.
	opts := s3.CopyOptions{}
	opts.CacheControl = "max-age=" + cacheAge
	opts.MetadataDirective = "REPLACE"

	log.Println("-> 0 START")

	// Loop Results
	for _, v := range Response.Contents {
		fmt.Printf(".") // Indicator that something is happening
		_, err := Bucket.PutCopy(v.Key, s3.PublicRead, opts, bucketName+"/"+v.Key)
		if err != nil {
			log.Panic(err.Error())
		}
		// We generate our own lastMarker.  This allows us to perform our own resume.
		lastMarker = v.Key
		results++

		if doStop {
			if results == maxObjs || lastMarker == stopMarker {
				break // End here.
			}
		}
	}

	fmt.Printf("\n")
	log.Println("->", results, " ", lastMarker)

	// Did Amazon say there was more?  If so, keep going.
	if Response.IsTruncated {
		for {
			// Issue List Command
			Response, err := Bucket.List("", "", lastMarker, 1000)
			if err != nil {
				panic(err.Error())
			}

			// Loop through Response and update each object's cache headers.
			for _, v := range Response.Contents {
				fmt.Printf(".") // Indicator that something is happening
				_, err := Bucket.PutCopy(v.Key, s3.PublicRead, opts, bucketName+"/"+v.Key)
				if err != nil {
					log.Panic(err.Error())
				}
				lastMarker = v.Key
				results++

				if doStop {
					if results == maxObjs || lastMarker == stopMarker {
						break // End here.
					}
				}
			}

			if !Response.IsTruncated {
				break // End loop
			} else {
				fmt.Printf("\n")
				log.Println("->", results, " ", lastMarker)
			}
		}
	}
	log.Println("Wrote to", results, " S3 Objects. Last object was:", lastMarker)
}
Example 27
func (s *S) SetUpSuite(c *check.C) {
	testServer.Start()
	auth := aws.Auth{AccessKey: "abc", SecretKey: "123"}
	s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
}
Example 28
func main() {
	flag.Parse()

	stats, err := initStatsd(*stats_prefix, os.Getenv("STATSD_HOSTPORT"))
	if err != nil {
		log.Fatalf("Statsd configuration error: %v", err)
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatalln("Failed to recieve auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)

	auditBucket := awsConnection.Bucket(auditBucketName + "-" + CLOUD_ENV)
	auditBucket.PutBucket(s3.BucketOwnerFull)
	eventBucket := awsConnection.Bucket(eventBucketName + "-" + CLOUD_ENV)
	eventBucket.PutBucket(s3.BucketOwnerFull)

	auditInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge_audit", *logging_dir)
	loggingInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge", *logging_dir)

	auditRotateCoordinator := gologging.NewRotateCoordinator(auditMaxLogLines, auditMaxLogAge)
	loggingRotateCoordinator := gologging.NewRotateCoordinator(maxLogLines, maxLogAge)

	auditLogger, err := gologging.StartS3Logger(
		auditRotateCoordinator,
		auditInfo,
		&DummyNotifierHarness{},
		&uploader.S3UploaderBuilder{
			Bucket:           auditBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: auditInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building audit: %s\n", err)
	}

	spadeEventLogger, err := gologging.StartS3Logger(
		loggingRotateCoordinator,
		loggingInfo,
		BuildSQSNotifierHarness(),
		&uploader.S3UploaderBuilder{
			Bucket:           eventBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: loggingInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building logger: %s\n", err)
	}

	// Initialize Loggers.
	// AuditLogger writes to the audit log, for analysis of system success rate.
	// SpadeLogger writes requests to a file for processing by the spade processor.
	// K(afka)Logger produces messages for Kafka, currently in dark launch.
	// We allow the klogger to be nil in case we boot up with an unresponsive Kafka cluster.
	var logger *request_handler.EventLoggers
	brokerList := ParseBrokerList(*brokers)
	klogger, err := kafka_logger.NewKafkaLogger(*clientId, brokerList)
	if err == nil {
		klogger.(*kafka_logger.KafkaLogger).Init()
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     klogger,
		}
	} else {
		log.Printf("Got Error while building logger: %s + %v\nUsing Nop Logger\n", err, brokerList)
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     &request_handler.NoopLogger{},
		}
	}

	// Trigger close on receipt of SIGINT
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGINT)
	go func() {
		<-sigc
		// Cause flush
		logger.Close()
		os.Exit(0)
	}()

	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	go http.ListenAndServe(net.JoinHostPort("", "81"), hystrixStreamHandler)

	// setup server and listen
	server := &http.Server{
		Addr: *listen_port,
		Handler: &request_handler.SpadeHandler{
			StatLogger: stats,
			EdgeLogger: logger,
			Assigner:   request_handler.Assigner,
		},
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 20, // 1MB
	}
	if err := server.ListenAndServe(); err != nil {
		log.Fatalln(err)
	}
}
Example 29
func pushToS3(bundlesPath string) error {
	if _, err := os.Stat(bundlesPath); os.IsNotExist(err) {
		return fmt.Errorf("This is awkward, the bundles path DNE: %s", bundlesPath)
	}

	// use env variables to connect to s3
	auth, err := aws.EnvAuth()
	if err != nil {
		return fmt.Errorf("AWS Auth failed: %v", err)
	}

	// connect to s3 bucket
	s := s3.New(auth, aws.GetRegion(region))
	bucketname, bucketpath := bucketParts(bucket)
	bucket := s.Bucket(bucketname)

	//walk the bundles directory
	var html string
	walkFn := func(fpath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		stat, err := os.Stat(fpath)
		if err != nil {
			return err
		}

		relFilePath, err := filepath.Rel(bundlesPath, fpath)
		if err != nil || (fpath == bundlesPath && stat.IsDir()) {
			// Error getting relative path OR we are looking
			// at the root path. Skip in both situations.
			return nil
		}

		if stat.IsDir() {
			return nil
		}

		if err = uploadFileToS3(bucket, fpath, path.Join(bucketpath, relFilePath)); err != nil {
			log.Warnf("Uploading %s to s3 failed: %v", fpath, err)
			return err
		}

		// add to html
		image := "default"
		if strings.HasSuffix(relFilePath, ".sha256") || strings.HasSuffix(relFilePath, ".md5") {
			image = "text"
		}
		html += fmt.Sprintf(`<tr>
		<td valign="top"><a href="%s"><img src="/static/%s.png" alt="[ICO]"/></a></td>
		<td><a href="%s">%s</a></td>
		<td>%s</td>
		<td>%s</td>
</tr>`, relFilePath, image, relFilePath, relFilePath, humanSize(stat.Size()), stat.ModTime().Format(time.RFC3339))

		return nil
	}

	// walk the filepath
	if err := filepath.Walk(bundlesPath, walkFn); err != nil {
		return err
	}

	// add html to template
	if err := createIndexFile(bucket, bucketpath, html); err != nil {
		return err
	}

	return nil
}
Example 30
func (s *S3) Run(env *tachyon.CommandEnv) (*tachyon.Result, error) {
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return nil, err
	}

	c := s3.New(auth, aws.USWest2)
	b := c.Bucket(s.Bucket)

	res := tachyon.NewResult(true)

	res.Add("bucket", s.Bucket)
	res.Add("remote", s.At)

	if s.PutFile != "" {
		path := env.Paths.File(s.PutFile)

		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}

		if f == nil {
			return nil, fmt.Errorf("Unknown local file %s", s.PutFile)
		}

		defer f.Close()

		var perm s3.ACL

		if s.Public {
			if s.Writable {
				perm = s3.PublicReadWrite
			} else {
				perm = s3.PublicRead
			}
		} else {
			perm = s3.Private
		}

		ct := s.ContentType
		if ct == "" {
			ct = "application/octet-stream"
		}

		fi, err := f.Stat()
		if err != nil {
			return nil, err
		}

		var (
			input io.Reader
			opts  s3.Options
			size  int64
		)

		h := md5.New()

		if s.GZip {
			var buf bytes.Buffer

			z := gzip.NewWriter(io.MultiWriter(h, &buf))

			_, err = io.Copy(z, f)
			if err != nil {
				return nil, err
			}

			z.Close()

			opts.ContentEncoding = "gzip"

			input = &buf
			size = int64(buf.Len())
		} else {
			input = io.TeeReader(f, h)
			size = fi.Size()
		}

		err = b.PutReader(s.At, input, size, ct, perm, opts)
		if err != nil {
			return nil, err
		}

		rep, err := b.Head(s.At, nil)
		if err != nil {
			return nil, err
		}

		localMD5 := hex.EncodeToString(h.Sum(nil))

		res.Add("wrote", size)
		res.Add("local", s.PutFile)
		res.Add("md5", localMD5)

		etag := rep.Header.Get("ETag")
		if etag != "" {
			etag = etag[1 : len(etag)-1]

			if localMD5 != etag {
				return nil, fmt.Errorf("corruption uploading file detected")
			}
		}

	} else if s.GetFile != "" {
		f, err := os.OpenFile(s.GetFile, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, err
		}

		defer f.Close()

		i, err := b.GetReader(s.At)
		if err != nil {
			return nil, err
		}

		defer i.Close()

		n, err := io.Copy(f, i)
		if err != nil {
			return nil, err
		}

		res.Add("read", n)
		res.Add("local", s.GetFile)
	} else {
		return nil, fmt.Errorf("Specify put_file or get_file")
	}

	return res, nil
}
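
Note on the ETag comparison in the put path: S3's ETag equals the hex MD5 of the body only for plain single-part uploads like this PutReader call; multipart (and KMS-encrypted) uploads use a different ETag format, so the corruption check would need to change if the upload path ever did.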