Example #1
func (e EC2Tags) Get() (map[string]string, error) {
	tags := make(map[string]string)

	// Passing blank values here instructs the AWS library to look at the
	// current instance's metadata for the security credentials.
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return tags, fmt.Errorf("Error creating AWS authentication: %s", err)
	}

	// Find the current region and create a new EC2 connection
	region := aws.GetRegion(aws.InstanceRegion())
	ec2Client := ec2.New(auth, region)

	// Filter by the current machine's instance-id
	filter := ec2.NewFilter()
	filter.Add("resource-id", aws.InstanceId())

	// Describe the tags for the current instance
	resp, err := ec2Client.DescribeTags(filter)
	if err != nil {
		return tags, fmt.Errorf("Error downloading tags: %s", err)
	}

	// Collect the tags
	for _, tag := range resp.Tags {
		tags[tag.Key] = tag.Value
	}

	return tags, nil
}
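A minimal usage sketch for the tag fetcher above, assuming the EC2Tags type from this example is in scope and the process runs on an EC2 instance whose role allows ec2:DescribeTags (hypothetical wiring, not part of the original project):

package main

import (
	"fmt"
	"log"
)

func main() {
	// Get() resolves credentials from instance metadata, filters tags by
	// the current instance-id, and returns them as a map.
	tags, err := EC2Tags{}.Get()
	if err != nil {
		log.Fatalf("could not fetch instance tags: %v", err)
	}
	for key, value := range tags {
		fmt.Printf("%s=%s\n", key, value)
	}
}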
Example #2
func getSqsQueueSize(region, awsAccessKeyID, awsSecretAccessKey, queueName string) (int, error) {
	// Auth
	auth, err := aws.GetAuth(awsAccessKeyID, awsSecretAccessKey, "", time.Now())
	if err != nil {
		return -1, err
	}

	// SQS
	sqsClient := sqs.New(auth, aws.GetRegion(region))
	queue, err := sqsClient.GetQueue(queueName)
	if err != nil {
		return -1, err
	}

	// Get queue attribute
	attr, err := queue.GetQueueAttributes(sqsAttributeOfQueueSize)
	if err != nil {
		return -1, err
	}

	// Queue size
	size, err := strconv.Atoi(attr.Attributes[0].Value)
	if err != nil {
		return -1, err
	}

	return size, nil
}
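A hedged call-site sketch. The sqsAttributeOfQueueSize constant is not shown above; it is presumably the standard SQS attribute name for the visible message count, so a plausible definition is included here as an assumption:

// Assumed value of the constant referenced above; the goamz SQS client
// takes attribute names as plain strings.
const sqsAttributeOfQueueSize = "ApproximateNumberOfMessages"

func printQueueSize() {
	// Blank credentials make aws.GetAuth fall back to the environment or
	// instance metadata, as in the other examples; names are placeholders.
	size, err := getSqsQueueSize("us-east-1", "", "", "my-queue")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("queue size: %d\n", size)
}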
Example #3
func awsS3Auth() (aws.Auth, error) {
	// First try to authenticate using the BUILDKITE_ ENV variables
	buildkiteAuth, buildkiteErr := buildkiteS3EnvAuth()
	if buildkiteErr == nil {
		return buildkiteAuth, nil
	}

	// Passing blank values here instructs the AWS library to look at the
	// current instance's metadata for the security credentials.
	awsAuth, awsErr := aws.GetAuth("", "", "", time.Time{})
	if awsErr == nil {
		return awsAuth, nil
	}

	var err error

	// If they attempted to use the BUILDKITE_ ENV variables, return that
	// error; otherwise default to the error from AWS
	if buildkiteErr != nil && (buildkiteAuth.AccessKey != "" || buildkiteAuth.SecretKey != "") {
		err = buildkiteErr
	} else {
		err = awsErr
	}

	return aws.Auth{}, err
}
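A short wiring sketch for the fallback above. The goamz import paths assume the AdRoll fork, and the bucket and key names are placeholders:

package main

import (
	"log"

	"github.com/AdRoll/goamz/aws" // adjust to whichever goamz fork is vendored
	"github.com/AdRoll/goamz/s3"
)

func main() {
	auth, err := awsS3Auth() // from the example above
	if err != nil {
		log.Fatalf("no usable S3 credentials: %v", err)
	}
	bucket := s3.New(auth, aws.USEast).Bucket("my-artifacts")
	data, err := bucket.Get("/some/key")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d bytes", len(data))
}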
Example #4
func main() {
	flag.Parse()

	if *region == "" {
		*region = aws.InstanceRegion()
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Panic(err)
	}

	s3service := s3.New(auth, aws.GetRegion(*region))
	bucket := s3service.Bucket(flag.Arg(0))

	for {
		var entries []RoutingEntry

		data, err := bucket.Get("/routing-table.json")
		if err == nil {
			err = json.Unmarshal(data, &entries)
			if err == nil {
				updateProxies(entries)
			}
		} else {
			log.Print("could not get routing table: ", err)
		}

		time.Sleep(time.Second * 10)
	}
}
Example #5
func (s *S) TestGetAuthEnv(c *check.C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY_ID", "access")
	auth, err := aws.GetAuth("", "", "", time.Time{})
	c.Assert(err, check.IsNil)
	c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
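The test pins down goamz's environment fallback: with all-blank arguments and a zero expiration, GetAuth reads AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. A minimal non-test sketch of the same behavior (AdRoll import path assumed):

package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/AdRoll/goamz/aws"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "access")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")

	// Blank key/secret/token plus a zero expiration selects the
	// environment-variable (then instance-metadata) fallback path.
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(auth.AccessKey) // prints "access"
}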
Example #6
File: s3.go Project: jhadvig/origin
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
	if err != nil {
		return nil, err
	}

	if !params.Secure {
		params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
	}

	s3obj := s3.New(auth, params.Region)
	bucket := s3obj.Bucket(params.Bucket)

	if params.V4Auth {
		s3obj.Signature = aws.V4Signature
	} else if params.Region.Name == "eu-central-1" {
		return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
	}

	// Validate that the given credentials have at least read permissions in the
	// given bucket scope.
	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
		return nil, err
	}

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }

	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:            s3obj,
		Bucket:        bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		RootDirectory: params.RootDirectory,
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}
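A hypothetical construction of the driver. Only fields that New itself reads are set, and the bucket and directory names are placeholders:

params := DriverParameters{
	AccessKey:     os.Getenv("AWS_ACCESS_KEY_ID"),
	SecretKey:     os.Getenv("AWS_SECRET_ACCESS_KEY"),
	Region:        aws.USWest2,
	Bucket:        "registry-storage",
	RootDirectory: "/registry",
	ChunkSize:     5 * 1024 * 1024, // S3 multipart parts must be at least 5MB
	Secure:        true,
	V4Auth:        true,
	Encrypt:       false,
}

driver, err := New(params)
if err != nil {
	log.Fatal(err)
}
_ = driver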
Example #7
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucketName
func New(params DriverParameters) (*Driver, error) {
	auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{})
	if err != nil {
		return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err)
	}

	if !params.Secure {
		params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1)
	}

	s3obj := s3.New(auth, params.Region)
	bucket := s3obj.Bucket(params.Bucket)

	if params.V4Auth {
		s3obj.Signature = aws.V4Signature
	} else if params.Region.Name == "eu-central-1" {
		return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication")
	}

	// TODO Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new s3driver while another one is running on the same bucket.
	// multis, _, err := bucket.ListMulti("", "")
	// if err != nil {
	// 	return nil, err
	// }

	// for _, multi := range multis {
	// 	err := multi.Abort()
	// 	//TODO appropriate to do this error checking?
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// }

	d := &driver{
		S3:            s3obj,
		Bucket:        bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		RootDirectory: params.RootDirectory,
		zeros:         make([]byte, params.ChunkSize),
	}

	d.pool.New = func() interface{} {
		return make([]byte, d.ChunkSize)
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}
Example #8
func (s *S) TestGetAuthStatic(c *check.C) {
	exptdate := time.Now().Add(time.Hour)
	auth, err := aws.GetAuth("access", "secret", "token", exptdate)
	c.Assert(err, check.IsNil)
	c.Assert(auth.AccessKey, check.Equals, "access")
	c.Assert(auth.SecretKey, check.Equals, "secret")
	c.Assert(auth.Token(), check.Equals, "token")
	c.Assert(auth.Expiration(), check.Equals, exptdate)
}
Example #9
func initAwsBucket() {
	expiration := time.Now().Add(time.Hour * 1)
	auth, err := aws.GetAuth(config.AccessKey, config.SecretKey, "", expiration) //"" = token which isn't needed
	if err != nil {
		panic(err)
	}

	aws_bucket = s3.New(auth, aws.GetRegion(config.Region)).Bucket(config.Bucket)
}
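aws_bucket is evidently a package-level *s3.Bucket. A hedged sketch of reading through it once initAwsBucket has run:

var aws_bucket *s3.Bucket // assumed package-level declaration

func fetchObject(path string) ([]byte, error) {
	// Get issues a signed GET for the object at path in the bucket
	// configured by initAwsBucket.
	return aws_bucket.Get(path)
}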
Example #10
File: dynamodb.go Project: Comcast/rulio
func getDynamoServer(ctx *Context, region string) (*dynamodb.Server, error) {

	Log(INFO, ctx, "getDynamoServer", "region", region)

	// If we don't have real access keys, just try local.  See
	// ../local-dynamodb/.  Otherwise go for real DynamoDB.  ToDo:
	// insist on using the real DynamoDB.

	// ToDo: Combine with router/aws.go FindAWS()

	if region == "local" {
		r := aws.Region{DynamoDBEndpoint: "http://127.0.0.1:8000"}
		auth := aws.Auth{AccessKey: "DUMMY_KEY", SecretKey: "DUMMY_SECRET"}
		return dynamodb.New(auth, r), nil
	} else if strings.HasPrefix(region, "http:") {
		r := aws.Region{DynamoDBEndpoint: region}
		auth, err := aws.GetAuth("", "", "", time.Now().Add(100000*time.Hour))
		if err != nil {
			Log(INFO, ctx, "router.FindAWS", "warning", err)
			return nil, err
		}
		return dynamodb.New(auth, r), nil
	} else {
		auth, err := aws.EnvAuth()
		if err != nil {
			Log(INFO, ctx, "getDynamoServer", "warning", err, "when", "aws.EnvAuth")
			// return nil, nil, err
			// ToDo: Fix 100000 ...
			auth, err = aws.GetAuth("", "", "", time.Now().Add(100000*time.Hour))
			if err != nil {
				Log(INFO, ctx, "router.FindAWS", "warning", err)
				return nil, err
			}
		}
		r, found := aws.Regions[region]
		if !found {
			err = fmt.Errorf("Bad region name '%s'", region)
			Log(INFO, ctx, "getDynamoServer", "error", err)
			return nil, err
		}

		return dynamodb.New(auth, r), nil
	}
}
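A brief sketch of the "local" branch in use, assuming a DynamoDB Local process on 127.0.0.1:8000 and a *Context from the surrounding package; ListTables is the goamz call, everything else is illustrative:

func listLocalTables(ctx *Context) ([]string, error) {
	// "local" short-circuits credential lookup with dummy keys, which
	// DynamoDB Local accepts.
	server, err := getDynamoServer(ctx, "local")
	if err != nil {
		return nil, err
	}
	return server.ListTables()
}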
Example #11
func initAwsBucket() {

	fmt.Println("Initializing aws bucket!", config.Port)
	expiration := time.Now().Add(time.Hour * 1)
	auth, err := aws.GetAuth(config.AccessKey, config.SecretKey, "", expiration) //"" = token which isn't needed
	if err != nil {
		panic(err)
	}

	aws_bucket = s3.New(auth, aws.GetRegion(config.Region)).Bucket(config.Bucket)
}
Example #12
func New(service string, region string, tableName string) *Discovery {
	pk, err := tableDescription.BuildPrimaryKey()
	if err != nil {
		log.Fatal(err)
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatal(err)
	}
	dbServer := dynamodb.New(auth, aws.GetRegion(region))
	table := dbServer.NewTable(tableName, pk)
	return &Discovery{table: table, Service: service}
}
Example #13
func (f *Factory) New(uri *url.URL) bridge.RegistryAdapter {
	pk, err := tableDescription.BuildPrimaryKey()
	if err != nil {
		log.Fatal(err)
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatal(err)
	}

	dbServer := dynamodb.New(auth, aws.GetRegion(uri.Host))
	table := dbServer.NewTable(strings.TrimPrefix(uri.Path, "/"), pk)
	return &MikroAdapter{table: table}
}
Example #14
func (input *S3OffsetInput) Init(config interface{}) (err error) {
	conf := config.(*S3OffsetInputConfig)
	input.S3OffsetInputConfig = conf

	if conf.MetaFile != "" {
		// We already have the required metadata. Don't need to fetch it.
		input.metaFileName = conf.MetaFile
	} else if conf.ClientIdListFile != "" {
		// Load clientids from file.
		input.clientids, err = readLines(conf.ClientIdListFile)
		if err != nil {
			return fmt.Errorf("Error reading file %s for 'client_id_list': %s", conf.ClientIdListFile, err)
		}
	} else {
		return fmt.Errorf("Missing parameter: You must specify either 'client_id_list' or 'metadata_file'")
	}

	auth, err := aws.GetAuth(conf.AWSKey, conf.AWSSecretKey, "", time.Now())
	if err != nil {
		return fmt.Errorf("Authentication error: %s\n", err)
	}
	region, ok := aws.Regions[conf.AWSRegion]
	if !ok {
		return fmt.Errorf("Parameter 'aws_region' must be a valid AWS Region")
	}
	s := s3.New(auth, region)
	s.ConnectTimeout = time.Duration(conf.S3ConnectTimeout) * time.Second
	s.ReadTimeout = time.Duration(conf.S3ReadTimeout) * time.Second

	// TODO: ensure we can read from (and list, for meta) the buckets.
	input.bucket = s.Bucket(conf.S3Bucket)

	if conf.S3MetaBucket != "" {
		input.metaBucket = s.Bucket(conf.S3MetaBucket)
	} else if conf.MetaFile == "" {
		return fmt.Errorf("Parameter 's3_meta_bucket' is required unless using 'metadata_file'")
	}

	// Remove any excess path separators from the bucket prefix.
	conf.S3MetaBucketPrefix = CleanBucketPrefix(conf.S3MetaBucketPrefix)

	input.stop = make(chan bool)
	input.offsetChan = make(chan MessageLocation, 1000)

	return nil
}
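A hedged configuration sketch for the input above. The field values are placeholders, and only fields this Init actually consults are set:

conf := &S3OffsetInputConfig{
	ClientIdListFile: "/etc/heka/clientids.txt",
	AWSKey:           "", // blank: defer to env vars or instance metadata
	AWSSecretKey:     "",
	AWSRegion:        "us-west-2",
	S3Bucket:         "data-bucket",
	S3MetaBucket:     "metadata-bucket",
	S3ConnectTimeout: 60,
	S3ReadTimeout:    300,
}

var input S3OffsetInput
if err := input.Init(conf); err != nil {
	log.Fatal(err)
}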
Example #15
func (input *S3SplitFileInput) Init(config interface{}) (err error) {
	conf := config.(*S3SplitFileInputConfig)
	input.S3SplitFileInputConfig = conf

	input.schema, err = LoadSchema(conf.SchemaFile)
	if err != nil {
		return fmt.Errorf("Parameter 'schema_file' must be a valid JSON file: %s", err)
	}

	if conf.S3Bucket != "" {
		auth, err := aws.GetAuth(conf.AWSKey, conf.AWSSecretKey, "", time.Now())
		if err != nil {
			return fmt.Errorf("Authentication error: %s\n", err)
		}
		region, ok := aws.Regions[conf.AWSRegion]
		if !ok {
			return fmt.Errorf("Parameter 'aws_region' must be a valid AWS Region")
		}
		s := s3.New(auth, region)
		s.ConnectTimeout = time.Duration(conf.S3ConnectTimeout) * time.Second
		s.ReadTimeout = time.Duration(conf.S3ReadTimeout) * time.Second
		// TODO: ensure we can read from the bucket.
		input.bucket = s.Bucket(conf.S3Bucket)
	} else {
		input.bucket = nil
	}

	if conf.S3ObjectMatchRegex != "" {
		if input.objectMatch, err = regexp.Compile(conf.S3ObjectMatchRegex); err != nil {
			err = fmt.Errorf("S3SplitFileInput: %s", err)
			return
		}
	} else {
		input.objectMatch = nil
	}

	// Remove any excess path separators from the bucket prefix.
	conf.S3BucketPrefix = CleanBucketPrefix(conf.S3BucketPrefix)

	input.stop = make(chan bool)
	input.listChan = make(chan string, 1000)

	return nil
}
Example #16
func main() {
	auth, err := aws.GetAuth("", "", "", time.Time{})
	check(err)
	s := sqs.New(auth, aws.Regions["us-east-1"])
	for _, qn := range queues {
		q, err := s.GetQueue(qn)
		if err != nil {
			fmt.Printf("error for queue %s: %v\n", qn, err)
			continue
		}
		for _, an := range attrs {
			a, err := q.GetQueueAttributes(an)
			if err != nil {
				fmt.Printf(
					"error getting attribute %s for queue %s: %v\n",
					an, qn, err)
				continue
			}
			fmt.Printf(
				"%s.%s: %s\n",
				qn, a.Attributes[0].Name, a.Attributes[0].Value)
		}
	}
}
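The package-level check, queues, and attrs identifiers are not shown in this listing; plausible definitions, labeled as assumptions, would be:

// Hypothetical declarations for the globals used in main above.
var queues = []string{"jobs-high", "jobs-low"}

var attrs = []string{
	"ApproximateNumberOfMessages",
	"ApproximateNumberOfMessagesNotVisible",
}

func check(err error) {
	if err != nil {
		log.Fatal(err)
	}
}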
Example #17
func main() {
	flagBase := flag.String("base-dir", "/", "Base directory in which to look for files to export")
	flagPattern := flag.String("pattern", ".*", "Filenames must match this regular expression to be uploaded")
	flagBucket := flag.String("bucket", "default-bucket", "S3 Bucket name")
	flagBucketPrefix := flag.String("bucket-prefix", "", "S3 Bucket path prefix")
	flagAWSKey := flag.String("aws-key", "", "AWS Key")
	flagAWSSecretKey := flag.String("aws-secret-key", "", "AWS Secret Key")
	flagAWSRegion := flag.String("aws-region", "us-west-2", "AWS Region")
	flagLoop := flag.Bool("loop", false, "Run in a loop and keep watching for more files to export")
	flagDryRun := flag.Bool("dry-run", false, "Don't actually do anything, just output what would be done")
	flag.Parse()

	if flag.NArg() != 0 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	var err error
	baseStat, err := os.Stat(*flagBase)
	if err != nil || !baseStat.IsDir() {
		fmt.Printf("base-dir: %s\n", err)
		os.Exit(2)
	}

	pattern, err := regexp.Compile(*flagPattern)
	if err != nil {
		fmt.Printf("pattern: %s\n", err)
		os.Exit(3)
	}

	// fmt.Printf("Base:%s  Pattern:%s  Bucket: s3://%s/%s  AWSKey:%s / %s  Region:%s  Dry Run:%t  Loop:%t\n",
	// 	*flagBase, *flagPattern, *flagBucket, *flagBucketPrefix, *flagAWSKey, *flagAWSSecretKey, *flagAWSRegion, *flagDryRun, *flagLoop)

	var progress Progress
	var rate float64
	var uploadMB float64

	var b *s3.Bucket
	if !*flagDryRun {
		auth, err := aws.GetAuth(*flagAWSKey, *flagAWSSecretKey, "", time.Now())
		if err != nil {
			fmt.Printf("Authentication error: %s\n", err)
			os.Exit(4)
		}

		region, ok := aws.Regions[*flagAWSRegion]
		if !ok {
			fmt.Printf("Parameter 'aws-region' must be a valid AWS Region\n")
			os.Exit(5)
		}
		s := s3.New(auth, region)
		b = s.Bucket(*flagBucket)
	} else {
		// b declared and not used :(
		_ = b
	}

	for {
		progress = Progress{}
		startTime := time.Now().UTC()
		err = filepath.Walk(*flagBase, makeupload(*flagBase, pattern, b, *flagBucketPrefix, *flagDryRun, &progress))
		if err != nil {
			fmt.Printf("Error reading files from %s: %s\n", *flagBase, err)
		}

		if progress.Count > 0 {
			uploadMB = float64(progress.Bytes) / 1024.0 / 1024.0
			duration := time.Now().UTC().Sub(startTime).Seconds()

			if duration > 0 {
				rate = uploadMB / duration
			} else {
				rate = 0
			}
			fmt.Printf("Uploaded %d files containing %.2fMB in %.02fs (%.02fMB/s). Encountered %d errors.\n", progress.Count, uploadMB, duration, rate, progress.Errors)
		} else {
			// We didn't upload any files.
			if !*flagLoop {
				fmt.Println("Nothing to upload")
			} else {
				// Only sleep if we didn't find anything to upload. If we did upload
				// something, we want to try again right away.
				fmt.Println("Waiting for files to upload...")
				time.Sleep(10 * time.Second)
			}
		}

		if !*flagLoop {
			break
		}
	}
}
Example #18
func (o *S3SplitFileOutput) Init(config interface{}) (err error) {
	conf := config.(*S3SplitFileOutputConfig)
	o.S3SplitFileOutputConfig = conf
	var intPerm int64

	if intPerm, err = strconv.ParseInt(conf.FolderPerm, 8, 32); err != nil {
		err = fmt.Errorf("S3SplitFileOutput '%s' can't parse `folder_perm`, is it an octal integer string?",
			o.Path)
		return
	}
	o.folderPerm = os.FileMode(intPerm)

	if intPerm, err = strconv.ParseInt(conf.Perm, 8, 32); err != nil {
		err = fmt.Errorf("S3SplitFileOutput '%s' can't parse `perm`, is it an octal integer string?",
			o.Path)
		return
	}
	o.perm = os.FileMode(intPerm)

	if conf.MaxFileSize < 1 {
		err = fmt.Errorf("Parameter 'max_file_size' must be greater than 0.")
		return
	}
	if conf.MaxFileAge < 1 {
		err = fmt.Errorf("Parameter 'max_file_age' must be greater than 0.")
		return
	}

	if conf.MaxOpenFiles < 0 {
		err = fmt.Errorf("Parameter 'max_open_files' must not be negative.")
		return
	}
	o.fopenCache, err = lru.New(conf.MaxOpenFiles)
	if err != nil {
		// This should never happen since we already checked for negative size.
		return
	}

	// Close files as they are evicted / removed from the cache.
	o.fopenCache.OnEvicted = func(key interface{}, val interface{}) {
		// If it's not a file, we don't care about it.
		switch t := val.(type) {
		case *os.File:
			t.Close()
		}
	}

	o.dimFiles = map[string]*SplitFileInfo{}

	// TODO: fall back to default schema.
	//fmt.Printf("schema_file = '%s'\n", conf.SchemaFile)
	if conf.SchemaFile == "" {
		err = fmt.Errorf("Parameter 'schema_file' is missing")
		return
	}

	o.schema, err = LoadSchema(conf.SchemaFile)
	if err != nil {
		return fmt.Errorf("Parameter 'schema_file' must be a valid JSON file: %s", err)
	}

	if conf.S3Bucket != "" {
		auth, err := aws.GetAuth(conf.AWSKey, conf.AWSSecretKey, "", time.Now())
		if err != nil {
			return fmt.Errorf("Authentication error: %s\n", err)
		}
		region, ok := aws.Regions[conf.AWSRegion]
		if !ok {
			return fmt.Errorf("Parameter 'aws_region' must be a valid AWS Region")
		}
		s := s3.New(auth, region)
		s.ConnectTimeout = time.Duration(conf.S3ConnectTimeout) * time.Second
		s.ReadTimeout = time.Duration(conf.S3ReadTimeout) * time.Second
		// TODO: ensure we can write to the bucket.
		o.bucket = s.Bucket(conf.S3Bucket)
	} else {
		o.bucket = nil
	}

	// Remove any excess path separators from the bucket prefix.
	conf.S3BucketPrefix = fmt.Sprintf("/%s", strings.Trim(conf.S3BucketPrefix, "/"))

	o.publishChan = make(chan PublishAttempt, 1000)

	o.shuttingDown = false

	return
}
Example #19
File: main.go Project: tly1980/dynoclone
func main() {
	flag.Parse()
	default_cond := []dynamodb.AttributeComparison{}
	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatal("Failed to auth: ", err)
	}
	aws_region := aws.Regions[*region]

	srcDesc, _, err := sniff(
		&auth, aws_region, *tableSrc, *tableDst)

	if err != nil {
		log.Fatal("Failed to describe table.", err)
	}

	work := make(chan map[string]*dynamodb.Attribute, *numIn)
	work2 := make(chan map[string]*dynamodb.Attribute, WORK2_BUFFER)
	done := make(chan string)
	events := make(chan Event, EVENTS_BUFFER)
	var readers = make([]*Reader, *numIn)
	var writers = make([]*Writer, *numOut)
	mon := newMonitor(*tableSrc, *tableDst,
		srcDesc.ItemCount, *tps,
		done, events, readers, writers)

	// this one never requires a done signal
	go mon.run()

	// read (4) => speed regulator (1) => write (4)
	for i := 0; i < *numIn; i++ {
		r := newReader(*tableSrc, &auth, aws_region,
			default_cond,
			i, *numIn, READ_BATCH,
			work,
			done, events)

		readers[i] = r
		go r.run()
	}

	// start a regulator to control the speed
	go regulator_thread(*tps,
		work, work2, done)

	for j := 0; j < *numOut; j++ {
		w := newWriter(j,
			*tableDst, &auth, aws_region,
			*batchSize, work2, done, events)
		writers[j] = w
		go w.run()
	}

	mon.show()

	//wait for reader
	for i := 0; i < *numIn; i++ {
		mon.drain()
	}
	close(work)

	// wait for regulator
	mon.drain()

	close(work2)

	//wait for writer
	for j := 0; j < *numOut; j++ {
		mon.drain()
	}

	close(events)
	// for mon
	mon.drain()

	mon.show()
}
Example #20
func main() {
	flagMatch := flag.String("match", "TRUE", "message_matcher filter expression")
	flagFormat := flag.String("format", "txt", "output format [txt|json|heka|count]")
	flagOutput := flag.String("output", "", "output filename, defaults to stdout")
	flagStdin := flag.Bool("stdin", false, "read list of s3 key names from stdin")
	flagBucket := flag.String("bucket", "default-bucket", "S3 Bucket name")
	flagAWSKey := flag.String("aws-key", "", "AWS Key")
	flagAWSSecretKey := flag.String("aws-secret-key", "", "AWS Secret Key")
	flagAWSRegion := flag.String("aws-region", "us-west-2", "AWS Region")
	flagMaxMessageSize := flag.Uint64("max-message-size", 4*1024*1024, "maximum message size in bytes")
	flagWorkers := flag.Uint64("workers", 16, "number of parallel workers")
	flagConnectTimeout := flag.Uint64("connect_timeout", 60, "Max seconds to wait for an S3 connection")
	flagReadTimeout := flag.Uint64("read_timeout", 300, "Max seconds to wait for an S3 file read to complete")
	flag.Parse()

	if !*flagStdin && flag.NArg() < 1 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	if *flagMaxMessageSize < math.MaxUint32 {
		maxSize := uint32(*flagMaxMessageSize)
		message.SetMaxMessageSize(maxSize)
	} else {
		fmt.Fprintf(os.Stderr, "Message size is too large: %d\n", flagMaxMessageSize)
		os.Exit(8)
	}

	workers := 1
	if *flagWorkers == 0 {
		fmt.Fprintf(os.Stderr, "Cannot run with zero workers. Using 1.\n")
	} else if *flagWorkers < 2000 {
		workers = int(*flagWorkers)
	} else {
		fmt.Fprintf(os.Stderr, "Too many workers: %d. Use a reasonable value (up to a few hundred).\n", flagWorkers)
		os.Exit(8)
	}

	var connectTimeout uint32
	if *flagConnectTimeout < math.MaxUint32 {
		connectTimeout = uint32(*flagConnectTimeout)
	} else {
		fmt.Fprintf(os.Stderr, "Connection Timeout is too large:%d.\n", flagConnectTimeout)
		os.Exit(8)
	}

	var readTimeout uint32
	if *flagReadTimeout < math.MaxUint32 {
		readTimeout = uint32(*flagReadTimeout)
	} else {
		fmt.Fprintf(os.Stderr, "Read Timeout is too large:%d.\n", flagReadTimeout)
		os.Exit(8)
	}

	var err error
	var match *message.MatcherSpecification
	if match, err = message.CreateMatcherSpecification(*flagMatch); err != nil {
		fmt.Fprintf(os.Stderr, "Match specification - %s\n", err)
		os.Exit(2)
	}

	var out *os.File
	if "" == *flagOutput {
		out = os.Stdout
	} else {
		if out, err = os.OpenFile(*flagOutput, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", err)
			os.Exit(3)
		}
		defer out.Close()
	}

	auth, err := aws.GetAuth(*flagAWSKey, *flagAWSSecretKey, "", time.Now())
	if err != nil {
		fmt.Fprintf(os.Stderr, "Authentication error: %s\n", err)
		os.Exit(4)
	}
	region, ok := aws.Regions[*flagAWSRegion]
	if !ok {
		fmt.Fprintf(os.Stderr, "Parameter 'aws-region' must be a valid AWS Region\n")
		os.Exit(5)
	}
	s := s3.New(auth, region)
	if connectTimeout > 0 {
		s.ConnectTimeout = time.Duration(connectTimeout) * time.Second
	}
	if readTimeout > 0 {
		s.ReadTimeout = time.Duration(readTimeout) * time.Second
	}
	bucket := s.Bucket(*flagBucket)

	filenameChannel := make(chan string, 1000)
	recordChannel := make(chan s3splitfile.S3Record, 1000)
	doneChannel := make(chan string, 1000)
	allDone := make(chan int)

	for i := 1; i <= workers; i++ {
		go cat(bucket, filenameChannel, recordChannel, doneChannel)
	}
	go save(recordChannel, match, *flagFormat, out, allDone)

	startTime := time.Now().UTC()
	totalFiles := 0
	pendingFiles := 0
	if *flagStdin {
		scanner := bufio.NewScanner(os.Stdin)
		for scanner.Scan() {
			filename := scanner.Text()
			totalFiles++
			pendingFiles++
			filenameChannel <- filename
			if pendingFiles >= 1000 {
				waitFor(doneChannel, 1)
				pendingFiles--
			}
		}
		close(filenameChannel)
	} else {
		for _, filename := range flag.Args() {
			totalFiles++
			pendingFiles++
			filenameChannel <- filename
			if pendingFiles >= 1000 {
				waitFor(doneChannel, 1)
				pendingFiles--
			}
		}
		close(filenameChannel)
	}

	fmt.Fprintf(os.Stderr, "Waiting for last %d files\n", pendingFiles)
	waitFor(doneChannel, pendingFiles)
	close(recordChannel)
	bytesRead := <-allDone
	// All done! Win!
	duration := time.Now().UTC().Sub(startTime).Seconds()
	mb := float64(bytesRead) / 1024.0 / 1024.0
	if duration == 0.0 {
		duration = 1.0
	}
	fmt.Fprintf(os.Stderr, "All done processing %d files, %.2fMB in %.2f seconds (%.2fMB/s)\n", totalFiles, mb, duration, (mb / duration))
}
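waitFor is referenced but not defined in this listing. A minimal sketch consistent with its call sites, blocking until n completion tokens arrive on the done channel:

// Hypothetical implementation of the waitFor helper used above.
func waitFor(done chan string, n int) {
	for i := 0; i < n; i++ {
		<-done
	}
}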
Example #21
func main() {
	flagSchema := flag.String("schema", "", "Filename of the schema to use as a filter")
	flagBucket := flag.String("bucket", "default-bucket", "S3 Bucket name")
	flagBucketPrefix := flag.String("bucket-prefix", "", "S3 Bucket path prefix")
	flagAWSKey := flag.String("aws-key", "", "AWS Key")
	flagAWSSecretKey := flag.String("aws-secret-key", "", "AWS Secret Key")
	flagAWSRegion := flag.String("aws-region", "us-west-2", "AWS Region")
	flagDryRun := flag.Bool("dry-run", false, "Don't actually do anything, just output what would be done")
	flagVerbose := flag.Bool("verbose", false, "Print detailed info")
	flag.Parse()

	if flag.NArg() != 0 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	var err error
	var schema s3splitfile.Schema
	schema, err = s3splitfile.LoadSchema(*flagSchema)
	if err != nil {
		fmt.Printf("schema: %s\n", err)
		os.Exit(2)
	}

	if *flagDryRun {
		fmt.Printf("Dry Run: Would have listed files in s3://%s/%s according to filter schema %s\n",
			*flagBucket, *flagBucketPrefix, *flagSchema)
		os.Exit(0)
	}

	var b *s3.Bucket

	prefix := s3splitfile.CleanBucketPrefix(*flagBucketPrefix)

	// Initialize the S3 bucket
	auth, err := aws.GetAuth(*flagAWSKey, *flagAWSSecretKey, "", time.Now())
	if err != nil {
		fmt.Printf("Authentication error: %s\n", err)
		os.Exit(4)
	}
	region, ok := aws.Regions[*flagAWSRegion]
	if !ok {
		fmt.Printf("Parameter 'aws-region' must be a valid AWS Region\n")
		os.Exit(5)
	}
	s := s3.New(auth, region)
	b = s.Bucket(*flagBucket)

	var errCount int
	var totalCount int
	var totalSize int64

	startTime := time.Now().UTC()

	// List the keys as we see them
	for k := range s3splitfile.S3Iterator(b, prefix, schema) {
		if k.Err != nil {
			fmt.Printf("ERROR fetching key: %s\n", k.Err)
			errCount++
		} else {
			totalCount++
			totalSize += k.Key.Size
			fmt.Printf("%s\n", k.Key.Key)
		}
	}

	duration := time.Now().UTC().Sub(startTime).Seconds()

	if *flagVerbose {
		fmt.Printf("Filter matched %d files totaling %s in %.02fs (%d errors)\n",
			totalCount, s3splitfile.PrettySize(totalSize), duration, errCount)
	}
}
Example #22
func main() {
	flagStdin := flag.Bool("stdin", false, "read list of s3 key names from stdin")
	flagBucket := flag.String("bucket", "default-bucket", "S3 Bucket name")
	flagAWSKey := flag.String("aws-key", "", "AWS Key")
	flagAWSSecretKey := flag.String("aws-secret-key", "", "AWS Secret Key")
	flagAWSRegion := flag.String("aws-region", "us-west-2", "AWS Region")
	flagConnectTimeout := flag.Uint64("connect_timeout", 60, "Max seconds to wait for an S3 connection")
	flagReadTimeout := flag.Uint64("read_timeout", 300, "Max seconds to wait for an S3 file read to complete")
	flag.Parse()

	if !*flagStdin && flag.NArg() < 1 {
		flag.PrintDefaults()
		os.Exit(1)
	}

	var connectTimeout uint32
	if *flagConnectTimeout < math.MaxUint32 {
		connectTimeout = uint32(*flagConnectTimeout)
	} else {
		fmt.Fprintf(os.Stderr, "Connection Timeout is too large:%d.\n", flagConnectTimeout)
		os.Exit(8)
	}

	var readTimeout uint32
	if *flagReadTimeout < math.MaxUint32 {
		readTimeout = uint32(*flagReadTimeout)
	} else {
		fmt.Fprintf(os.Stderr, "Read Timeout is too large:%d.\n", flagReadTimeout)
		os.Exit(8)
	}

	auth, err := aws.GetAuth(*flagAWSKey, *flagAWSSecretKey, "", time.Now())
	if err != nil {
		fmt.Fprintf(os.Stderr, "Authentication error: %s\n", err)
		os.Exit(4)
	}
	region, ok := aws.Regions[*flagAWSRegion]
	if !ok {
		fmt.Fprintf(os.Stderr, "Parameter 'aws-region' must be a valid AWS Region\n")
		os.Exit(5)
	}
	s := s3.New(auth, region)
	if connectTimeout > 0 {
		s.ConnectTimeout = time.Duration(connectTimeout) * time.Second
	}
	if readTimeout > 0 {
		s.ReadTimeout = time.Duration(readTimeout) * time.Second
	}
	bucket := s.Bucket(*flagBucket)

	startTime := time.Now().UTC()
	totalFiles := 0
	if *flagStdin {
		scanner := bufio.NewScanner(os.Stdin)
		for scanner.Scan() {
			filename := scanner.Text()
			totalFiles++
			cat(bucket, filename)
		}
	} else {
		for _, filename := range flag.Args() {
			totalFiles++
			cat(bucket, filename)
		}
	}

	duration := time.Now().UTC().Sub(startTime).Seconds()
	mb := float64(bytesRead) / 1024.0 / 1024.0
	if duration == 0.0 {
		duration = 1.0
	}
	fmt.Fprintf(os.Stderr, "All done processing %d files, %.2fMB in %.2f seconds (%.2fMB/s)\n", totalFiles, mb, duration, (mb / duration))
}