func (r53 *Route53) updateAuth() {
	r53.authLock.Lock()

	// update auth
	auth, err := aws.GetAuth("", "", "", time.Time{})
	for ; err != nil; auth, err = aws.GetAuth("", "", "", time.Time{}) {
		if debug {
			log.Printf("[Route53] Error getting auth (sleeping 5s before retry): %v", err)
		}
		time.Sleep(5 * time.Second)
	}

	r53.auth = auth
	if debug {
		log.Printf("[Route53] auth updated. expires at %v.", auth.Expiration())
	}

	r53.authLock.Unlock()
}
// FetchMetrics fetches ElastiCache metric values.
func (p ECachePlugin) FetchMetrics() (map[string]float64, error) {
	auth, err := aws.GetAuth(p.AccessKeyID, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	cloudWatch, err := cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return nil, err
	}

	stat := make(map[string]float64)

	perInstances := &[]cloudwatch.Dimension{
		{
			Name:  "CacheClusterId",
			Value: p.CacheClusterID,
		},
		{
			Name:  "CacheNodeId",
			Value: p.CacheNodeID,
		},
	}

	for _, met := range p.CacheMetrics {
		v, err := getLastPoint(cloudWatch, perInstances, met)
		if err == nil {
			stat[met] = v
		} else {
			log.Printf("%s: %s", met, err)
		}
	}

	return stat, nil
}
func main() {
	kingpin.Version("1.2.1")
	kingpin.Parse()

	sl, err := syslog.New(syslog.LOG_NOTICE|syslog.LOG_LOCAL0, "[varnish-purge-proxy]")
	if err != nil {
		log.Println("Error writing to syslog")
	} else {
		// Only close the syslog writer if it was created successfully.
		defer sl.Close()
		log.SetFlags(0)
		log.SetOutput(sl)
	}

	if len(*tags) == 0 {
		fmt.Println("No tags specified")
		return
	}

	// Set up access to ec2
	auth, err := aws.GetAuth("", "", "", time.Now().Add(24*365*time.Hour))
	if err != nil {
		log.Println(err)
		return
	}

	ec2region := ec2.New(auth, region)
	go serveHTTP(*port, ec2region)

	select {}
}
// FetchMetrics fetches the CPU credit metrics.
func (p CPUCreditPlugin) FetchMetrics() (map[string]float64, error) {
	region := aws.Regions[p.Region]

	dimension := &cloudwatch.Dimension{
		Name:  "InstanceId",
		Value: p.InstanceID,
	}

	auth, err := aws.GetAuth(p.AccessKeyID, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}
	cw, err := cloudwatch.NewCloudWatch(auth, region.CloudWatchServicepoint)
	if err != nil {
		return nil, err
	}

	stat := make(map[string]float64)

	stat["usage"], err = getLastPointAverage(cw, dimension, "CPUCreditUsage")
	if err != nil {
		return nil, err
	}

	stat["balance"], err = getLastPointAverage(cw, dimension, "CPUCreditBalance")
	if err != nil {
		return nil, err
	}

	return stat, nil
}
func (p RDSPlugin) FetchMetrics() (map[string]float64, error) {
	auth, err := aws.GetAuth(p.AccessKeyId, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	cloudWatch, err := cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return nil, err
	}

	stat := make(map[string]float64)

	perInstance := &cloudwatch.Dimension{
		Name:  "DBInstanceIdentifier",
		Value: p.Identifier,
	}

	for _, met := range [...]string{
		"BinLogDiskUsage", "CPUUtilization", "DatabaseConnections", "DiskQueueDepth",
		"FreeableMemory", "FreeStorageSpace", "ReplicaLag", "SwapUsage",
		"ReadIOPS", "WriteIOPS", "ReadLatency", "WriteLatency",
		"ReadThroughput", "WriteThroughput", "NetworkTransmitThroughput", "NetworkReceiveThroughput",
	} {
		v, err := GetLastPoint(cloudWatch, perInstance, met)
		if err == nil {
			stat[met] = v
		} else {
			log.Printf("%s: %s", met, err)
		}
	}

	return stat, nil
}
func GetAuthFromEnv() aws.Auth {
	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil && os.Getenv("CLOUD_ENVIRONMENT") == "PRODUCTION" {
		log.Fatalln("Failed to receive auth from env")
	}
	return auth
}
func (s *S3) getAuth() (err error) {
	s.auth, err = aws.GetAuth(s.AccessKey, s.SecretKey, "", time.Time{})
	if s.s3 != nil {
		s.s3.Auth = s.auth
	}
	return
}
// FetchMetrics interface for mackerel-plugin
func (p RDSPlugin) FetchMetrics() (map[string]float64, error) {
	auth, err := aws.GetAuth(p.AccessKeyID, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	cloudWatch, err := cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return nil, err
	}

	stat := make(map[string]float64)

	perInstance := &cloudwatch.Dimension{
		Name:  "DBInstanceIdentifier",
		Value: p.Identifier,
	}

	for _, met := range p.rdsMetrics() {
		v, err := getLastPoint(cloudWatch, perInstance, met)
		if err == nil {
			stat[met] = v
		} else {
			log.Printf("%s: %s", met, err)
		}
	}

	return stat, nil
}
func (app *AppContext) setupS3Logger() (err error) {
	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatalln("Failed to find AWS credentials in env")
	}

	awsConnection := s3.New(
		auth,
		getAWSRegion(app.config.aws_region),
	)
	bucket := awsConnection.Bucket(app.config.bucket)

	instanceInfo := keygen.BuildInstanceInfo(
		&keygen.EnvInstanceFetcher{},
		serviceName,
		app.config.logging_dir,
	)

	rotateCoordinator := gologging.NewRotateCoordinator(
		app.config.max_log_lines,
		app.config.max_log_age,
	)

	metricsLogger := MetricsLogger{app.metrics}

	app.s3log, err = gologging.StartS3Logger(
		rotateCoordinator,
		instanceInfo,
		&metricsLogger,
		&uploader.S3UploaderBuilder{
			Bucket: bucket,
			KeyNameGenerator: &KeyNameGenerator{
				Info:   instanceInfo,
				Prefix: app.config.key_prefix,
			},
		},
		&metricsLogger,
		app.config.num_workers,
	)
	if err != nil {
		return
	}

	// Make sure logger is flushed when shutdown signal is received
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		<-sigc
		log.Println("interrupted, closing logger...")
		app.s3log.Close()
		os.Exit(0)
	}()

	return nil
}
func s3Setup(bucketName string, path string, opts sequinsOptions) *sequins {
	auth, err := aws.GetAuth(*s3AccessKey, *s3SecretKey, "", time.Time{})
	if err != nil {
		log.Fatal(err)
	}

	regionName := *s3Region
	if regionName == "" {
		regionName = aws.InstanceRegion()
		if regionName == "" {
			log.Fatal("Unspecified --s3-region, and no instance region found.")
		}
	}

	region, exists := aws.Regions[regionName]
	if !exists {
		log.Fatalf("Invalid AWS region: %s", regionName)
	}

	bucket := s3.New(auth, region).Bucket(bucketName)
	backend := backend.NewS3Backend(bucket, path)

	if opts.LocalPath == "" {
		tmpDir, err := ioutil.TempDir("", "sequins-")
		if err != nil {
			log.Fatal(err)
		}
		opts.LocalPath = tmpDir
	}

	return newSequins(backend, opts)
}
func setupS3() *backend.S3Backend {
	fakeS3, _ := s3test.NewServer(&s3test.Config{})

	// cargo-culted from s3test
	fakeRegion := aws.Region{
		Name:                 "faux-region-1",
		S3Endpoint:           fakeS3.URL(),
		S3LocationConstraint: true,
	}

	auth, _ := aws.GetAuth("foo", "bar", "", time.Time{})
	bucket := s3.New(auth, fakeRegion).Bucket("sequinstest")
	bucket.PutBucket("")

	putFile(bucket, "test_data/0/part-00000")
	putFile(bucket, "test_data/0/part-00001")
	putFile(bucket, "test_data/0/_SUCCESS")
	putFile(bucket, "test_data/1/part-00000")
	putFile(bucket, "test_data/1/part-00001")
	bucket.Put("test_data/foo", []byte("nothing"), "", "", s3.Options{})

	return backend.NewS3Backend(bucket, "test_data")
}
func main() {
	flag.Parse()

	log.SetOutput(os.Stdout)

	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		log.Fatalln("Failed to receive auth")
	}

	stats, err := lib.InitStats(statsPrefix)
	if err != nil {
		log.Fatalln("Error initializing stats:", err)
	}

	postgresBackend, err := metadata.NewPostgresStorer(&pgConfig)
	if err != nil {
		log.Fatalln("Error initializing Postgres storer:", err)
	}

	listener := StartWorker(&listener.SQSAddr{
		Region:    aws.USWest2,
		QueueName: "spade-compactor-" + env,
		Auth:      auth,
	}, stats, postgresBackend)

	wait := make(chan struct{})
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT)
	go func() {
		<-sigc
		// Cause flush
		listener.Close()
		close(wait)
	}()

	<-wait
}
func main() {
	var inst_id string
	if instanceID == "" {
		inst_id = aws.InstanceId()
		if inst_id == "unknown" {
			log.Fatalln("Unable to get instance id")
		}
	} else {
		inst_id = instanceID
	}

	auth, err := aws.GetAuth(accesskey, secretkey, "", time.Time{})
	if err != nil {
		log.Fatalln("Unable to get AWS auth", err)
	}

	awsec2 = ec2.New(auth, aws.GetRegion(region))

	groupMap := getSecurityGroupIds(inst_id)
	for _, id := range securityGroupIDs {
		groupMap[id] = true
	}
	groupIds := make([]string, 0, len(groupMap))
	for id := range groupMap {
		groupIds = append(groupIds, id)
	}

	opts := &ec2.ModifyInstanceAttributeOptions{SecurityGroups: ec2.SecurityGroupIds(groupIds...)}
	resp, err := awsec2.ModifyInstanceAttribute(inst_id, opts)
	if err != nil || !resp.Return {
		log.Fatalln("Error adding security groups to instance", err)
	}
	log.Printf("Added security groups %s to instance %s\n", securityGroupIDs.String(), inst_id)

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)

	// this waits until we get a kill signal
	<-c

	groupMap = getSecurityGroupIds(inst_id)
	for _, id := range securityGroupIDs {
		delete(groupMap, id)
	}
	groupIds = make([]string, 0, len(groupMap))
	for id := range groupMap {
		groupIds = append(groupIds, id)
	}

	opts = &ec2.ModifyInstanceAttributeOptions{SecurityGroups: ec2.SecurityGroupIds(groupIds...)}
	resp, err = awsec2.ModifyInstanceAttribute(inst_id, opts)
	if err != nil || !resp.Return {
		log.Fatalln("Error removing security groups from instance", err)
	}
	log.Printf("Removed security groups %s from instance %s\n", securityGroupIDs.String(), inst_id)
}
func (s *S) TestGetAuthEnv(c *check.C) {
	os.Clearenv()
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
	os.Setenv("AWS_ACCESS_KEY_ID", "access")
	auth, err := aws.GetAuth("", "", "", time.Time{})
	c.Assert(err, check.IsNil)
	c.Assert(auth, check.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthStatic(c *check.C) {
	exptdate := time.Now().Add(time.Hour)
	auth, err := aws.GetAuth("access", "secret", "token", exptdate)
	c.Assert(err, check.IsNil)
	c.Assert(auth.AccessKey, check.Equals, "access")
	c.Assert(auth.SecretKey, check.Equals, "secret")
	c.Assert(auth.Token(), check.Equals, "token")
	c.Assert(auth.Expiration(), check.Equals, exptdate)
}
// create a new s3 client from the url func newS3Client(config config.Config) (*s3.S3, error) { auth, err := aws.GetAuth(config.AWS.AccessKeyID, config.AWS.SecretAccessKey, "", time.Now()) if err != nil { return &s3.S3{}, err } if config.AWS.Region == "" { return nil, errors.New("Region not set for S3 client lib (missing SetS3URL?)") } return s3.New(auth, aws.Regions[config.AWS.Region]), nil }
func (p *ESPlugin) prepare() error {
	auth, err := aws.GetAuth(p.AccessKeyID, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return err
	}

	p.CloudWatch, err = cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return err
	}

	return nil
}
func New() (*Route53, error) {
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return nil, err
	}

	r53 := &Route53{
		auth:     auth,
		authLock: sync.RWMutex{},
	}
	go r53.updateAuthLoop()

	return r53, nil
}
func main() {
	route53.DebugOn()

	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		fmt.Fprintln(os.Stderr, "error: no aws credentials available")
		os.Exit(255)
	}
	r53 = route53.NewWithAuth(auth)

	NewClient().Parse()
}
func GetBucket(bucketPrefix string) (*s3.Bucket, error) {
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return nil, err
	}

	s := s3.New(auth, aws.USWest2)
	s.ConnectTimeout = time.Second * 30
	s.ReadTimeout = time.Second * 30

	bucketName := strings.TrimPrefix(bucketPrefix, "s3://") + "-" + environment.GetCloudEnv()
	return s.Bucket(bucketName), nil
}
func main() {
	flag.Parse()

	auth, err := aws.GetAuth("", "", "", time.Now().Add(time.Hour))
	if err != nil {
		panic(err)
	}

	filter := ec2.NewFilter()
	filter.Add("instance-state-name", "running")
	filter.Add("tag:env", "prod")

	c := ec2.New(auth, aws.USEast)
	resp, err := c.DescribeInstances(nil, filter)
	if err != nil {
		log.Panicln(err)
	}

	rezzies := resp.Reservations
	for _, rv := range rezzies {
		for _, inst := range rv.Instances {
			if len(inst.BlockDevices) < 3 {
				for _, bd := range inst.BlockDevices {
					vid := bd.EBS.VolumeId
					name, err := getName(inst.Tags)
					if err != nil {
						log.Fatalf("Error getting name: %v", err)
					}
					log.Printf("Creating snapshot for: %s volume: %v\n", name, vid)

					stamp := time.Now().UTC().Format(TIME_FORMAT)
					snprsp, err := c.CreateSnapshot(vid, fmt.Sprintf("%s %s %s", name, *period, stamp))
					if err != nil {
						log.Printf("Failed to snap: %s, Error: %s\n", vid, err)
						break
					}

					log.Printf("Created snap: %s\n", snprsp.Id)
					t := ec2.Tag{Key: "inst_snap", Value: fmt.Sprintf("%s/%s", inst.InstanceId, *period)}
					tags := append(inst.Tags, t)
					wg.Add(1)
					go tagSnapshot(inst.InstanceId, snprsp.Id, tags, c)
				}
			}
		}
	}

	wg.Wait()
}
// FetchMetrics interface for mackerel plugin
func (p SESPlugin) FetchMetrics() (map[string]float64, error) {
	if p.Endpoint == "" {
		return nil, errors.New("no endpoint")
	}

	auth, err := aws.GetAuth(p.AccessKeyID, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	sescfg := ses.Config{
		AccessKeyID:     auth.AccessKey,
		SecretAccessKey: auth.SecretKey,
		SecurityToken:   auth.Token(),
		Endpoint:        p.Endpoint,
	}

	stat := make(map[string]float64)

	quota, err := sescfg.GetSendQuota()
	if err == nil {
		stat["SentLast24Hours"] = quota.SentLast24Hours
		stat["Max24HourSend"] = quota.Max24HourSend
		stat["MaxSendRate"] = quota.MaxSendRate
	}

	datapoints, err := sescfg.GetSendStatistics()
	if err == nil {
		latest := ses.SendDataPoint{
			Timestamp: time.Unix(0, 0),
		}
		for _, dp := range datapoints {
			if latest.Timestamp.Before(dp.Timestamp) {
				latest = dp
			}
		}
		stat["Complaints"] = float64(latest.Complaints)
		stat["DeliveryAttempts"] = float64(latest.DeliveryAttempts)
		stat["Bounces"] = float64(latest.Bounces)
		stat["Rejects"] = float64(latest.Rejects)
	}

	return stat, nil
}
func GetAWSAuth() (aws.Auth, error) {
	// First see if we have AWS environment variables and auth with those.
	auth, err := aws.EnvAuth()
	if err == nil {
		// The environment variables worked; use them.
		return auth, nil
	}

	// If the environment didn't work, try to get credentials from the IAM role.
	auth, err = aws.GetAuth("", "", "", time.Now().AddDate(1, 1, 1))
	if err != nil {
		return aws.Auth{}, err
	}

	return auth, nil
}
// create a new s3 client from the url
func newS3Client(config config.Config) (*s3.S3, error) {
	auth, err := aws.GetAuth(config.AWS.AccessKeyID, config.AWS.SecretAccessKey, "", time.Now())
	if err != nil {
		return &s3.S3{}, err
	}

	var regionName string
	regQuery := config.AWS.S3URL.Query()["region"]
	if len(regQuery) > 0 && regQuery[0] != "" {
		regionName = regQuery[0]
	} else {
		regionName = S3DefaultRegion
	}

	region := aws.Regions[regionName]
	return s3.New(auth, region), nil
}
func MakeDynamoDBStore(awsAccessKey, awsSecretKey string) *TDynamoDBStore {
	var (
		auth aws.Auth
		pk   dynamodb.PrimaryKey
	)

	contract.RequireNoErrors(
		func() (err error) {
			auth, err = aws.GetAuth(awsAccessKey, awsSecretKey, auth.Token(), auth.Expiration())
			return
		},
		func() (err error) {
			desc := DynamoDBDemoTableDescription()
			pk, err = desc.BuildPrimaryKey()
			return
		})

	dynamo := dynamodb.Server{auth, aws.USWest2} // hardcode ftw
	table := dynamo.NewTable(DynamoDbDemoTable, pk)

	return &TDynamoDBStore{&dynamo, table}
}
func (p *ELBPlugin) prepare() error {
	auth, err := aws.GetAuth(p.AccessKeyID, p.SecretAccessKey, "", time.Now())
	if err != nil {
		return err
	}

	p.CloudWatch, err = cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
	if err != nil {
		return err
	}

	ret, err := p.CloudWatch.ListMetrics(&cloudwatch.ListMetricsRequest{
		Namespace: "AWS/ELB",
		Dimensions: []cloudwatch.Dimension{
			{
				Name: "AvailabilityZone",
			},
		},
		MetricName: "HealthyHostCount",
	})
	if err != nil {
		return err
	}

	p.AZs = make([]string, 0, len(ret.ListMetricsResult.Metrics))
	for _, met := range ret.ListMetricsResult.Metrics {
		if len(met.Dimensions) > 1 {
			continue
		} else if met.Dimensions[0].Name != "AvailabilityZone" {
			continue
		}
		p.AZs = append(p.AZs, met.Dimensions[0].Value)
	}

	return nil
}
func (s *S) SetUpSuite(c *C) {
	testServer.Start()
	auth, _ := aws.GetAuth("abc", "123", "", time.Time{})
	client := s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})

	tempDir, err := ioutil.TempDir("", "dogestry-test")
	if err != nil {
		c.Fatalf("couldn't get tempdir. Error: %s", err)
	}
	s.TempDir = tempDir

	baseConfig, err := config.NewConfig(false)
	if err != nil {
		c.Fatalf("couldn't initialize config. Error: %s", err)
	}

	s.remote = &S3Remote{
		config:     baseConfig,
		BucketName: "bucket",
		client:     client,
	}
}
func main() {
	flag.Parse()

	stats, err := initStatsd(*stats_prefix, os.Getenv("STATSD_HOSTPORT"))
	if err != nil {
		log.Fatalf("Statsd configuration error: %v", err)
	}

	auth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Fatalln("Failed to receive auth from env")
	}
	awsConnection := s3.New(
		auth,
		aws.USWest2,
	)

	auditBucket := awsConnection.Bucket(auditBucketName + "-" + CLOUD_ENV)
	auditBucket.PutBucket(s3.BucketOwnerFull)
	eventBucket := awsConnection.Bucket(eventBucketName + "-" + CLOUD_ENV)
	eventBucket.PutBucket(s3.BucketOwnerFull)

	auditInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge_audit", *logging_dir)
	loggingInfo := gen.BuildInstanceInfo(&gen.EnvInstanceFetcher{}, "spade_edge", *logging_dir)

	auditRotateCoordinator := gologging.NewRotateCoordinator(auditMaxLogLines, auditMaxLogAge)
	loggingRotateCoordinator := gologging.NewRotateCoordinator(maxLogLines, maxLogAge)

	auditLogger, err := gologging.StartS3Logger(
		auditRotateCoordinator,
		auditInfo,
		&DummyNotifierHarness{},
		&uploader.S3UploaderBuilder{
			Bucket:           auditBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: auditInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building audit: %s\n", err)
	}

	spadeEventLogger, err := gologging.StartS3Logger(
		loggingRotateCoordinator,
		loggingInfo,
		BuildSQSNotifierHarness(),
		&uploader.S3UploaderBuilder{
			Bucket:           eventBucket,
			KeyNameGenerator: &gen.EdgeKeyNameGenerator{Info: loggingInfo},
		},
		BuildSQSErrorHarness(),
		2,
	)
	if err != nil {
		log.Fatalf("Got Error while building logger: %s\n", err)
	}

	// Initialize Loggers.
	// AuditLogger writes to the audit log, for analysis of system success rate.
	// SpadeLogger writes requests to a file for processing by the spade processor.
	// K(afka)Logger produces messages for Kafka, currently in dark launch.
	// We allow the klogger to be null in case we boot up with an unresponsive Kafka cluster.
	var logger *request_handler.EventLoggers
	brokerList := ParseBrokerList(*brokers)
	klogger, err := kafka_logger.NewKafkaLogger(*clientId, brokerList)
	if err == nil {
		klogger.(*kafka_logger.KafkaLogger).Init()
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     klogger,
		}
	} else {
		log.Printf("Got Error while building logger: %s + %v\nUsing Nop Logger\n", err, brokerList)
		logger = &request_handler.EventLoggers{
			AuditLogger: auditLogger,
			SpadeLogger: spadeEventLogger,
			KLogger:     &request_handler.NoopLogger{},
		}
	}

	// Trigger close on receipt of SIGINT
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT)
	go func() {
		<-sigc
		// Cause flush
		logger.Close()
		os.Exit(0)
	}()

	hystrixStreamHandler := hystrix.NewStreamHandler()
	hystrixStreamHandler.Start()
	go http.ListenAndServe(net.JoinHostPort("", "81"), hystrixStreamHandler)

	// Set up the server and listen.
	server := &http.Server{
		Addr: *listen_port,
		Handler: &request_handler.SpadeHandler{
			StatLogger: stats,
			EdgeLogger: logger,
			Assigner:   request_handler.Assigner,
		},
		ReadTimeout:    5 * time.Second,
		WriteTimeout:   5 * time.Second,
		MaxHeaderBytes: 1 << 20, // 1 MB
	}
	if err := server.ListenAndServe(); err != nil {
		log.Fatalln(err)
	}
}
func (s *S3) Run(env *tachyon.CommandEnv) (*tachyon.Result, error) {
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return nil, err
	}

	c := s3.New(auth, aws.USWest2)
	b := c.Bucket(s.Bucket)

	res := tachyon.NewResult(true)
	res.Add("bucket", s.Bucket)
	res.Add("remote", s.At)

	if s.PutFile != "" {
		path := env.Paths.File(s.PutFile)
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		if f == nil {
			return nil, fmt.Errorf("Unknown local file %s", s.PutFile)
		}
		defer f.Close()

		var perm s3.ACL
		if s.Public {
			if s.Writable {
				perm = s3.PublicReadWrite
			} else {
				perm = s3.PublicRead
			}
		} else {
			perm = s3.Private
		}

		ct := s.ContentType
		if ct == "" {
			ct = "application/octet-stream"
		}

		fi, err := f.Stat()
		if err != nil {
			return nil, err
		}

		var (
			input io.Reader
			opts  s3.Options
			size  int64
		)

		h := md5.New()

		if s.GZip {
			var buf bytes.Buffer
			z := gzip.NewWriter(io.MultiWriter(h, &buf))
			_, err = io.Copy(z, f)
			if err != nil {
				return nil, err
			}
			z.Close()

			opts.ContentEncoding = "gzip"
			input = &buf
			size = int64(buf.Len())
		} else {
			input = io.TeeReader(f, h)
			size = fi.Size()
		}

		err = b.PutReader(s.At, input, size, ct, perm, opts)
		if err != nil {
			return nil, err
		}

		rep, err := b.Head(s.At, nil)
		if err != nil {
			return nil, err
		}

		localMD5 := hex.EncodeToString(h.Sum(nil))

		res.Add("wrote", size)
		res.Add("local", s.PutFile)
		res.Add("md5", localMD5)

		etag := rep.Header.Get("ETag")
		if etag != "" {
			etag = etag[1 : len(etag)-1]
			if localMD5 != etag {
				return nil, fmt.Errorf("corruption uploading file detected")
			}
		}
	} else if s.GetFile != "" {
		f, err := os.OpenFile(s.GetFile, os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, err
		}
		defer f.Close()

		i, err := b.GetReader(s.At)
		if err != nil {
			return nil, err
		}
		defer i.Close()

		n, err := io.Copy(f, i)
		if err != nil {
			return nil, err
		}

		res.Add("read", n)
		res.Add("local", s.GetFile)
	} else {
		return nil, fmt.Errorf("Specify put_file or get_file")
	}

	return res, nil
}
func main() {
	var inst_id string
	if instanceID == "" {
		inst_id = aws.InstanceId()
		if inst_id == "unknown" {
			log.Fatalln("Unable to get instance id")
		}
	} else {
		inst_id = instanceID
	}

	auth, err := aws.GetAuth(accesskey, secretkey, "", time.Time{})
	if err != nil {
		log.Fatalln("Unable to get AWS auth", err)
	}

	if securityGroupID != "" {
		awsec2 = ec2.New(auth, aws.GetRegion(region))
		groupMap := getSecurityGroupIds(inst_id)
		groupMap[securityGroupID] = true
		groupIds := make([]string, 0, len(groupMap))
		for id := range groupMap {
			groupIds = append(groupIds, id)
		}
		opts := &ec2.ModifyInstanceAttributeOptions{SecurityGroups: ec2.SecurityGroupIds(groupIds...)}
		resp, err := awsec2.ModifyInstanceAttribute(inst_id, opts)
		if err != nil || !resp.Return {
			log.Fatalln("Error adding security group to instance", err)
		}
		log.Printf("Added security group %s to instance %s\n", securityGroupID, inst_id)
	}

	awselb := elb.New(auth, aws.GetRegion(region))
	for _, lbname := range lbnames {
		_, err = awselb.RegisterInstancesWithLoadBalancer([]string{inst_id}, lbname)
		if err != nil {
			log.Fatalln("Error registering instance", err)
		}
		log.Printf("Registered instance %s with elb %s\n", inst_id, lbname)
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)

	// this waits until we get a kill signal
	<-c

	for _, lbname := range lbnames {
		_, err = awselb.DeregisterInstancesFromLoadBalancer([]string{inst_id}, lbname)
		if err != nil {
			log.Fatalln("Error deregistering instance", err)
		}
		log.Printf("Deregistered instance %s with elb %s\n", inst_id, lbname)
	}

	if securityGroupID != "" {
		groupMap := getSecurityGroupIds(inst_id)
		delete(groupMap, securityGroupID)
		groupIds := make([]string, 0, len(groupMap))
		for id := range groupMap {
			groupIds = append(groupIds, id)
		}
		opts := &ec2.ModifyInstanceAttributeOptions{SecurityGroups: ec2.SecurityGroupIds(groupIds...)}
		resp, err := awsec2.ModifyInstanceAttribute(inst_id, opts)
		if err != nil || !resp.Return {
			log.Fatalln("Error removing security group from instance", err)
		}
		log.Printf("Removed security group %s from instance %s\n", securityGroupID, inst_id)
	}
}
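All of the call sites above share one pattern: pass explicit credentials to aws.GetAuth when the caller has them, otherwise pass empty strings so goamz falls back to the environment variables and then to the instance's IAM role, with the supplied time used as the token-expiration hint. The sketch below is a minimal, self-contained illustration of that pattern, not code from any snippet above; the newBucket helper, bucket/region names, and the AdRoll/goamz import paths are assumptions for illustration only (adjust to whichever goamz fork is vendored).

package main

import (
	"log"
	"time"

	"github.com/AdRoll/goamz/aws" // assumed fork; could also be crowdmob/goamz etc.
	"github.com/AdRoll/goamz/s3"
)

// newBucket is a hypothetical helper showing the credential-fallback pattern
// used throughout the snippets above: explicit keys if provided, otherwise
// env vars / IAM role via aws.GetAuth with empty strings.
func newBucket(accessKey, secretKey, regionName, bucketName string) (*s3.Bucket, error) {
	// Empty accessKey/secretKey make goamz fall back to the environment and
	// then to instance-role credentials; time.Now() is passed as the
	// expiration argument, as most call sites above do.
	auth, err := aws.GetAuth(accessKey, secretKey, "", time.Now())
	if err != nil {
		return nil, err
	}

	region, ok := aws.Regions[regionName]
	if !ok {
		// Assumption: falling back to us-west-2 is just for this sketch.
		log.Printf("unknown region %q, falling back to us-west-2", regionName)
		region = aws.USWest2
	}

	return s3.New(auth, region).Bucket(bucketName), nil
}

func main() {
	// With empty keys, credentials come from the environment or the IAM role.
	_, err := newBucket("", "", "us-west-2", "example-bucket")
	if err != nil {
		log.Fatalln("could not build S3 bucket handle:", err)
	}
	log.Println("bucket handle created")
}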