// Assume role uses the current server role to call STS and assume a different role if permitted // Params: // roleArn - the requested role ARN (example: arn:aws:iam::11111111111:role/myrole ) // sessionName - a name to associate with the current session. Use the service name + // unique idenfitifier preferebly. // duration - the duration of the session in seconds. Must be between 900 and 3600 // Returns aws.Auth object that can be used with any of the existing goamz APIs // // Check http://goo.gl/M6uCu5 for more information // func AssumeRole(roleArn string, sessionName string, duration int) (*aws.Auth, error) { if duration < 900 || duration > 3600 { return nil, fmt.Errorf("Duration out of bounds") } //Try to get our local auth localAuth, err := aws.GetAuth("", "", "", time.Time{}) if err != nil { return nil, err } stsClient := sts.New(localAuth, aws.Regions[util.GetAwsRegionName()]) stsOptions := &sts.AssumeRoleParams{ DurationSeconds: int(duration), RoleArn: roleArn, RoleSessionName: sessionName, } //Try to assume role roleAuth, err := stsClient.AssumeRole(stsOptions) if err != nil { return nil, err } //Marshal the response into an aws.Auth object auth := aws.NewAuth(roleAuth.Credentials.AccessKeyId, roleAuth.Credentials.SecretAccessKey, roleAuth.Credentials.SessionToken, roleAuth.Credentials.Expiration) return auth, nil }
func (s *S) TestGetAuthEnv(c *C) { os.Clearenv() os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") os.Setenv("AWS_ACCESS_KEY_ID", "access") auth, err := aws.GetAuth("", "", "", time.Time{}) c.Assert(err, IsNil) c.Assert(*auth, Equals, *aws.NewAuth("access", "secret", "", time.Time{})) }
func (s *S) TestGetAuthEnv(c *gocheck.C) { os.Clearenv() os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") os.Setenv("AWS_ACCESS_KEY_ID", "access") auth, err := aws.GetAuth("", "", "", time.Time{}) c.Assert(err, gocheck.IsNil) c.Assert(auth, gocheck.Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"}) }
// getNodeAddEc2Action renders the "add EC2 node" form. When stored EC2
// credentials exist, it additionally loads the VPCs and security groups for
// the selected region so the form dropdowns can be pre-populated.
func (pc *NodeController) getNodeAddEc2Action(c *gin.Context) {
	a, err := models.AccessMapper.FetchOne("ec2")
	if err != nil {
		panic(err)
	}
	// No stored credentials yet: render the empty form.
	if a == nil {
		c.HTML(http.StatusOK, "node_add_ec2.html", map[string]interface{}{})
		return
	}
	auth, err := aws.GetAuth(a.AccessKey, a.PrivateKey, "", time.Now().Add(time.Hour))
	if err != nil {
		panic(err)
	}
	var vpcs []ec2.VPC
	var securityGroups []ec2.SecurityGroupInfo
	region := c.Query("availability_zone")
	vpc := c.Query("vpc")
	securityGroup := c.Query("security_group")
	if region != "" {
		awsec2 := ec2.New(auth, aws.Regions[region])
		// Lookup errors are deliberately ignored: a failed call just leaves
		// the corresponding dropdown empty instead of breaking the page.
		res, _ := awsec2.DescribeVpcs(nil, nil)
		if res != nil {
			vpcs = res.VPCs
		}
		if vpc != "" {
			// Only keep the security groups that belong to the chosen VPC.
			if groups, _ := awsec2.SecurityGroups(nil, nil); groups != nil {
				for _, g := range groups.Groups {
					if g.VpcId == vpc {
						securityGroups = append(securityGroups, g)
					}
				}
			}
		}
	}
	log.Println("vpcs:", vpcs)
	c.HTML(http.StatusOK, "node_add_ec2.html", map[string]interface{}{
		"AccessKey":      a.AccessKey,
		"AWSRegions":     aws.Regions,
		"VPCs":           vpcs,
		"SecurityGroups": securityGroups,
		"query": map[string]interface{}{
			"availability_zone": region,
			"vpc":               vpc,
			"security_group":    securityGroup,
		},
	})
}
func (s *S) TestGetAuthStatic(c *C) { exptdate := time.Now().Add(time.Hour) auth, err := aws.GetAuth("access", "secret", "token", exptdate) c.Assert(err, IsNil) c.Assert(auth.AccessKey(), Equals, "access") c.Assert(auth.SecretKey(), Equals, "secret") c.Assert(auth.Token(), Equals, "token") c.Assert(auth.Expiration(), Equals, exptdate) }
func getBucket() (*s3.Bucket, error) { auth, err := aws.GetAuth(config.AWS_ACCESS_KEY, config.AWS_SECRET_KEY, "", time.Time{}) if err != nil { return nil, err } conn := s3.New(auth, aws.Regions["eu-west-1"]) b := conn.Bucket(config.BUCKET) return b, nil }
// main runs a local EC2-metadata proxy for Docker containers: credentials
// requests are answered per-container, everything else is forwarded to the
// real instance metadata service.
func main() {
	kingpin.CommandLine.Help = "Docker container EC2 metadata service."
	kingpin.Parse()
	defer log.Flush()
	configureLogging(*verboseOpt)
	// Host-level credentials used to assume per-container roles.
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		panic(err)
	}
	containerService := NewContainerService(dockerClient(), *defaultRole, auth)
	// Proxy non-credentials requests to primary metadata service
	http.HandleFunc("/", logHandler(func(w http.ResponseWriter, r *http.Request) {
		// Credential paths are served locally from the container's role.
		match := credsRegex.FindStringSubmatch(r.URL.Path)
		if match != nil {
			handleCredentials(match[1], match[2], containerService, w, r)
			return
		}
		// Anything else is replayed verbatim against the real metadata endpoint.
		proxyReq, err := http.NewRequest(r.Method, fmt.Sprintf("%s%s", baseUrl, r.URL.Path), r.Body)
		if err != nil {
			log.Error("Error creating proxy http request: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}
		copyHeaders(proxyReq.Header, r.Header)
		resp, err := instanceServiceClient.RoundTrip(proxyReq)
		if err != nil {
			log.Error("Error forwarding request to EC2 metadata service: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()
		// Mirror the upstream response (headers, status, body) to the caller.
		copyHeaders(w.Header(), resp.Header)
		w.WriteHeader(resp.StatusCode)
		if _, err := io.Copy(w, resp.Body); err != nil {
			log.Warn("Error copying response content from EC2 metadata service: ", err)
		}
	}))
	log.Critical(http.ListenAndServe(*serverAddr, nil))
}
// main polls CloudWatch once per minute and forwards the collected metric
// values to statsd.
func main() {
	var opts struct {
		AWSAccess  string `long:"aws-access" env:"ACCESS_KEY" required:"true"`
		AWSSecret  string `long:"aws-secret" env:"SECRET_KEY" required:"true"`
		AWSRegion  string `long:"aws-region" env:"AWS_REGION" default:"us-east-1"`
		StatsDHost string `long:"statsd-host" env:"STATSD_HOST" default:"localhost:8125"`
		Verbose    bool   `long:"verbose" short:"v" env:"DEBUG" default:"false"`
	}
	// Best-effort: a missing .env file is fine, flags/env vars still apply.
	godotenv.Load(".env")
	if _, err := flags.Parse(&opts); err != nil {
		log.Fatal("cannot parse command line arguments")
	}
	if opts.Verbose {
		log.SetLevel(log.DebugLevel)
	}
	statsdbuffer, err := getStatsdBuffer(opts.StatsDHost)
	if err != nil {
		log.Fatal("could not initialize statsd client")
	}
	statsdbuffer.Logger = log.StandardLogger()
	auth, err := aws.GetAuth(opts.AWSAccess, opts.AWSSecret, "", time.Now())
	if err != nil {
		log.WithField("error", err).Fatal("could not authenticate to aws")
	}
	region := aws.Regions[opts.AWSRegion]
	cw, err := cloudwatch.NewCloudWatch(auth, region.CloudWatchServicepoint)
	if err != nil {
		log.WithFields(log.Fields{
			"error":  err,
			"region": opts.AWSRegion,
		}).Fatal("could not open cloudwatch")
	}
	// Build the list of metric requests once up front; it is reused each tick.
	requests, err := getMetricRequests(cw)
	if err != nil {
		log.WithField("error", err).Fatal("could not build requests")
	}
	log.WithField("num_metrics", len(requests)).Info("built stats requests")
	// Poll forever, once a minute. The ticker is never stopped because main
	// never returns.
	ticker := time.NewTicker(time.Minute)
	for now := range ticker.C {
		goStats(cw, &requests, now, statsdbuffer)
	}
}
// Store messages to S3: func StoreMessages(fileData []byte) error { // Something to compress the fileData into: var fileDataBytes bytes.Buffer gzFileData := gzip.NewWriter(&fileDataBytes) gzFileData.Write(fileData) gzFileData.Close() log.Infof("Storing %d bytes...", len(fileDataBytes.Bytes())) // Authenticate with AWS: awsAuth, err := aws.GetAuth("", "", "", time.Now()) if err != nil { log.Criticalf("Unable to authenticate to AWS! (%s) ...\n", err) os.Exit(2) } else { log.Debugf("Authenticated to AWS") } // Make a new S3 connection: log.Debugf("Connecting to AWS...") s3Connection := s3.New(awsAuth, aws.Regions[*awsRegion]) // Make a bucket object: s3Bucket := s3Connection.Bucket(*s3Bucket) // Prepare arguments for the call to store messages on S3: contType := "text/plain" perm := s3.BucketOwnerFull options := &s3.Options{ SSE: false, Meta: nil, } // Build the filename we'll use for S3: fileName := fmt.Sprintf("%v.gz", FileName()) // Upload the data: err = s3Bucket.Put(fileName, fileDataBytes.Bytes(), contType, perm, *options) if err != nil { log.Criticalf("Failed to put file (%v) on S3 (%v)", fileName, err) os.Exit(2) } else { log.Infof("Stored file (%v) on s3", fileName) } return nil }
// Lookup the Route53 zone-id for the domain-name we were given: func getRoute53ZoneId(domainName string) (string, error) { // Authenticate with AWS: awsAuth, err := aws.GetAuth("", "", "", time.Now()) if err != nil { log.Criticalf("[dnsUpdater] Unable to authenticate to AWS! (%s)", err) return "", err } else { log.Debugf("[dnsUpdater] Authenticated to AWS") } // Make a new EC2 connection: log.Debugf("[dnsUpdater] Connecting to Route53 ...") route53Connection, err := route53.NewRoute53(awsAuth) if err != nil { log.Criticalf("[dnsUpdater] Unable to connect to Route53! (%s)", err) return "", err } // Submit the request: ListHostedZonesResponse, err := route53Connection.ListHostedZones("", 100) if err != nil { log.Criticalf("[dnsUpdater] Failed to make ListHostedZones call: %v", err) return "", err } else { log.Debugf("[dnsUpdater] Retreived %d DNS zones.", len(ListHostedZonesResponse.HostedZones)) } // Go through the responses looking for our zone: for _, hostedZone := range ListHostedZonesResponse.HostedZones { // Compare the name to the one provided: if hostedZone.Name == domainName { log.Infof("[dnsUpdater] Found ID (%v) for domain (%v).", hostedZone.Id, domainName) // Split the zone-ID (because they tend to look like "/hostedzone/ZXJHAS123"): return strings.Split(hostedZone.Id, "/")[2], nil break } } log.Criticalf("[dnsUpdater] Couldn't find zone-ID for domain (%v)!", domainName) os.Exit(1) return "", errors.New(fmt.Sprintf("Couldn't find DNS-domain '%v' on your AWS account", domainName)) }
func testDynamodb() (bool, error) { auth, err := aws.GetAuth("test", "test", "test", time.Now()) if err != nil { log.Panic(err) } ddb := dynamodb.Server{auth, local} tables, err := ddb.ListTables() if err != nil { return false, errors.New("DYNAMODB KO") } log.Printf("%v\n", tables) return true, nil }
// GetLegacy ... func (la *LegacyArguments) GetLegacy() (*Legacy, error) { // Create a "TEST" snapshot in order to work out which tables are active // Get a list of Keyspaces and Table Names (plus directories) // Walk through all the directories. auth, _ := aws.GetAuth( la.AwsAccessKey, la.AwsSecret, "", time.Now().AddDate(0, 0, 1)) // Check the bucket exists. bucket := s3.New(auth, GetAwsRegion(la.AwsRegion)).Bucket(la.S3Bucket) _, err := bucket.List("/", "/", "", 1) if err != nil { return nil, err } streamAccess := s3gof3r.New("", s3gof3r.Keys{ AccessKey: la.AwsAccessKey, SecretKey: la.AwsSecret, SecurityToken: "", }) streamBucket := streamAccess.Bucket(la.S3Bucket) legacy := &Legacy{ DataDirectories: make([]string, 0), S3Bucket: bucket, S3StreamBucket: streamBucket, LogDirectory: la.LogDirectory, NewSnapshot: la.NewSnapshot, } legacy.MachineName, _ = os.Hostname() legacy.DataDirectories = SplitAndTrim(la.DataDirectories, ",") legacy.ExcludeKeyspaces = SplitAndTrim(la.ExcludeKeyspaces, ",") return legacy, nil }
func getService(service, region string) (*aws.Service, error) { reg, err := GetAWSRegion(region) if err != nil { return nil, err } var endpoint string switch service { case "cf": endpoint = reg.CloudFormationEndpoint case "ec2": endpoint = reg.EC2Endpoint case "iam": endpoint = reg.IAMEndpoint case "rds": endpoint = reg.RDSEndpoint.Endpoint default: return nil, fmt.Errorf("Service %s not implemented", service) } // only get the creds from the env for now auth, err := aws.GetAuth("", "", "", time.Now()) if err != nil { return nil, err } serviceInfo := aws.ServiceInfo{ Endpoint: endpoint, Signer: aws.V2Signature, } svc, err := aws.NewService(auth, serviceInfo) if err != nil { return nil, err } return svc, nil }
// NewAnnouncer builds an Announcer wired to the ELB API, discovering its
// region and instance-id from the EC2 instance metadata service.
func NewAnnouncer() (sk *Announcer, err error) {
	sk = &Announcer{}
	b, err := aws.GetMetaData("placement/availability-zone")
	if err != nil {
		return nil, err
	}
	// Drop the trailing zone letter (e.g. "us-east-1a" -> "us-east-1") to get
	// the region name. NOTE(review): this slices b[:len(b)-1] and would panic
	// on an empty response — assumed non-empty on a real instance; confirm.
	sk.RegionId = string(b[:len(b)-1])
	b, err = aws.GetMetaData("instance-id")
	if err != nil {
		return nil, err
	}
	sk.InstanceId = string(b)
	// Fall back to us-east-1 when no region could be determined.
	if sk.RegionId == "" {
		sk.RegionId = aws.USEast.Name
	}
	// Anonymous GetAuth: credentials come from the environment/instance role.
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		return nil, err
	}
	sk.elb = elb.New(auth, aws.Regions[sk.RegionId])
	return
}
// postNodeAddEc2Action handles the "add EC2 node" form submission: it
// validates the form, then launches a single EC2 instance whose user-data
// script registers the node back against this server.
func (pc *NodeController) postNodeAddEc2Action(c *gin.Context) {
	var form models.EC2NodeCreateForm
	if err := c.Bind(&form); err != nil {
		c.AbortWithStatus(http.StatusBadRequest)
		return
	}
	// Invalid form: bounce back to the referring page.
	if err := form.Validate(); err != nil {
		c.Redirect(http.StatusFound, c.Request.Referer())
		return
	}
	a, err := models.AccessMapper.FetchOne("ec2")
	if err != nil {
		panic(err)
	}
	// No stored EC2 credentials: nothing we can do.
	if a == nil {
		c.Redirect(http.StatusFound, c.Request.Referer())
		return
	}
	// Optional basic-auth prefix for the curl command in the user-data script.
	basicAuth := env.Get("BASIC_AUTH")
	if auth := env.Get("BASIC_AUTH"); auth != "" {
		basicAuth = "-u " + auth + " "
	}
	auth, err := aws.GetAuth(a.AccessKey, a.PrivateKey, "", time.Now().Add(time.Hour))
	if err != nil {
		panic(err)
	}
	awsec2 := ec2.New(auth, aws.Regions[form.AvailabilityZone])
	// Create public key
	// Waiting for merge pull request https://github.com/goamz/goamz/pull/111
	// {
	// 	key, err := ssh.GetPublicKey()
	// 	if err != nil {
	// 		panic(err)
	// 	}
	// 	if _, err := awsec2.ImportKeyPair(&ImportKeyPairOptions{
	// 		KeyName: "karhu",
	// 		PublicKeyMaterial: string(key),
	// 	}); err != nil {
	// 		panic(err)
	// 	}
	// }
	// NOTE(review): MinCount is 1 but MaxCount is 0 — EC2 requires
	// MaxCount >= MinCount; confirm this is intended.
	if _, err := awsec2.RunInstances(&ec2.RunInstancesOptions{
		ImageId:        "ami-e31a6594",
		MinCount:       1,
		MaxCount:       0,
		KeyName:        "karhu",
		InstanceType:   form.InstanceType,
		SecurityGroups: []ec2.SecurityGroup{{Id: form.SecurityGroup}},
		// KernelId : string
		// RamdiskId : string
		UserData: []byte(fmt.Sprintf(`#!/bin/bash
sudo apt-get update && \
sudo apt-get install -y curl && \
curl %s"%s/api/nodes/register.sh?monit=1&ssh_port=22" | sudo -i -u admin bash`, basicAuth, env.Get("PUBLIC_HOST"))),
		AvailabilityZone: "eu-west-1c",
		// Waiting for https://github.com/goamz/goamz/pull/112
		// PlacementGroupName : string
		Tenancy:    "default",
		Monitoring: form.Monitoring == "on",
		SubnetId:   "subnet-425a4f27",
		// Waiting for https://github.com/goamz/goamz/pull/112
		// DisableAPITermination : bool
		// ShutdownBehavior : string
		// PrivateIPAddress : string
		// IamInstanceProfile : IamInstanceProfile
		// BlockDevices : []BlockDeviceMapping
		// EbsOptimized : bool
		// AssociatePublicIpAddress :bool
	}); err != nil {
		panic(err)
	}
	c.Redirect(http.StatusFound, "/nodes")
}
// EnvAuth returns an Option that resolves AWS credentials from the
// environment and stores them on the filesystem; lookup errors are
// deliberately ignored, leaving the auth zero-valued.
func EnvAuth() Option {
	return func(fs *MemS3Fs) {
		auth, _ := aws.GetAuth("", "", "", time.Time{})
		fs.auth = auth
	}
}
// Updater periodically populates the host-inventory: every cycle it lists the
// account's running EC2 instances and rebuilds config.HostInventory's DNS
// record map, keyed by the instances' environment/role tags.
func Updater(config *types.Config) {
	log.Infof("[hostInventoryUpdater] Started")
	// First run happens quickly; later runs use config.HostUpdateFrequency.
	updateFrequency := 5

	// Run forever:
	for {
		// Sleep until the next run:
		log.Debugf("[hostInventoryUpdater] Sleeping for %vs ...", updateFrequency)
		time.Sleep(time.Duration(updateFrequency) * time.Second)

		// Authenticate with AWS:
		awsAuth, err := aws.GetAuth("", "", "", time.Now())
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to authenticate to AWS! (%s)", err)
			continue
		} else {
			log.Debugf("[hostInventoryUpdater] Authenticated to AWS")
		}

		// Make a new EC2 connection:
		log.Debugf("[hostInventoryUpdater] Connecting to EC2 ...")
		ec2Connection := ec2.New(awsAuth, aws.Regions[config.AWSRegion])

		// Prepare a filter:
		filter := ec2.NewFilter()
		filter.Add("instance-state-name", "running")

		// Make a "DescribeInstances" call (lists ALL instances in your account):
		describeInstancesResponse, err := ec2Connection.DescribeInstances([]string{}, filter)
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Failed to make describe-instances call: %v", err)
		} else {
			log.Debugf("[hostInventoryUpdater] Found %v instances running in your account", len(describeInstancesResponse.Reservations))

			// Lock the host-list (so we don't change it while another goroutine is using it):
			log.Tracef("[hostInventoryUpdater] Trying to lock config.HostInventoryMutex ...")
			config.HostInventoryMutex.Lock()
			log.Tracef("[hostInventoryUpdater] Locked config.HostInventoryMutex")

			// Clear out the existing host-inventory:
			config.HostInventory = types.HostInventory{
				Environments: make(map[string]types.Environment),
			}

			// Re-populate it from the describe instances response:
			for _, reservation := range describeInstancesResponse.Reservations {

				// Search for our role and environment tags.
				// NOTE(review): only Instances[0] of each reservation is
				// examined — presumably one instance per reservation; confirm.
				var role, environment string
				for _, tag := range reservation.Instances[0].Tags {
					if tag.Key == config.RoleTag {
						role = tag.Value
					}
					if tag.Key == config.EnvironmentTag {
						environment = tag.Value
					}
				}

				// Make sure we have environment and role tags:
				if environment == "" || role == "" {
					log.Debugf("[hostInventoryUpdater] Instance (%v) must have both 'environment' and 'role' metadata in order for DNS records to be creted!", reservation.Instances[0].InstanceId)

					// Continue with the next instance:
					continue
				} else {
					log.Infof("[hostInventoryUpdater] Building records for instance (%v) in zone (%v) ...", reservation.Instances[0].InstanceId, reservation.Instances[0].AvailabilityZone)
				}

				// Add a new environment to the inventory (unless we already have it):
				if _, ok := config.HostInventory.Environments[environment]; !ok {
					config.HostInventory.Environments[environment] = types.Environment{
						DNSRecords: make(map[string][]string),
					}
				}

				// Either create or add to the role-per-zone record:
				internalZoneRecord := fmt.Sprintf("%v.%v.i.%v.%v", role, reservation.Instances[0].AvailabilityZone, environment, config.DNSDomainName)
				if _, ok := config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord]; !ok {
					config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = []string{reservation.Instances[0].PrivateIPAddress}
				} else {
					config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord], reservation.Instances[0].PrivateIPAddress)
				}

				// Either create or add to the role-per-region record:
				internalRegionRecord := fmt.Sprintf("%v.%v.i.%v.%v", role, config.AWSRegion, environment, config.DNSDomainName)
				if _, ok := config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord]; !ok {
					config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = []string{reservation.Instances[0].PrivateIPAddress}
				} else {
					config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord], reservation.Instances[0].PrivateIPAddress)
				}

				// Either create or add to the external record (only when the
				// instance has a public IP):
				if reservation.Instances[0].IPAddress != "" {
					externalRecord := fmt.Sprintf("%v.%v.e.%v.%v", role, config.AWSRegion, environment, config.DNSDomainName)
					if _, ok := config.HostInventory.Environments[environment].DNSRecords[externalRecord]; !ok {
						config.HostInventory.Environments[environment].DNSRecords[externalRecord] = []string{reservation.Instances[0].IPAddress}
					} else {
						config.HostInventory.Environments[environment].DNSRecords[externalRecord] = append(config.HostInventory.Environments[environment].DNSRecords[externalRecord], reservation.Instances[0].IPAddress)
					}
				}
			}

			// Unlock the host-inventory (inside the success branch, so it
			// always pairs with the Lock above):
			log.Tracef("[hostInventoryUpdater] Unlocking config.HostInventoryMutex ...")
			config.HostInventoryMutex.Unlock()
		}

		// Now set the sleep time to the correct value:
		updateFrequency = config.HostUpdateFrequency
	}
}
func main() { // Parse command-line flags for this system. var ( listenAddress = flag.String("addr", "", "Address to listen to incoming requests on.") ldapAddress = flag.String("ldapAddr", "", "Address to connect to LDAP.") ldapBindDN = flag.String("ldapBindDN", "", "LDAP DN to bind to for login.") ldapInsecure = flag.Bool("insecureLDAP", false, "INSECURE: Don't use TLS for LDAP connection.") ldapBindPassword = flag.String("ldapBindPassword", "", "LDAP password for bind.") statsdHost = flag.String("statsHost", "", "Address to send statsd metrics to.") iamAccount = flag.String("iamaccount", "", "AWS Account ID for generating IAM Role ARNs") enableLDAPRoles = flag.Bool("ldaproles", false, "Enable role support using LDAP directory.") roleAttribute = flag.String("roleattribute", "", "Group attribute to get role from.") defaultRole = flag.String("role", "", "AWS role to assume by default.") configFile = flag.String("conf", "/etc/hologram/server.json", "Config file to load.") cacheTimeout = flag.Int("cachetime", 3600, "Time in seconds after which to refresh LDAP user cache.") debugMode = flag.Bool("debug", false, "Enable debug mode.") config Config ) flag.Parse() // Enable debug log output if the user requested it. if *debugMode { log.DebugMode(true) log.Debug("Enabling debug log output. Use sparingly.") } // Parse in options from the given config file. log.Debug("Loading configuration from %s", *configFile) configContents, configErr := ioutil.ReadFile(*configFile) if configErr != nil { log.Errorf("Could not read from config file. The error was: %s", configErr.Error()) os.Exit(1) } configParseErr := json.Unmarshal(configContents, &config) if configParseErr != nil { log.Errorf("Error in parsing config file: %s", configParseErr.Error()) os.Exit(1) } // Merge in command flag options. 
if *ldapAddress != "" { config.LDAP.Host = *ldapAddress } if *ldapInsecure { config.LDAP.InsecureLDAP = true } if *ldapBindDN != "" { config.LDAP.Bind.DN = *ldapBindDN } if *ldapBindPassword != "" { config.LDAP.Bind.Password = *ldapBindPassword } if *statsdHost != "" { config.Stats = *statsdHost } if *iamAccount != "" { config.AWS.Account = *iamAccount } if *listenAddress != "" { config.Listen = *listenAddress } if *defaultRole != "" { config.AWS.DefaultRole = *defaultRole } if *enableLDAPRoles { config.LDAP.EnableLDAPRoles = true } if *roleAttribute != "" { config.LDAP.RoleAttribute = *roleAttribute } if *cacheTimeout != 3600 { config.CacheTimeout = *cacheTimeout } var stats g2s.Statter var statsErr error if config.LDAP.UserAttr == "" { config.LDAP.UserAttr = "cn" } if config.Stats == "" { log.Debug("No statsd server specified; no metrics will be emitted by this program.") stats = g2s.Noop() } else { stats, statsErr = g2s.Dial("udp", config.Stats) if statsErr != nil { log.Errorf("Error connecting to statsd: %s. No metrics will be emitted by this program.", statsErr.Error()) stats = g2s.Noop() } else { log.Debug("This program will emit metrics to %s", config.Stats) } } // Setup the server state machine that responds to requests. auth, err := aws.GetAuth(os.Getenv("HOLOGRAM_AWSKEY"), os.Getenv("HOLOGRAM_AWSSECRET"), "", time.Now()) if err != nil { log.Errorf("Error getting instance credentials: %s", err.Error()) os.Exit(1) } stsConnection := sts.New(auth, aws.Regions["us-east-1"]) credentialsService := server.NewDirectSessionTokenService(config.AWS.Account, stsConnection) var ldapServer *ldap.Conn // Connect to the LDAP server using TLS or not depending on the config if config.LDAP.InsecureLDAP { log.Debug("Connecting to LDAP at server %s (NOT using TLS).", config.LDAP.Host) ldapServer, err = ldap.Dial("tcp", config.LDAP.Host) if err != nil { log.Errorf("Could not dial LDAP! 
%s", err.Error()) os.Exit(1) } } else { // Connect to the LDAP server with sample credentials. tlsConfig := &tls.Config{ InsecureSkipVerify: true, } log.Debug("Connecting to LDAP at server %s.", config.LDAP.Host) ldapServer, err = ldap.DialTLS("tcp", config.LDAP.Host, tlsConfig) if err != nil { log.Errorf("Could not dial LDAP! %s", err.Error()) os.Exit(1) } } if bindErr := ldapServer.Bind(config.LDAP.Bind.DN, config.LDAP.Bind.Password); bindErr != nil { log.Errorf("Could not bind to LDAP! %s", bindErr.Error()) os.Exit(1) } ldapCache, err := server.NewLDAPUserCache(ldapServer, stats, config.LDAP.UserAttr, config.LDAP.BaseDN, config.LDAP.EnableLDAPRoles, config.LDAP.RoleAttribute) if err != nil { log.Errorf("Top-level error in LDAPUserCache layer: %s", err.Error()) os.Exit(1) } serverHandler := server.New(ldapCache, credentialsService, config.AWS.DefaultRole, stats, ldapServer, config.LDAP.UserAttr, config.LDAP.BaseDN, config.LDAP.EnableLDAPRoles) server, err := remote.NewServer(config.Listen, serverHandler.HandleConnection) // Wait for a signal from the OS to shutdown. terminate := make(chan os.Signal) signal.Notify(terminate, syscall.SIGINT, syscall.SIGTERM) // SIGUSR1 and SIGUSR2 should make Hologram enable and disable debug logging, // respectively. debugEnable := make(chan os.Signal) debugDisable := make(chan os.Signal) signal.Notify(debugEnable, syscall.SIGUSR1) signal.Notify(debugDisable, syscall.SIGUSR2) // SIGHUP should make Hologram server reload its cache of user information // from LDAP. 
reloadCacheSigHup := make(chan os.Signal) signal.Notify(reloadCacheSigHup, syscall.SIGHUP) // Reload the cache based on time set in configuration cacheTimeoutTicker := time.NewTicker(time.Duration(config.CacheTimeout) * time.Second) log.Info("Hologram server is online, waiting for termination.") WaitForTermination: for { select { case <-terminate: break WaitForTermination case <-debugEnable: log.Info("Enabling debug mode.") log.DebugMode(true) case <-debugDisable: log.Info("Disabling debug mode.") log.DebugMode(false) case <-reloadCacheSigHup: log.Info("Force-reloading user cache.") ldapCache.Update() case <-cacheTimeoutTicker.C: log.Info("Cache timeout. Reloading user cache.") ldapCache.Update() } } log.Info("Caught signal; shutting down now.") server.Close() }
// Periodically populate DNS using the host-inventory: func Updater(config *types.Config) { // Get the Route53 "Zone-ID": route53zoneId, err := getRoute53ZoneId(config.DNSDomainName) if err != nil { log.Criticalf("Error looking up DNS zone-id: %v", err) os.Exit(2) } // Run forever: log.Infof("[dnsUpdater] Started") for { // Sleep until the next run: log.Debugf("[dnsUpdater] Sleeping for %vs ...", config.DNSUpdateFrequency) time.Sleep(time.Duration(config.DNSUpdateFrequency) * time.Second) // Lock the host-list (so we don't try to access it when another go-routine is modifying it): log.Tracef("[dnsUpdater] Trying to lock config.HostInventoryMutex ...") config.HostInventoryMutex.Lock() log.Tracef("[dnsUpdater] Locked config.HostInventoryMutex") // See if we actually have any changes to make: if len(config.HostInventory.Environments) > 0 { // Authenticate with AWS: awsAuth, err := aws.GetAuth("", "", "", time.Now()) if err != nil { log.Errorf("[dnsUpdater] Unable to authenticate to AWS! (%s)", err) continue } else { log.Debugf("[dnsUpdater] Authenticated to AWS") } // Make a new EC2 connection: log.Debugf("[dnsUpdater] Connecting to Route53 ...") route53Connection, err := route53.NewRoute53(awsAuth) if err != nil { log.Errorf("[dnsUpdater] Unable to connect to Route53! 
(%s)", err) continue } // Go through each environment: for environmentName, environment := range config.HostInventory.Environments { // Make an empty batch of changes: changes := make([]route53.ResourceRecordSet, 0) // Now iterate over the host-inventory: log.Debugf("[dnsUpdater] Creating requests for the '%v' environment ...", environmentName) for dnsRecordName, dnsRecordValue := range environment.DNSRecords { // Turn the list of strings (host-addresses) into a list of route53.ResourceRecordValue: resourceRecordValues := make([]route53.ResourceRecordValue, 0) for _, hostAddress := range dnsRecordValue { resourceRecordValues = append(resourceRecordValues, route53.ResourceRecordValue{Value: hostAddress}) } // Prepare a change-request: log.Debugf("[dnsUpdater] Record: %v => %v", dnsRecordName, dnsRecordValue) changes = append(changes, &route53.BasicResourceRecordSet{ Action: "UPSERT", Name: dnsRecordName, Type: "A", TTL: config.DNSTTL, Values: resourceRecordValues, }) } // Create a request to modify records: changeResourceRecordSetsRequest := route53.ChangeResourceRecordSetsRequest{ Xmlns: "https://route53.amazonaws.com/doc/2013-04-01/", Changes: changes, } // Submit the request: changeResourceRecordSetsResponse, err := route53Connection.ChangeResourceRecordSet(&changeResourceRecordSetsRequest, route53zoneId) if err != nil { log.Errorf("[dnsUpdater] Failed to make changeResourceRecordSetsResponse call: %v", err) continue } else { log.Infof("[dnsUpdater] Successfully updated %d DNS record-sets for %v.%v (Request-ID: %v, Status: %v, Submitted: %v)", len(changes), environmentName, config.DNSDomainName, changeResourceRecordSetsResponse.Id, changeResourceRecordSetsResponse.Status, changeResourceRecordSetsResponse.SubmittedAt) } } } else { log.Info("[dnsUpdater] No DNS changes to make") } // Unlock the host-inventory: log.Tracef("[dnsUpdater] Unlocking config.HostInventoryMutex ...") config.HostInventoryMutex.Unlock() } }