// If we're not running on GCE (e.g. dev mode on localhost) and have
// no other way to get the info, the error value is errNoRefresh.
func (h *DeployHandler) authenticatedClient() (project string, hc *http.Client, err error) {
	project = os.Getenv("CAMLI_GCE_PROJECT")
	accountFile := os.Getenv("CAMLI_GCE_SERVICE_ACCOUNT")
	if project != "" && accountFile != "" {
		data, errr := ioutil.ReadFile(accountFile)
		err = errr
		if err != nil {
			return
		}
		jwtConf, errr := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/compute.readonly")
		err = errr
		if err != nil {
			return
		}
		hc = jwtConf.Client(context.Background())
		return
	}
	if !metadata.OnGCE() {
		err = errNoRefresh
		return
	}
	project, _ = metadata.ProjectID()
	hc, err = google.DefaultClient(oauth2.NoContext)
	return project, hc, err
}
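// A hedged caller sketch (not from the original handler): errNoRefresh is a
// sentinel error, so a caller can distinguish "no way to authenticate" (e.g.
// dev mode on localhost) from a real failure. projectOrDev and the
// "dev-project" fallback are assumptions for illustration only.
func (h *DeployHandler) projectOrDev() string {
	project, _, err := h.authenticatedClient()
	if err == errNoRefresh {
		return "dev-project" // assumed local fallback project name
	}
	if err != nil {
		log.Printf("authenticatedClient: %v", err)
		return ""
	}
	return project
}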
// Returns a thread-compatible implementation of GCM interactions.
func NewCore() (*GcmCore, error) {
	token, err := gce.NewAuthTokenProvider(GCMAuthScope)
	if err != nil {
		return nil, err
	}
	// Detect project.
	project, err := metadata.ProjectID()
	if err != nil {
		return nil, err
	}
	core := &GcmCore{
		token:           token,
		project:         project,
		exportedMetrics: make(map[string]metricDescriptor),
		lastValue:       gcstore.New(time.Hour),
	}
	// Wait for an initial token.
	_, err = core.token.WaitForToken()
	if err != nil {
		return nil, err
	}
	if err := core.listMetrics(); err != nil {
		return nil, err
	}
	return core, nil
}
// newClient creates an http.Client with a JWT service account when the
// jsonFile flag is specified, otherwise by obtaining the GCE service
// account's access token.
func newClient(jsonFile string) (*http.Client, error) {
	if jsonFile != "" {
		jsonKey, err := ioutil.ReadFile(jsonFile)
		if err != nil {
			return nil, err
		}
		conf, err := google.JWTConfigFromJSON(jsonKey, pubsub.ScopePubSub)
		if err != nil {
			return nil, err
		}
		return conf.Client(oauth2.NoContext), nil
	}
	if metadata.OnGCE() {
		c := &http.Client{
			Transport: &oauth2.Transport{
				Source: google.ComputeTokenSource(""),
			},
		}
		if *projID == "" {
			projectID, err := metadata.ProjectID()
			if err != nil {
				return nil, fmt.Errorf("ProjectID failed, %v", err)
			}
			*projID = projectID
		}
		return c, nil
	}
	return nil, errors.New("could not create an authenticated client")
}
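// A minimal usage sketch for newClient (mustNewClient is an assumed helper
// name; jsonFile is the flag named in the doc comment above):
func mustNewClient() *http.Client {
	hc, err := newClient(*jsonFile)
	if err != nil {
		log.Fatalf("newClient: %v", err)
	}
	return hc
}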
func logsHandler(w http.ResponseWriter, r *http.Request) {
	suffix := strings.TrimPrefix(r.URL.Path, "/debug/logs/")
	switch suffix {
	case "camlistored":
		projID, err := metadata.ProjectID()
		if err != nil {
			httputil.ServeError(w, r, fmt.Errorf("error getting project ID: %v", err))
			return
		}
		http.Redirect(w, r, "https://console.developers.google.com/logs?project="+projID+"&service=custom.googleapis.com&logName=camlistored-stderr", http.StatusFound)
	case "system":
		c := &http.Client{
			Transport: &http.Transport{
				Dial: func(network, addr string) (net.Conn, error) {
					return net.Dial("unix", "/run/camjournald.sock")
				},
			},
		}
		res, err := c.Get("http://journal/entries")
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		defer res.Body.Close()
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		io.Copy(w, res.Body)
	default:
		http.Error(w, "no such logs", 404)
	}
}
// Returns an implementation of a Google Cloud Logging (GCL) sink.
func new() (sink_api.ExternalSink, error) {
	// TODO: Retry the OnGCE call for ~15 seconds before declaring failure.
	time.Sleep(3 * time.Second)
	// Only support GCE for now.
	if !metadata.OnGCE() {
		return nil, fmt.Errorf("the Google Cloud Logging (GCL) sink failed to start: this process must be running on Google Compute Engine (GCE)")
	}
	// Detect project ID
	projectId, err := metadata.ProjectID()
	if err != nil {
		return nil, err
	}
	glog.Infof("Project ID for GCL sink is: %q", projectId)
	// Check for required auth scopes
	err = gce.VerifyAuthScope(GCLAuthScope)
	if err != nil {
		return nil, err
	}
	impl := &gclSink{
		projectId:  projectId,
		httpClient: &http.Client{},
	}
	// Get an initial token.
	err = impl.refreshToken()
	if err != nil {
		return nil, err
	}
	return impl, nil
}
func maybeRemapCloudSQL(host string) (out string, err error) {
	if !strings.HasSuffix(host, cloudSQLSuffix) {
		return host, nil
	}
	inst := strings.TrimSuffix(host, cloudSQLSuffix)
	if !metadata.OnGCE() {
		return "", errors.New("CloudSQL support is only available when running on Google Compute Engine")
	}
	proj, err := metadata.ProjectID()
	if err != nil {
		return "", fmt.Errorf("failed to look up GCE project ID: %v", err)
	}
	admin, err := sqladmin.New(oauth2.NewClient(context.Background(), google.ComputeTokenSource("")))
	if err != nil {
		return "", fmt.Errorf("error creating Cloud SQL admin client: %v", err)
	}
	listRes, err := admin.Instances.List(proj).Do()
	if err != nil {
		return "", fmt.Errorf("error enumerating Cloud SQL instances: %v", err)
	}
	for _, it := range listRes.Items {
		if !strings.EqualFold(it.Instance, inst) {
			continue
		}
		js, _ := json.Marshal(it)
		log.Printf("Found Cloud SQL instance %s: %s", inst, js)
		for _, ipm := range it.IpAddresses {
			return ipm.IpAddress, nil
		}
		return "", fmt.Errorf("no external IP address for Cloud SQL instance %s", inst)
	}
	var found []string
	for _, it := range listRes.Items {
		found = append(found, it.Instance)
	}
	return "", fmt.Errorf("Cloud SQL instance %q not found. Found: %q", inst, found)
}
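// A hedged usage sketch: a DSN host ending in cloudSQLSuffix is remapped to
// the instance's external IP before dialing, while any other host passes
// through unchanged. resolveDBHost is an illustrative name, not from the
// original source.
func resolveDBHost(host string) (string, error) {
	out, err := maybeRemapCloudSQL(host)
	if err != nil {
		return "", fmt.Errorf("resolving %q: %v", host, err)
	}
	return out, nil
}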
func init() {
	if !metadata.OnGCE() {
		return
	}
	hc, err := google.DefaultClient(oauth2.NoContext)
	if err != nil {
		registerBrokenFS(fmt.Errorf("could not get http client for context: %v", err))
		return
	}
	projID, err := metadata.ProjectID()
	if projID == "" || err != nil {
		registerBrokenFS(fmt.Errorf("could not get GCE project ID: %v", err))
		return
	}
	ctx := cloud.NewContext(projID, hc)
	sc, err := storage.NewClient(ctx)
	if err != nil {
		registerBrokenFS(fmt.Errorf("could not get cloud storage client: %v", err))
		return
	}
	wkfs.RegisterFS("/gcs/", &gcsFS{
		ctx: ctx,
		sc:  sc,
	})
}
// LogWriter returns an environment-specific io.Writer suitable for passing
// to log.SetOutput. The returned writer always includes os.Stderr.
func LogWriter() (w io.Writer) {
	w = os.Stderr
	if !env.OnGCE() {
		return
	}
	projID, err := metadata.ProjectID()
	if projID == "" {
		log.Printf("Error getting project ID: %v", err)
		return
	}
	scopes, _ := metadata.Scopes("default")
	haveScope := func(scope string) bool {
		for _, x := range scopes {
			if x == scope {
				return true
			}
		}
		return false
	}
	if !haveScope(logging.Scope) {
		log.Printf("when this Google Compute Engine VM instance was created, it wasn't granted enough access to use Google Cloud Logging (scope URL: %v)", logging.Scope)
		return
	}
	logc, err := logging.NewClient(context.Background(), projID, "camlistored-stderr")
	if err != nil {
		log.Printf("Error creating Google logging client: %v", err)
		return
	}
	return io.MultiWriter(w, logc.Writer(logging.Debug))
}
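// Typical wiring for LogWriter, per its doc comment (a sketch; initLogging
// is an assumed name): stderr always receives log output, and Cloud Logging
// is added transparently when the VM has the required scope.
func initLogging() {
	log.SetOutput(LogWriter())
}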
func checkInProduction() bool {
	if !metadata.OnGCE() {
		return false
	}
	proj, _ := metadata.ProjectID()
	inst, _ := metadata.InstanceName()
	log.Printf("Running on GCE: %v / %v", proj, inst)
	return proj == "camlistore-website" && inst == "camweb"
}
func (sh *StatusHandler) googleCloudConsole() (string, error) {
	if !env.OnGCE() {
		return "", errors.New("not on GCE")
	}
	projID, err := metadata.ProjectID()
	if err != nil {
		return "", fmt.Errorf("error getting project ID: %v", err)
	}
	return "https://console.cloud.google.com/compute/instances?project=" + projID, nil
}
// projectID returns the GCE project ID used for running this camweb on GCE
// and/or for logging on Google Cloud Logging, if any.
func projectID() string {
	if *gceProjectID != "" {
		return *gceProjectID
	}
	projID, err := metadata.ProjectID()
	if projID == "" || err != nil {
		log.Fatalf("GCE project ID needed but --gce_project_id not specified (and not running on GCE); metadata error: %v", err)
	}
	return projID
}
func CreateGCMSink(uri *url.URL) (core.DataSink, error) {
	if len(uri.Scheme) > 0 {
		return nil, fmt.Errorf("scheme should not be set for GCM sink")
	}
	if len(uri.Host) > 0 {
		return nil, fmt.Errorf("host should not be set for GCM sink")
	}
	opts, err := url.ParseQuery(uri.RawQuery)
	if err != nil {
		return nil, fmt.Errorf("invalid sink options: %v", err)
	}
	metrics := "all"
	if len(opts["metrics"]) > 0 {
		metrics = opts["metrics"][0]
	}
	var metricFilter MetricFilter
	switch metrics {
	case "all":
		metricFilter = metricsAll
	case "autoscaling":
		metricFilter = metricsOnlyAutoscaling
	default:
		return nil, fmt.Errorf("invalid metrics parameter: %s", metrics)
	}
	if err := gce_util.EnsureOnGCE(); err != nil {
		return nil, err
	}
	// Detect project ID
	projectId, err := gce.ProjectID()
	if err != nil {
		return nil, err
	}
	// Create Google Cloud Monitoring service.
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	gcmService, err := gcm.New(client)
	if err != nil {
		return nil, err
	}
	sink := &gcmSink{
		registered:   false,
		project:      projectId,
		gcmService:   gcmService,
		metricFilter: metricFilter,
	}
	glog.Infof("created GCM sink")
	if err := sink.registerAllMetrics(); err != nil {
		glog.Warningf("Error during metrics registration: %v", err)
	}
	return sink, nil
}
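// Per the checks above, the sink URI must carry no scheme or host and may
// carry an optional "metrics" query parameter ("all" or "autoscaling").
// A hedged construction sketch (gcmSinkURI is an assumed helper name):
func gcmSinkURI(metrics string) *url.URL {
	return &url.URL{RawQuery: url.Values{"metrics": {metrics}}.Encode()}
}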
func initClient() *http.Client {
	metaproject, _ := metadata.ProjectID()
	if Gceproject == "" && metaproject == "" {
		log.Errorf("No project specified")
		os.Exit(1)
	} else if Gceproject == "" && metaproject != "" {
		Gceproject = metaproject
	}
	gcs := GCS(Gceproject)
	gc := gcs.Client()
	log.Debugf("Google Auth: %#v", gc)
	return gc
}
func initGCE() error {
	initGCECalled = true
	var err error
	// Use the staging project if not on GCE. This assumes the DefaultTokenSource
	// credential used below has access to that project.
	if !metadata.OnGCE() {
		projectID = stagingProjectID
	} else {
		projectID, err = metadata.ProjectID()
		if err != nil {
			return fmt.Errorf("failed to get current GCE ProjectID: %v", err)
		}
	}
	inStaging = projectID == stagingProjectID
	if inStaging {
		log.Printf("Running in staging cluster (%q)", projectID)
	}
	tokenSource, err = google.DefaultTokenSource(oauth2.NoContext)
	if err != nil {
		return fmt.Errorf("failed to get a token source: %v", err)
	}
	httpClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
	serviceCtx = cloud.NewContext(projectID, httpClient)
	projectZone, err = metadata.Get("instance/zone")
	if err != nil || projectZone == "" {
		return fmt.Errorf("failed to get current GCE zone: %v", err)
	}
	// Convert the zone from "projects/1234/zones/us-central1-a" to "us-central1-a".
	projectZone = path.Base(projectZone)
	if !hasComputeScope() {
		return errors.New("the coordinator is not running with access to read and write Compute resources; VM support disabled")
	}
	projectRegion = projectZone[:strings.LastIndex(projectZone, "-")] // "us-central1"
	externalIP, err = metadata.ExternalIP()
	if err != nil {
		return fmt.Errorf("ExternalIP: %v", err)
	}
	computeService, err = compute.New(httpClient)
	if err != nil {
		return fmt.Errorf("compute.New: %v", err)
	}
	errTryDeps = checkTryBuildDeps()
	if errTryDeps != nil {
		log.Printf("TryBot builders disabled due to error: %v", errTryDeps)
	} else {
		log.Printf("TryBot builders enabled.")
	}
	go gcePool.pollQuotaLoop()
	return nil
}
func initGCP() {
	initGCPOnce.Do(func() {
		onGCE = metadata.OnGCE()
		if onGCE {
			// These will fail on instances if the metadata service is
			// down or the client is compiled with an API version that
			// has been removed. Since these are not vital, let's ignore
			// them and make their fields in the dockerLogEntry ,omitempty.
			projectID, _ = metadata.ProjectID()
			zone, _ = metadata.Zone()
			instanceName, _ = metadata.InstanceName()
			instanceID, _ = metadata.InstanceID()
		}
	})
}
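// A sketch of the ",omitempty" pairing the comment above refers to: leaving
// these values zero on metadata failure means the corresponding fields are
// simply dropped from the marshaled JSON rather than emitted as empty
// strings. The exact field set is an assumption for illustration.
type dockerLogEntryFields struct {
	ProjectID    string `json:"projectID,omitempty"`
	Zone         string `json:"zone,omitempty"`
	InstanceName string `json:"instanceName,omitempty"`
	InstanceID   string `json:"instanceID,omitempty"`
}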
func getProjectAndZone() (string, string, error) {
	result, err := metadata.Get("instance/zone")
	if err != nil {
		return "", "", err
	}
	parts := strings.Split(result, "/")
	if len(parts) != 4 {
		return "", "", fmt.Errorf("unexpected response: %s", result)
	}
	zone := parts[3]
	projectID, err := metadata.ProjectID()
	if err != nil {
		return "", "", err
	}
	return projectID, zone, nil
}
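// For reference: on GCE the "instance/zone" metadata value has the form
// "projects/123456789/zones/us-central1-a", which is why getProjectAndZone
// expects exactly four slash-separated parts. path.Base (as used by initGCE
// above) is an equivalent, shorter extraction; a sketch:
func zoneFromMetadata() (string, error) {
	result, err := metadata.Get("instance/zone")
	if err != nil {
		return "", err
	}
	return path.Base(result), nil // e.g. "us-central1-a"
}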
func (c *googleAPIClient) instanceInfo() (project, zone, instance string, err error) {
	project, err = metadata.ProjectID()
	if err != nil {
		return
	}
	zone, err = metadata.Zone()
	if err != nil {
		return
	}
	name, err := metadata.InstanceName()
	if err != nil {
		return
	}
	instancePath := path.Join("projects", project, "zones", zone, "instances", name)
	instance = strings.TrimRight(c.config.InstanceBase, "/") + "/" + instancePath
	return
}
func newGcmSource() (dataSource, error) {
	// Detect project ID
	projectId, err := gce.ProjectID()
	if err != nil {
		return nil, err
	}
	// Create Google Cloud Monitoring service.
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	s, err := gcm.New(client)
	if err != nil {
		return nil, err
	}
	return &gcmSource{
		project:    projectId,
		gcmService: s,
	}, nil
}
// newCloudDns creates a new instance of a Google Cloud DNS Interface.
func newCloudDns(config io.Reader) (*Interface, error) {
	projectID, _ := metadata.ProjectID() // On error we get an empty string, which is fine for now.
	var tokenSource oauth2.TokenSource
	// Possibly override defaults with config below
	if config != nil {
		var cfg Config
		if err := gcfg.ReadInto(&cfg, config); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
		glog.Infof("Using Google Cloud DNS provider config %+v", cfg)
		if cfg.Global.ProjectID != "" {
			projectID = cfg.Global.ProjectID
		}
		if cfg.Global.TokenURL != "" {
			tokenSource = gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
		}
	}
	return CreateInterface(projectID, tokenSource)
}
func init() {
	if onGCE {
		// These will fail on instances if the metadata service is
		// down or the client is compiled with an API version that
		// has been removed. Since these are not vital, let's ignore
		// them and make their fields in the dockerLogEntry ,omitempty.
		projectID, _ = metadata.ProjectID()
		zone, _ = metadata.Zone()
		instanceName, _ = metadata.InstanceName()
		instanceID, _ = metadata.InstanceID()
	}
	if err := logger.RegisterLogDriver(name, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil {
		logrus.Fatal(err)
	}
}
func (cmo *CloudMonitoringOutput) Init(config interface{}) (err error) {
	cmo.conf = config.(*CloudMonitoringConfig)
	if metadata.OnGCE() {
		if cmo.conf.ProjectId == "" {
			if cmo.conf.ProjectId, err = metadata.ProjectID(); err != nil {
				return
			}
		}
		if cmo.conf.ResourceId == "" {
			if cmo.conf.ResourceId, err = metadata.InstanceID(); err != nil {
				return
			}
		}
		if cmo.conf.Zone == "" {
			if cmo.conf.Zone, err = metadata.Get("instance/zone"); err != nil {
				return
			}
		}
	}
	if cmo.conf.ProjectId == "" {
		return errors.New("ProjectId cannot be blank")
	}
	cmo.batchChan = make(chan MonitoringBatch)
	cmo.backChan = make(chan []*cloudmonitoring.TimeseriesPoint, 2)
	cmo.outputExit = make(chan error)
	if cmo.client, err = google.DefaultClient(oauth2.NoContext, cloudmonitoring.MonitoringScope); err != nil {
		return
	}
	if cmo.service, err = cloudmonitoring.New(cmo.client); err != nil {
		return
	}
	r := &cloudmonitoring.ListMetricDescriptorsRequest{Kind: "cloudmonitoring#listMetricDescriptorsRequest"}
	_, err = cmo.service.MetricDescriptors.List(cmo.conf.ProjectId, r).Do()
	if err != nil {
		log.Printf("Init CloudMonitoringOutput Error: %v", err)
	}
	return
}
// Returns an implementation of a Google Cloud Logging (GCL) sink.
func new() (sink_api.ExternalSink, error) {
	token, err := gce.NewAuthTokenProvider(GCLAuthScope)
	if err != nil {
		return nil, err
	}
	// Detect project ID
	projectId, err := metadata.ProjectID()
	if err != nil {
		return nil, err
	}
	glog.Infof("Project ID for GCL sink is: %q", projectId)
	impl := &gclSink{
		token:      token,
		projectId:  projectId,
		httpClient: &http.Client{},
	}
	return impl, nil
}
func (clo *CloudLoggingOutput) Init(config interface{}) (err error) {
	clo.conf = config.(*CloudLoggingConfig)
	if metadata.OnGCE() {
		if clo.conf.ProjectId == "" {
			if clo.conf.ProjectId, err = metadata.ProjectID(); err != nil {
				return
			}
		}
		if clo.conf.ResourceId == "" {
			if clo.conf.ResourceId, err = metadata.InstanceID(); err != nil {
				return
			}
		}
		if clo.conf.Zone == "" {
			if clo.conf.Zone, err = metadata.Get("instance/zone"); err != nil {
				return
			}
		}
	}
	if clo.conf.ProjectId == "" {
		return errors.New("ProjectId cannot be blank")
	}
	clo.batchChan = make(chan LogBatch)
	clo.backChan = make(chan []*logging.LogEntry, 2)
	clo.outputExit = make(chan error)
	if clo.client, err = google.DefaultClient(oauth2.NoContext, logging.CloudPlatformScope); err != nil {
		return
	}
	if clo.service, err = logging.New(clo.client); err != nil {
		return
	}
	_, err = clo.service.Projects.LogServices.List(clo.conf.ProjectId).Do()
	if err != nil {
		log.Printf("Init CloudLoggingOutput Error: %v", err)
	}
	return
}
func CreateGCLSink(uri *url.URL) (core.EventSink, error) {
	if err := gce_util.EnsureOnGCE(); err != nil {
		return nil, err
	}
	// Detect project ID
	projectId, err := gce.ProjectID()
	if err != nil {
		return nil, err
	}
	// Create Google Cloud Logging service.
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	gclService, err := gcl.New(client)
	if err != nil {
		return nil, err
	}
	sink := &gclSink{project: projectId, gclService: gclService}
	glog.Info("created GCL sink")
	return sink, nil
}
// LogWriter returns an environment-specific io.Writer suitable for passing
// to log.SetOutput. The returned writer always includes os.Stderr.
func LogWriter() (w io.Writer) {
	w = os.Stderr
	if !env.OnGCE() {
		return
	}
	projID, err := metadata.ProjectID()
	if projID == "" {
		log.Printf("Error getting project ID: %v", err)
		return
	}
	hc, err := google.DefaultClient(oauth2.NoContext)
	if err != nil {
		log.Printf("Error creating default GCE OAuth2 client: %v", err)
		return
	}
	logc, err := logging.NewClient(cloud.NewContext(projID, hc), "camlistored-stderr")
	if err != nil {
		log.Printf("Error creating Google logging client: %v", err)
		return
	}
	return io.MultiWriter(w, logc.Writer(logging.Debug))
}
// Returns a thread-compatible implementation of GCM interactions.
func NewCore() (*GcmCore, error) {
	// TODO: Retry the OnGCE call for ~15 seconds before declaring failure.
	time.Sleep(3 * time.Second)
	// Only support GCE for now.
	if !metadata.OnGCE() {
		return nil, fmt.Errorf("the GCM sink is currently only supported on GCE")
	}
	// Detect project.
	project, err := metadata.ProjectID()
	if err != nil {
		return nil, err
	}
	// Check required service accounts
	err = gce.VerifyAuthScope(GCLAuthScope)
	if err != nil {
		return nil, err
	}
	core := &GcmCore{
		project:         project,
		exportedMetrics: make(map[string]metricDescriptor),
		lastValue:       gcstore.New(time.Hour),
	}
	// Get an initial token.
	err = core.refreshToken()
	if err != nil {
		return nil, err
	}
	if err := core.listMetrics(); err != nil {
		return nil, err
	}
	return core, nil
}
// Periodically populate DNS using the host-inventory:
func Updater(config *types.Config) {

	// Run forever:
	log.Infof("[dnsUpdater] Started")
	for {

		// Sleep until the next run:
		log.Debugf("[dnsUpdater] Sleeping for %vs ...", config.DNSUpdateFrequency)
		time.Sleep(time.Duration(config.DNSUpdateFrequency) * time.Second)

		// Lock the host-list (so we don't try to access it when another go-routine is modifying it):
		log.Tracef("[dnsUpdater] Trying to lock config.HostInventoryMutex ...")
		config.HostInventoryMutex.Lock()
		log.Tracef("[dnsUpdater] Locked config.HostInventoryMutex")

		// See if we actually have any changes to make:
		if len(config.HostInventory.Environments) > 0 {

			// Connect to GCE (either from GCE permissions, JSON file, or ENV-vars):
			client, err := google.DefaultClient(context.Background(), googledns.CloudPlatformScope)
			if err != nil {
				log.Errorf("[dnsUpdater] Unable to authenticate to GCE! (%s)", err)
				config.HostInventoryMutex.Unlock() // Unlock before continuing, or the next iteration's Lock() deadlocks.
				continue
			}

			// Get a DNS service-object:
			dnsService, err := googledns.New(client)
			if err != nil {
				log.Errorf("[dnsUpdater] Failed to connect to GCE! %v", err)
				config.HostInventoryMutex.Unlock()
				continue
			}

			// Get the project:
			googleComputeProject, err := metadata.ProjectID()
			if err != nil {
				log.Errorf("[dnsUpdater] Unable to retrieve metadata from instance! (%s)", err)
				config.HostInventoryMutex.Unlock()
				continue
			}
			log.Debugf("[dnsUpdater] Found project-id (%v)", googleComputeProject)

			// Get a list of pre-existing DNS records in this zone:
			resourceRecordSetsList, err := dnsService.ResourceRecordSets.List(googleComputeProject, config.DNSZoneName).Do()
			if err != nil {
				log.Errorf("[dnsUpdater] Unable to make DNS ResourceRecordSets.List() call! (%s)", err)
				config.HostInventoryMutex.Unlock()
				continue
			}
			log.Debugf("[dnsUpdater] Found %v pre-existing DNS records", len(resourceRecordSetsList.Rrsets))

			// Go through each environment:
			for environmentName, environment := range config.HostInventory.Environments {

				// Prepare a "change" (which is a list of records to add):
				change := &googledns.Change{
					Additions: []*googledns.ResourceRecordSet{},
				}

				// See if we already have a DNS entry:
				for _, resourceRecordSet := range resourceRecordSetsList.Rrsets {
					if record, ok := environment.DNSRecords[resourceRecordSet.Name]; ok {
						// See if the record needs to be deleted and changed:
						if fmt.Sprintf("%v", record) == fmt.Sprintf("%v", resourceRecordSet.Rrdatas) {
							// Delete the record from the host-inventory (to prevent it from being created again):
							log.Debugf("[dnsUpdater] Record %v already exists in DNS (%v) - no need to make it again", resourceRecordSet.Name, record)
							delete(environment.DNSRecords, resourceRecordSet.Name)
						} else {
							// The record doesn't match, so we'll ask for it to be deleted:
							change.Deletions = append(change.Deletions, resourceRecordSet)
						}
					}
				}

				// Now iterate over the host-inventory:
				log.Debugf("[dnsUpdater] Creating requests for the '%v' environment ...", environmentName)
				for dnsRecordName, dnsRecordValue := range environment.DNSRecords {
					// Prepare a resourceRecordSet:
					log.Debugf("[dnsUpdater] Record: %v => %v", dnsRecordName, dnsRecordValue)
					change.Additions = append(change.Additions, &googledns.ResourceRecordSet{
						Name:    dnsRecordName,
						Rrdatas: dnsRecordValue,
						Ttl:     config.DNSTTL,
						Type:    "A",
					})
				}

				// Make the Create() call:
				if len(change.Additions) > 0 || len(change.Deletions) > 0 {
					changeMade, err := dnsService.Changes.Create(googleComputeProject, config.DNSZoneName, change).Do()
					if err != nil {
						log.Errorf("[dnsUpdater] Unable to make DNS Changes.Create() call! (%s)", err)
						continue
					}
					log.Debugf("[dnsUpdater] Made %v changes to DNS zone (%v), status: %v", len(changeMade.Additions), googleComputeProject, changeMade.Status)
				} else {
					log.Infof("[dnsUpdater] No changes to be made")
				}
			}

		} else {
			log.Info("[dnsUpdater] No DNS changes to make")
		}

		// Unlock the host-inventory:
		log.Tracef("[dnsUpdater] Unlocking config.HostInventoryMutex ...")
		config.HostInventoryMutex.Unlock()
	}
}
// Periodically populate the host-inventory:
func Updater(config *types.Config) {

	log.Infof("[hostInventoryUpdater] Started")
	updateFrequency := 5

	// Run forever:
	for {

		// Sleep until the next run:
		log.Debugf("[hostInventoryUpdater] Sleeping for %vs ...", updateFrequency)
		time.Sleep(time.Duration(updateFrequency) * time.Second)

		// Connect to GCE (either from GCE permissions, JSON file, or ENV-vars):
		client, err := google.DefaultClient(context.Background(), compute.ComputeScope)
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to authenticate to GCE! (%s)", err)
			continue
		}
		log.Debugf("[hostInventoryUpdater] Authenticated to GCE")

		// Get a Compute service-object:
		computeService, err := compute.New(client)
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Failed to connect to GCE! %v", err)
			continue
		}

		// Get the project:
		googleComputeProject, err := metadata.ProjectID()
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to retrieve metadata from instance! (%s)", err)
			continue
		}
		log.Debugf("[hostInventoryUpdater] Found project-id (%v)", googleComputeProject)

		// Make the zones.list() call:
		zonesList, err := computeService.Zones.List(googleComputeProject).Do()
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to make zones.list() call! (%s)", err)
			continue
		}
		log.Debugf("[hostInventoryUpdater] Found %v zones in this project (%v)", len(zonesList.Items), googleComputeProject)

		// Lock the host-list (so we don't change it while another goroutine is using it):
		log.Tracef("[hostInventoryUpdater] Trying to lock config.HostInventoryMutex ...")
		config.HostInventoryMutex.Lock()
		log.Tracef("[hostInventoryUpdater] Locked config.HostInventoryMutex")

		// Clear out the existing host-inventory:
		config.HostInventory = types.HostInventory{
			Environments: make(map[string]types.Environment),
		}

		// Now check each zone:
		for _, googleComputeZone := range zonesList.Items {

			// Make the instances.list() call:
			instanceList, err := computeService.Instances.List(googleComputeProject, googleComputeZone.Name).Do()
			if err != nil {
				log.Errorf("[hostInventoryUpdater] Unable to make instances.list() call! (%s)", err)
				continue
			}
			log.Debugf("[hostInventoryUpdater] Found %v instances running in this project (%v) in this zone (%v)", len(instanceList.Items), googleComputeProject, googleComputeZone.Name)

			// Get the region-name (by slicing off the last two characters - gross!):
			regionName := googleComputeZone.Name[:len(googleComputeZone.Name)-2]

			// Iterate over each instance returned:
			for _, instance := range instanceList.Items {

				// Search for our role and environment metadata:
				var role, environment string
				for _, metadata := range instance.Metadata.Items {
					if metadata.Key == config.RoleMetadataKey {
						role = *metadata.Value
					}
					if metadata.Key == config.EnvironmentMetadataKey {
						environment = *metadata.Value
					}
				}

				// Make sure we have environment and role tags:
				if environment == "" || role == "" {
					log.Debugf("[hostInventoryUpdater] Instance (%v) must have both 'environment' and 'role' metadata in order for DNS records to be created!", instance.Name)
					// Continue with the next instance:
					continue
				}
				log.Infof("[hostInventoryUpdater] Building records for instance (%v) in zone (%v) ...", instance.Name, googleComputeZone.Name)

				// Add a new environment to the inventory (unless we already have it):
				if _, ok := config.HostInventory.Environments[environment]; !ok {
					config.HostInventory.Environments[environment] = types.Environment{
						DNSRecords: make(map[string][]string),
					}
				}

				// Build records for the primary network interface (appending to a
				// nil slice creates the record; otherwise it extends it):
				if len(instance.NetworkInterfaces) > 0 {

					// Either create or add to the role-per-zone record:
					internalZoneRecord := fmt.Sprintf("%v.%v.i.%v.%v", role, googleComputeZone.Name, environment, config.DNSDomainName)
					config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord], instance.NetworkInterfaces[0].NetworkIP)

					// Either create or add to the role-per-region record:
					internalRegionRecord := fmt.Sprintf("%v.%v.i.%v.%v", role, regionName, environment, config.DNSDomainName)
					config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord], instance.NetworkInterfaces[0].NetworkIP)
				}

				// Build records for the secondary network interface (external addresses don't appear as interfaces on GCE, so this will never work):
				if len(instance.NetworkInterfaces) > 1 {
					// Either create or add to the external record:
					externalRecord := fmt.Sprintf("%v.%v.e.%v.%v", role, regionName, environment, config.DNSDomainName)
					config.HostInventory.Environments[environment].DNSRecords[externalRecord] = append(config.HostInventory.Environments[environment].DNSRecords[externalRecord], instance.NetworkInterfaces[1].NetworkIP)
				}
			}
		}

		// Unlock the host-inventory:
		log.Tracef("[hostInventoryUpdater] Unlocking config.HostInventoryMutex ...")
		config.HostInventoryMutex.Unlock()

		// Now set the sleep time to the correct value:
		updateFrequency = config.HostUpdateFrequency
	}
}
func main() {
	flag.Parse()
	if *root == "" {
		var err error
		*root, err = os.Getwd()
		if err != nil {
			log.Fatalf("Failed to getwd: %v", err)
		}
	}
	readTemplates()

	mux := http.DefaultServeMux
	mux.Handle("/favicon.ico", http.FileServer(http.Dir(filepath.Join(*root, "static"))))
	mux.Handle("/robots.txt", http.FileServer(http.Dir(filepath.Join(*root, "static"))))
	mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(filepath.Join(*root, "static")))))
	mux.Handle("/talks/", http.StripPrefix("/talks/", http.FileServer(http.Dir(filepath.Join(*root, "talks")))))
	mux.Handle(pkgPattern, godocHandler{})
	mux.Handle(cmdPattern, godocHandler{})
	mux.HandleFunc(errPattern, errHandler)
	mux.HandleFunc("/r/", gerritRedirect)
	mux.HandleFunc("/dl/", releaseRedirect)
	mux.HandleFunc("/debugz/ip", ipHandler)
	mux.Handle("/docs/contributing", redirTo("/code#contributing"))
	mux.Handle("/lists", redirTo("/community"))
	mux.HandleFunc("/contributors", contribHandler())
	mux.HandleFunc("/", mainHandler)

	if *buildbotHost != "" && *buildbotBackend != "" {
		buildbotUrl, err := url.Parse(*buildbotBackend)
		if err != nil {
			log.Fatalf("Failed to parse %v as a URL: %v", *buildbotBackend, err)
		}
		buildbotHandler := httputil.NewSingleHostReverseProxy(buildbotUrl)
		bbhpattern := strings.TrimRight(*buildbotHost, "/") + "/"
		mux.Handle(bbhpattern, buildbotHandler)
	}

	if *httpsAddr != "" {
		if launcher := gceDeployHandler("/launch/"); launcher != nil {
			mux.Handle("/launch/", launcher)
		}
	}

	var handler http.Handler = &noWwwHandler{Handler: mux}
	if *logDir != "" || *logStdout {
		handler = NewLoggingHandler(handler, NewApacheLogger(*logDir, *logStdout))
	}

	if *gceLogName != "" {
		projID := *gceProjectID
		if projID == "" {
			if v, err := metadata.ProjectID(); v == "" || err != nil {
				log.Fatalf("Use of --gce_log_name without specifying --gce_project_id (and not running on GCE); metadata error: %v", err)
			} else {
				projID = v
			}
		}
		var hc *http.Client
		if *gceJWTFile != "" {
			jsonSlurp, err := ioutil.ReadFile(*gceJWTFile)
			if err != nil {
				log.Fatalf("Error reading --gce_jwt_file value: %v", err)
			}
			jwtConf, err := google.JWTConfigFromJSON(jsonSlurp, logging.Scope)
			if err != nil {
				log.Fatalf("Error parsing --gce_jwt_file value: %v", err)
			}
			hc = jwtConf.Client(context.Background())
		} else {
			if !metadata.OnGCE() {
				log.Fatal("No --gce_jwt_file and not running on GCE.")
			}
			var err error
			hc, err = google.DefaultClient(oauth2.NoContext)
			if err != nil {
				log.Fatal(err)
			}
		}
		ctx := cloud.NewContext(projID, hc)
		logc, err := logging.NewClient(ctx, projID, *gceLogName)
		if err != nil {
			log.Fatal(err)
		}
		if err := logc.Ping(); err != nil {
			log.Fatalf("Failed to ping Google Cloud Logging: %v", err)
		}
		handler = NewLoggingHandler(handler, gceLogger{logc})
	}

	errc := make(chan error)
	startEmailCommitLoop(errc)

	if *alsoRun != "" {
		runAsChild(*alsoRun)
	}

	httpServer := &http.Server{
		Addr:         *httpAddr,
		Handler:      handler,
		ReadTimeout:  5 * time.Minute,
		WriteTimeout: 30 * time.Minute,
	}
	go func() {
		errc <- httpServer.ListenAndServe()
	}()

	if *httpsAddr != "" {
		log.Printf("Starting TLS server on %s", *httpsAddr)
		httpsServer := new(http.Server)
		*httpsServer = *httpServer
		httpsServer.Addr = *httpsAddr
		go func() {
			errc <- httpsServer.ListenAndServeTLS(*tlsCertFile, *tlsKeyFile)
		}()
	}

	log.Fatalf("Serve error: %v", <-errc)
}