Example 1
// projectHasInstance checks all the possible zones to see whether the project
// already has an instance. It returns the name of the zone of the first
// instance it finds, if any.
func (d *Deployer) projectHasInstance() (zone string, err error) {
	s, err := compute.New(d.Client)
	if err != nil {
		return "", err
	}
	// TODO(mpl): make use of the handler's cached zones.
	zl, err := compute.NewZonesService(s).List(d.Conf.Project).Do()
	if err != nil {
		return "", fmt.Errorf("could not get a list of zones: %v", err)
	}
	var zoneOnce sync.Once
	var grp syncutil.Group
	errc := make(chan error, 1)
	zonec := make(chan string, 1)
	timeout := time.NewTimer(30 * time.Second)
	defer timeout.Stop()
	for _, z := range zl.Items {
		z := z
		grp.Go(func() error {
			list, err := s.Instances.List(d.Conf.Project, z.Name).Do()
			if err != nil {
				return fmt.Errorf("could not list existing instances: %v", err)
			}
			if len(list.Items) > 0 {
				zoneOnce.Do(func() {
					zonec <- z.Name
				})
			}
			return nil
		})
	}
	go func() {
		errc <- grp.Err()
	}()
	// We block until either an instance is found in a zone, all the instance
	// listing is done, or we time out.
	select {
	case err = <-errc:
		return "", err
	case zone = <-zonec:
		// We voluntarily ignore any listing error if we found at least one instance
		// because that's what we primarily want to report about.
		return zone, nil
	case <-timeout.C:
		return "", errors.New("timed out")
	}
}
Example 2
func getComputeClient() (*compute.Service, error) {
	const retries = 10
	const backoff = time.Second * 6

	// Set up the GCE client for provisioning instances.
	// Getting credentials on GCE Jenkins is flaky, so try a few times.
	var err error
	var cs *compute.Service
	for i := 0; i < retries; i++ {
		if i > 0 {
			time.Sleep(backoff)
		}

		var client *http.Client
		client, err = google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
		if err != nil {
			continue
		}

		cs, err = compute.New(client)
		if err != nil {
			continue
		}
		return cs, nil
	}
	return nil, err
}
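For context, a hedged sketch of how getComputeClient might be used; the project and zone names are placeholders, not values from the original code:

// listInstancesOnce assumes getComputeClient above; "my-project" and
// "us-central1-a" are hypothetical placeholders.
func listInstancesOnce() error {
	cs, err := getComputeClient()
	if err != nil {
		return fmt.Errorf("could not create compute client after retries: %v", err)
	}
	list, err := cs.Instances.List("my-project", "us-central1-a").Do()
	if err != nil {
		return err
	}
	for _, inst := range list.Items {
		log.Printf("found instance: %s", inst.Name)
	}
	return nil
}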
Example 3
// Init creates and initializes the compute client.
func (g *Google) Init() error {
	// Initialize auth: we re-use the code from docker-machine.
	oauthClient, err := newOauthClient(g.context.GCETokenPath)
	if err != nil {
		return util.Errorf("could not get OAuth client: %v", err)
	}

	cSvc, err := compute.New(oauthClient)
	if err != nil {
		return util.Errorf("could not get Compute service: %v", err)
	}
	g.computeService = cSvc

	ivSvc, err := resourceviews.New(oauthClient)
	if err != nil {
		return util.Errorf("could not get Compute service: %v", err)
	}
	g.instanceGroupsService = ivSvc

	if err = g.checkProjectExists(); err != nil {
		return util.Errorf("invalid project %q: %v", g.project, err)
	}

	log.Infof("validated project name: %q", g.project)
	return nil
}
Example 4
// Get returns the Instance corresponding to the Project, Zone, and Name defined in the
// Deployer's Conf.
func (d *Deployer) Get() (*compute.Instance, error) {
	computeService, err := compute.New(d.Client)
	if err != nil {
		return nil, err
	}
	return computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do()
}
Example 5
// newComputeUtil creates and initializes a ComputeUtil.
func newComputeUtil(driver *Driver) (*ComputeUtil, error) {
	client, err := google.DefaultClient(oauth2.NoContext, raw.ComputeScope)
	if err != nil {
		return nil, err
	}

	service, err := raw.New(client)
	if err != nil {
		return nil, err
	}

	return &ComputeUtil{
		zone:              driver.Zone,
		instanceName:      driver.MachineName,
		userName:          driver.SSHUser,
		project:           driver.Project,
		diskTypeURL:       driver.DiskType,
		address:           driver.Address,
		network:           driver.Network,
		preemptible:       driver.Preemptible,
		useInternalIP:     driver.UseInternalIP,
		useInternalIPOnly: driver.UseInternalIPOnly,
		service:           service,
		zoneURL:           apiURL + driver.Project + "/zones/" + driver.Zone,
		globalURL:         apiURL + driver.Project + "/global",
		SwarmMaster:       driver.SwarmMaster,
		SwarmHost:         driver.SwarmHost,
	}, nil
}
Example 6
func (c *Cred) ComputeService() (*compute.Service, error) {
	cfg, err := google.JWTConfigFromJSON([]byte(c.Credentials), compute.ComputeScope)
	if err != nil {
		return nil, err
	}

	return compute.New(cfg.Client(context.Background()))
}
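A possible caller, sketched under the assumption that Credentials is the raw service-account JSON key held as a string; the file path and project are placeholders:

func zonesFromCred(credPath, project string) ([]string, error) {
	data, err := ioutil.ReadFile(credPath) // e.g. a downloaded service-account JSON key
	if err != nil {
		return nil, err
	}
	c := &Cred{Credentials: string(data)} // assumes Credentials holds the JSON key
	svc, err := c.ComputeService()
	if err != nil {
		return nil, err
	}
	zl, err := svc.Zones.List(project).Do()
	if err != nil {
		return nil, err
	}
	var zones []string
	for _, z := range zl.Items {
		zones = append(zones, z.Name)
	}
	return zones, nil
}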
Example 7
func computeMain(client *http.Client, argv []string) {
	if len(argv) != 2 {
		fmt.Fprintln(os.Stderr, "Usage: compute project_id instance_name (to start an instance)")
		return
	}

	service, err := compute.New(client)
	if err != nil {
		log.Fatalf("Unable to create Compute service: %v", err)
	}
	projectId := argv[0]
	instanceName := argv[1]

	prefix := "https://www.googleapis.com/compute/v1/projects/" + projectId
	imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606"
	zone := "us-central1-a"

	// Show the current images that are available.
	res, err := service.Images.List(projectId).Do()
	log.Printf("Got compute.Images.List, err: %#v, %v", res, err)

	instance := &compute.Instance{
		Name:        instanceName,
		Description: "compute sample instance",
		MachineType: prefix + "/zones/" + zone + "/machineTypes/n1-standard-1",
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    "my-root-pd",
					SourceImage: imageURL,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				AccessConfigs: []*compute.AccessConfig{
					&compute.AccessConfig{
						Type: "ONE_TO_ONE_NAT",
						Name: "External NAT",
					},
				},
				Network: prefix + "/global/networks/default",
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			{
				Email: "default",
				Scopes: []string{
					compute.DevstorageFull_controlScope,
					compute.ComputeScope,
				},
			},
		},
	}

	op, err := service.Instances.Insert(projectId, zone, instance).Do()
	log.Printf("Got compute.Operation, err: %#v, %v", op, err)
}
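The Insert call above only returns a pending compute.Operation; computeMain logs it and exits without waiting. A hedged sketch of polling that operation to completion (the same pattern Examples 24 and 25 use inline), assuming the service, projectId, and zone values from above:

// waitForOperation polls a zone operation until it reaches DONE or fails.
func waitForOperation(service *compute.Service, projectId, zone, opName string) error {
	for {
		op, err := service.ZoneOperations.Get(projectId, zone, opName).Do()
		if err != nil {
			return fmt.Errorf("failed to get operation %s: %v", opName, err)
		}
		if op.Status == "DONE" {
			if op.Error != nil {
				return fmt.Errorf("operation failed: %+v", op.Error.Errors)
			}
			return nil
		}
		time.Sleep(2 * time.Second)
	}
}

It could be invoked as waitForOperation(service, projectId, zone, op.Name) right after the Insert call.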
Example 8
// New returns a new instance of GceImages
func New(conf *GCEConfig) (*GceImages, error) {
	var err error
	if conf.ProjectID == "" {
		return nil, errors.New("ProjectID is not set. Please check your configuration.")
	}

	// Increase the timeout. We also need to pass the client via the context itself.
	timeout := time.Second * 30
	ctx := context.WithValue(oauth2.NoContext, oauth2.HTTPClient, &http.Client{
		Transport: &http.Transport{TLSHandshakeTimeout: timeout},
		Timeout:   timeout,
	})

	var client *http.Client

	// allowed scopes
	scopes := []string{compute.ComputeScope}

	// Recommended way is explicit passing of credentials json which can be
	// downloaded from console.developers.google under APIs & Auth/Credentials
	// section
	if conf.AccountFile != "" {
		// expand shell meta character
		path, err := homedir.Expand(conf.AccountFile)
		if err != nil {
			return nil, err
		}

		jsonContent, err := ioutil.ReadFile(path)
		if err != nil {
			return nil, err
		}

		jwtConfig, err := google.JWTConfigFromJSON(jsonContent, scopes...)
		if err != nil {
			return nil, err
		}

		client = jwtConfig.Client(ctx)
	} else {
		// Look for application default credentials, for more details, see:
		// https://developers.google.com/accounts/docs/application-default-credentials
		client, err = google.DefaultClient(ctx, scopes...)
		if err != nil {
			return nil, err
		}
	}

	svc, err := compute.New(client)
	if err != nil {
		return nil, err
	}

	return &GceImages{
		svc:    compute.NewImagesService(svc),
		config: conf,
	}, nil
}
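Within the same package, usage might look like the following sketch; the project ID is a placeholder, and it reaches through the unexported svc field (the compute.ImagesService wrapped above):

func listProjectImages(projectID string) error {
	// Default credentials are used when AccountFile is empty.
	gi, err := New(&GCEConfig{ProjectID: projectID})
	if err != nil {
		return err
	}
	list, err := gi.svc.List(projectID).Do()
	if err != nil {
		return err
	}
	for _, img := range list.Items {
		log.Println(img.Name)
	}
	return nil
}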
Example 9
// newGCECloud creates a new instance of GCECloud.
func newGCECloud(config io.Reader) (*GCECloud, error) {
	projectID, zone, err := getProjectAndZone()
	if err != nil {
		return nil, err
	}
	// TODO: if we want to use this on a machine that doesn't have the http://metadata server
	// e.g. on a user's machine (not VM) somewhere, we need to have an alternative for
	// instance id lookup.
	instanceID, err := getInstanceID()
	if err != nil {
		return nil, err
	}
	externalID, err := getCurrentExternalID()
	if err != nil {
		return nil, err
	}
	networkName, err := getNetworkName()
	if err != nil {
		return nil, err
	}
	tokenSource := google.ComputeTokenSource("")
	if config != nil {
		var cfg Config
		if err := gcfg.ReadInto(&cfg, config); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
		if cfg.Global.ProjectID != "" {
			projectID = cfg.Global.ProjectID
		}
		if cfg.Global.NetworkName != "" {
			networkName = cfg.Global.NetworkName
		}
		if cfg.Global.TokenURL != "" {
			tokenSource = newAltTokenSource(cfg.Global.TokenURL)
		}
	}
	client := oauth2.NewClient(oauth2.NoContext, tokenSource)
	svc, err := compute.New(client)
	if err != nil {
		return nil, err
	}
	containerSvc, err := container.New(client)
	if err != nil {
		return nil, err
	}
	return &GCECloud{
		service:          svc,
		containerService: containerSvc,
		projectID:        projectID,
		zone:             zone,
		instanceID:       instanceID,
		externalID:       externalID,
		networkName:      networkName,
		metadataAccess:   getMetadata,
	}, nil
}
Example 10
// Create sets up and starts a Google Compute Engine instance as defined in d.Conf. It
// creates the necessary Google Storage buckets beforehand.
func (d *Deployer) Create(ctx *context.Context) (*compute.Instance, error) {
	if err := d.checkProjectID(); err != nil {
		return nil, err
	}

	computeService, err := compute.New(d.Client)
	if err != nil {
		return nil, err
	}
	storageService, err := storage.New(d.Client)
	if err != nil {
		return nil, err
	}

	fwc := make(chan error, 1)
	go func() {
		fwc <- d.setFirewall(ctx, computeService)
	}()

	config := cloudConfig(d.Conf)
	const maxCloudConfig = 32 << 10 // per compute API docs
	if len(config) > maxCloudConfig {
		return nil, fmt.Errorf("cloud config length of %d bytes is over %d byte limit", len(config), maxCloudConfig)
	}

	// TODO(mpl): maybe add a wipe mode where we erase other instances before attempting to create.
	if zone, err := d.projectHasInstance(); zone != "" {
		return nil, instanceExistsError{
			project: d.Conf.Project,
			zone:    zone,
		}
	} else if err != nil {
		return nil, fmt.Errorf("could not scan project for existing instances: %v", err)
	}

	if err := d.setBuckets(storageService, ctx); err != nil {
		return nil, fmt.Errorf("could not create buckets: %v", err)
	}

	if err := d.setupHTTPS(storageService); err != nil {
		return nil, fmt.Errorf("could not setup HTTPS: %v", err)
	}

	if err := d.createInstance(computeService, ctx); err != nil {
		return nil, fmt.Errorf("could not create compute instance: %v", err)
	}

	inst, err := computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting instance after creation: %v", err)
	}
	if Verbose {
		ij, _ := json.MarshalIndent(inst, "", "    ")
		log.Printf("Instance: %s", ij)
	}

	if err = <-fwc; err != nil {
		return nil, fmt.Errorf("could not create firewall rules: %v", err)
	}
	return inst, nil
}
Example 11
func (h *DeployHandler) refreshZones() error {
	h.zonesMu.Lock()
	defer h.zonesMu.Unlock()
	defer func() {
		h.regions = make([]string, 0, len(h.zones))
		for r := range h.zones {
			h.regions = append(h.regions, r)
		}
	}()
	// TODO(mpl): get projectID and access tokens from metadata once camweb is on GCE.
	accountFile := os.Getenv("CAMLI_GCE_SERVICE_ACCOUNT")
	if accountFile == "" {
		h.Printf("No service account to query for the zones, using hard-coded ones instead.")
		h.zones = backupZones
		return nil
	}
	project := os.Getenv("CAMLI_GCE_PROJECT")
	if project == "" {
		h.Printf("No project we can query on to get the zones, using hard-coded ones instead.")
		h.zones = backupZones
		return nil
	}
	data, err := ioutil.ReadFile(accountFile)
	if err != nil {
		return err
	}
	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/compute.readonly")
	if err != nil {
		return err
	}
	s, err := compute.New(conf.Client(oauth2.NoContext))
	if err != nil {
		return err
	}
	rl, err := compute.NewRegionsService(s).List(project).Do()
	if err != nil {
		return fmt.Errorf("could not get a list of regions: %v", err)
	}
	h.zones = make(map[string][]string)
	for _, r := range rl.Items {
		zones := make([]string, 0, len(r.Zones))
		for _, z := range r.Zones {
			zone := path.Base(z)
			if zone == "europe-west1-a" {
				// Because even though the docs mark it as deprecated, it still shows up here, go figure.
				continue
			}
			zone = strings.Replace(zone, r.Name, "", 1)
			zones = append(zones, zone)
		}
		h.zones[r.Name] = zones
	}
	return nil
}
Example 12
// newComputeService returns a new Compute Engine API Client,
// to use with Google App Engine.
func newComputeService(c context.Context) (service *compute.Service, err error) {
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: google.AppEngineTokenSource(c, compute.ComputeScope),
			Base: &urlfetch.Transport{
				Context: c,
			},
		},
	}
	return compute.New(client)
}
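A sketch of calling newComputeService from a handler; it assumes the google.golang.org/appengine package, whose NewContext returns the context that urlfetch expects, and the project ID is a placeholder:

func zonesHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	svc, err := newComputeService(c)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	zl, err := svc.Zones.List("my-project").Do() // placeholder project ID
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, z := range zl.Items {
		fmt.Fprintln(w, z.Name)
	}
}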
Example 13
func newComputeService(ctx context.Context) (*compute.Service, error) {
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		return nil, err
	}
	computeService, err := compute.New(client)
	if err != nil {
		return nil, err
	}
	return computeService, nil
}
Example 14
func initGCE() error {
	initGCECalled = true
	// Use the staging project if not on GCE. This assumes the DefaultTokenSource
	// credential used below has access to that project.
	var err error
	if metadata.OnGCE() {
		projectID, err = metadata.ProjectID()
		if err != nil {
			return fmt.Errorf("failed to get current GCE ProjectID: %v", err)
		}
	} else {
		projectID = stagingProjectID
	}

	inStaging = projectID == stagingProjectID
	if inStaging {
		log.Printf("Running in staging cluster (%q)", projectID)
	}

	tokenSource, err = google.DefaultTokenSource(oauth2.NoContext)
	if err != nil {
		return fmt.Errorf("failed to get a token source: %v", err)
	}
	httpClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
	serviceCtx = cloud.NewContext(projectID, httpClient)

	projectZone, err = metadata.Get("instance/zone")
	if err != nil || projectZone == "" {
		return fmt.Errorf("failed to get current GCE zone: %v", err)
	}

	// Convert the zone from "projects/1234/zones/us-central1-a" to "us-central1-a".
	projectZone = path.Base(projectZone)
	if !hasComputeScope() {
		return errors.New("coordinator is not running with access to read and write Compute resources; VM support disabled")
	}
	projectRegion = projectZone[:strings.LastIndex(projectZone, "-")] // "us-central1"

	externalIP, err = metadata.ExternalIP()
	if err != nil {
		return fmt.Errorf("ExternalIP: %v", err)
	}
	computeService, err = compute.New(httpClient)
	if err != nil {
		return fmt.Errorf("failed to create compute service: %v", err)
	}
	errTryDeps = checkTryBuildDeps()
	if errTryDeps != nil {
		log.Printf("TryBot builders disabled due to error: %v", errTryDeps)
	} else {
		log.Printf("TryBot builders enabled.")
	}

	go gcePool.pollQuotaLoop()
	return nil
}
Example 15
func MakeGoogleCompute(email string, privateKey string, project string) (*GoogleCompute, error) {
	conf := &jwt.Config{
		Email:      email,
		PrivateKey: []byte(privateKey),
		Scopes:     []string{"https://www.googleapis.com/auth/compute"},
		TokenURL:   google.JWTTokenURL,
	}
	client := conf.Client(oauth2.NoContext)
	service, err := gcompute.New(client)
	if err != nil {
		return nil, err
	}
	return &GoogleCompute{
		service: service,
		project: project,
	}, nil
}
Example 16
// CreateGceManager constructs a GceManager object.
func CreateGceManager(migs []*config.MigConfig) (*GceManager, error) {
	// Create Google Compute Engine service.
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
	gceService, err := gce.New(client)
	if err != nil {
		return nil, err
	}

	manager := &GceManager{
		migs:     migs,
		service:  gceService,
		migCache: map[config.InstanceConfig]*config.MigConfig{},
	}

	go wait.Forever(func() { manager.regenerateCacheIgnoreError() }, time.Hour)

	return manager, nil
}
Example 17
File: gce.go Project: raggi/contrib
// CreateGceManager constructs a GceManager object.
func CreateGceManager(migs []*config.MigConfig, configReader io.Reader) (*GceManager, error) {
	// Create Google Compute Engine token.
	tokenSource := google.ComputeTokenSource("")
	if configReader != nil {
		var cfg provider_gce.Config
		if err := gcfg.ReadInto(&cfg, configReader); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
		if cfg.Global.TokenURL == "" {
			glog.Warning("Empty tokenUrl in cloud config")
		} else {
			glog.Infof("Using TokenSource from config %#v", tokenSource)
			tokenSource = provider_gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
		}
	} else {
		glog.Infof("Using default TokenSource %#v", tokenSource)
	}

	// Create Google Compute Engine service.
	client := oauth2.NewClient(oauth2.NoContext, tokenSource)
	gceService, err := gce.New(client)
	if err != nil {
		return nil, err
	}

	migInfos := make([]*migInformation, 0, len(migs))
	for _, mig := range migs {
		migInfos = append(migInfos, &migInformation{
			config: mig,
		})
	}

	manager := &GceManager{
		migs:     migInfos,
		service:  gceService,
		migCache: map[config.InstanceConfig]*config.MigConfig{},
	}

	go wait.Forever(func() { manager.regenerateCacheIgnoreError() }, time.Hour)

	return manager, nil
}
Example 18
func (h *DeployHandler) refreshZones() error {
	h.zonesMu.Lock()
	defer h.zonesMu.Unlock()
	defer func() {
		h.regions = make([]string, 0, len(h.zones))
		for r := range h.zones {
			h.regions = append(h.regions, r)
		}
	}()
	project, hc, err := h.authenticatedClient()
	if err != nil {
		if err == errNoRefresh {
			h.zones = backupZones
			h.logger.Printf("Cannot refresh zones because %v. Using hard-coded ones instead.")
			return nil
		}
		return err
	}
	s, err := compute.New(hc)
	if err != nil {
		return err
	}
	rl, err := compute.NewRegionsService(s).List(project).Do()
	if err != nil {
		return fmt.Errorf("could not get a list of regions: %v", err)
	}
	h.zones = make(map[string][]string)
	for _, r := range rl.Items {
		zones := make([]string, 0, len(r.Zones))
		for _, z := range r.Zones {
			zone := path.Base(z)
			if zone == "europe-west1-a" {
				// Because even though the docs mark it as deprecated, it still shows up here, go figure.
				continue
			}
			zone = strings.Replace(zone, r.Name, "", 1)
			zones = append(zones, zone)
		}
		h.zones[r.Name] = zones
	}
	return nil
}
Example 19
// TODO(bradfitz,mpl): move this to go4.org/cloud/google/gceutil
func ZonesOfRegion(hc *http.Client, project, region string) (zones []string, err error) {
	s, err := compute.New(hc)
	if err != nil {
		return nil, err
	}
	zl, err := compute.NewZonesService(s).List(project).Do()
	if err != nil {
		return nil, fmt.Errorf("could not get a list of zones: %v", err)
	}
	if zl.NextPageToken != "" {
		return nil, errors.New("TODO: more than one page of zones found; use NextPageToken")
	}
	for _, z := range zl.Items {
		if path.Base(z.Region) != region {
			continue
		}
		zones = append(zones, z.Name)
	}
	return zones, nil
}
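A hedged usage sketch; the http.Client would typically come from google.DefaultClient with compute.ComputeScope (as other examples here do), and the project and region are placeholders:

func printZonesOfRegion() error {
	hc, err := google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
	if err != nil {
		return err
	}
	zones, err := ZonesOfRegion(hc, "my-project", "us-central1") // placeholders
	if err != nil {
		return err
	}
	for _, z := range zones {
		fmt.Println(z)
	}
	return nil
}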
Example 20
// CreateGceManager constructs a GceManager object.
func CreateGceManager(configReader io.Reader) (*GceManager, error) {
	// Create Google Compute Engine token.
	tokenSource := google.ComputeTokenSource("")
	if configReader != nil {
		var cfg provider_gce.Config
		if err := gcfg.ReadInto(&cfg, configReader); err != nil {
			glog.Errorf("Couldn't read config: %v", err)
			return nil, err
		}
		if cfg.Global.TokenURL == "" {
			glog.Warning("Empty tokenUrl in cloud config")
		} else {
			glog.Infof("Using TokenSource from config %#v", tokenSource)
			tokenSource = provider_gce.NewAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody)
		}
	} else {
		glog.Infof("Using default TokenSource %#v", tokenSource)
	}

	// Create Google Compute Engine service.
	client := oauth2.NewClient(oauth2.NoContext, tokenSource)
	gceService, err := gce.New(client)
	if err != nil {
		return nil, err
	}
	manager := &GceManager{
		migs:     make([]*migInformation, 0),
		service:  gceService,
		migCache: make(map[GceRef]*Mig),
	}
	go wait.Forever(func() {
		manager.cacheMutex.Lock()
		defer manager.cacheMutex.Unlock()
		if err := manager.regenerateCache(); err != nil {
			glog.Errorf("Error while regenerating Mig cache: %v", err)
		}
	}, time.Hour)
	return manager, nil
}
Example 21
func (c *Config) MaybeDeploy() {
	flag.Parse()
	if !*doLaunch {
		go c.restartLoop()
		return
	}
	defer os.Exit(1) // backup, in case we return without Fatal or os.Exit later

	if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
		log.Fatal("Can only use --cloudlaunch on linux/amd64, for now.")
	}

	if c.GCEProjectID == "" {
		log.Fatal("cloudconfig.GCEProjectID is empty")
	}
	filename := filepath.Join(os.Getenv("HOME"), "keys", c.GCEProjectID+".key.json")
	log.Printf("Using OAuth config from JSON service file: %s", filename)
	jwtConf, err := google.JWTConfigFromJSON([]byte(readFile(filename)), append([]string{
		storageapi.DevstorageFullControlScope,
		compute.ComputeScope,
		"https://www.googleapis.com/auth/cloud-platform",
	}, c.Scopes...)...)
	if err != nil {
		log.Fatalf("ConfigFromJSON: %v", err)
	}

	cl := &cloudLaunch{
		Config:      c,
		oauthClient: jwtConf.Client(oauth2.NoContext),
	}
	cl.computeService, err = compute.New(cl.oauthClient)
	if err != nil {
		log.Fatalf("compute.New: %v", err)
	}

	cl.uploadBinary()
	cl.createInstance()
	os.Exit(0)
}
Example 22
func (d *Deployer) checkProjectID() error {
	// TODO(mpl): cache the computeService in Deployer instead of recreating it every time?
	s, err := compute.New(d.Client)
	if err != nil {
		return projectIDError{
			id:    d.Conf.Project,
			cause: err,
		}
	}
	project, err := compute.NewProjectsService(s).Get(d.Conf.Project).Do()
	if err != nil {
		return projectIDError{
			id:    d.Conf.Project,
			cause: err,
		}
	}
	if project.Name != d.Conf.Project {
		return projectIDError{
			id:    d.Conf.Project,
			cause: fmt.Errorf("project ID do not match: got %q, wanted %q", project.Name, d.Conf.Project),
		}
	}
	return nil
}
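checkProjectID wraps every failure in projectIDError, whose definition isn't shown in this example. A minimal reconstruction consistent with the id and cause fields used above (the message format is an assumption):

// Hypothetical reconstruction of projectIDError; the real definition may differ.
type projectIDError struct {
	id    string
	cause error
}

func (e projectIDError) Error() string {
	return fmt.Sprintf("project ID error for %q: %v", e.id, e.cause)
}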
Example 23
// Periodically populate the host-inventory:
func Updater(config *types.Config) {

	log.Infof("[hostInventoryUpdater] Started")

	updateFrequency := 5

	// Run forever:
	for {

		// Sleep until the next run:
		log.Debugf("[hostInventoryUpdater] Sleeping for %vs ...", updateFrequency)
		time.Sleep(time.Duration(updateFrequency) * time.Second)

		// Connect to GCE (either from GCE permissions, JSON file, or ENV-vars):
		client, err := google.DefaultClient(context.Background(), compute.ComputeScope)
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to authenticate to GCE! (%s)", err)
			continue
		} else {
			log.Debugf("[hostInventoryUpdater] Authenticated to GCE")
		}

		// Get a Compute service-object:
		computeService, err := compute.New(client)
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Failed to connecting to GCE! %v", err)
			continue
		}

		// Get the project:
		googleComputeProject, err := metadata.ProjectID()
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to retrieve metadata from instance! (%s)", err)
			continue
		} else {
			log.Debugf("[hostInventoryUpdater] Found project-id (%v)", googleComputeProject)
		}

		// Make the zones.list() call:
		zonesList, err := computeService.Zones.List(googleComputeProject).Do()
		if err != nil {
			log.Errorf("[hostInventoryUpdater] Unable to make zones.list() call! (%s)", err)
			continue
		} else {
			log.Debugf("[hostInventoryUpdater] Found %v zones in this project (%v)", len(zonesList.Items), googleComputeProject)
		}

		// Lock the host-list (so we don't change it while another goroutine is using it):
		log.Tracef("[hostInventoryUpdater] Trying to lock config.HostInventoryMutex ...")
		config.HostInventoryMutex.Lock()
		log.Tracef("[hostInventoryUpdater] Locked config.HostInventoryMutex")

		// Clear out the existing host-inventory:
		config.HostInventory = types.HostInventory{
			Environments: make(map[string]types.Environment),
		}

		// Now check each zone:
		for _, googleComputeZone := range zonesList.Items {

			// Make the instances.list() call:
			instanceList, err := computeService.Instances.List(googleComputeProject, googleComputeZone.Name).Do()
			if err != nil {
				log.Errorf("[hostInventoryUpdater] Unable to make instances.list() call! (%s)", err)
				continue
			} else {
				log.Debugf("[hostInventoryUpdater] Found %v instances running in this project (%v) in this zone (%v)", len(instanceList.Items), googleComputeProject, googleComputeZone.Name)

				// Get the region-name (by slicing off the last two characters - gross!):
				regionName := googleComputeZone.Name[:len(googleComputeZone.Name)-2]

				// Iterate over each instance returned:
				for _, instance := range instanceList.Items {

					// Search for our role and environment metadata:
					var role, environment string
					for _, metadata := range instance.Metadata.Items {
						if metadata.Key == config.RoleMetadataKey {
							role = *metadata.Value
						}
						if metadata.Key == config.EnvironmentMetadataKey {
							environment = *metadata.Value
						}
					}

					// Make sure we have environment and role tags:
					if environment == "" || role == "" {
						log.Debugf("[hostInventoryUpdater] Instance (%v) must have both 'environment' and 'role' metadata in order for DNS records to be creted!", instance.Name)

						// Continue with the next instance:
						continue
					} else {
						log.Infof("[hostInventoryUpdater] Building records for instance (%v) in zone (%v) ...", instance.Name, googleComputeZone.Name)
					}

					// Add a new environment to the inventory (unless we already have it):
					if _, ok := config.HostInventory.Environments[environment]; !ok {
						config.HostInventory.Environments[environment] = types.Environment{
							DNSRecords: make(map[string][]string),
						}
					}

					// Build records for the primary network interface:
					if len(instance.NetworkInterfaces) > 0 {

						// Either create or add to the role-per-zone record:
						internalZoneRecord := fmt.Sprintf("%v.%v.i.%v.%v", role, googleComputeZone.Name, environment, config.DNSDomainName)
						if _, ok := config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord]; !ok {
							config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = []string{instance.NetworkInterfaces[0].NetworkIP}
						} else {
							config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalZoneRecord], instance.NetworkInterfaces[0].NetworkIP)
						}

						// Either create or add to the role-per-region record:
						internalRegionRecord := fmt.Sprintf("%v.%v.i.%v.%v", role, regionName, environment, config.DNSDomainName)
						if _, ok := config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord]; !ok {
							config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = []string{instance.NetworkInterfaces[0].NetworkIP}
						} else {
							config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord] = append(config.HostInventory.Environments[environment].DNSRecords[internalRegionRecord], instance.NetworkInterfaces[0].NetworkIP)
						}

					}

					// Build records for the secondary network interface (external addresses don't appear as interfaces on GCE, so this will never work):
					if len(instance.NetworkInterfaces) > 1 {

						// Either create or add to the external record:
						externalRecord := fmt.Sprintf("%v.%v.e.%v.%v", role, regionName, environment, config.DNSDomainName)
						if _, ok := config.HostInventory.Environments[environment].DNSRecords[externalRecord]; !ok {
							config.HostInventory.Environments[environment].DNSRecords[externalRecord] = []string{instance.NetworkInterfaces[1].NetworkIP}
						} else {
							config.HostInventory.Environments[environment].DNSRecords[externalRecord] = append(config.HostInventory.Environments[environment].DNSRecords[externalRecord], instance.NetworkInterfaces[1].NetworkIP)
						}
					}

				}

			}

		}

		// Unlock the host-inventory:
		log.Tracef("[hostInventoryUpdater] Unlocking config.HostInventoryMutex ...")
		config.HostInventoryMutex.Unlock()

		// Now set the sleep time to the correct value:
		updateFrequency = config.HostUpdateFrequency

	}

}
Example 24
func main() {
	flag.Parse()
	if *proj == "" {
		log.Fatalf("Missing --project flag")
	}
	prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
	machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach

	const tokenFileName = "token.dat"
	tokenFile := tokenCacheFile(tokenFileName)
	tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
	token, err := tokenSource.Token()
	if err != nil {
		if *writeObject != "" {
			log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
		}
		log.Printf("Error getting token from %s: %v", tokenFileName, err)
		log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
		fmt.Print("\nEnter auth code: ")
		sc := bufio.NewScanner(os.Stdin)
		sc.Scan()
		authCode := strings.TrimSpace(sc.Text())
		token, err = config.Exchange(oauth2.NoContext, authCode)
		if err != nil {
			log.Fatalf("Error exchanging auth code for a token: %v", err)
		}
		if err := tokenFile.WriteToken(token); err != nil {
			log.Fatalf("Error writing to %s: %v", tokenFileName, err)
		}
		tokenSource = oauth2.ReuseTokenSource(token, nil)
	}

	oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)

	if *writeObject != "" {
		writeCloudStorageObject(oauthClient)
		return
	}

	computeService, err := compute.New(oauthClient)
	if err != nil {
		log.Fatalf("compute.New: %v", err)
	}

	natIP := *staticIP
	if natIP == "" {
		// Try to find it by name.
		aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
		if err != nil {
			log.Fatal(err)
		}
		// http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
	IPLoop:
		for _, asl := range aggAddrList.Items {
			for _, addr := range asl.Addresses {
				if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
					natIP = addr.Address
					break IPLoop
				}
			}
		}
	}

	cloudConfig := baseConfig
	if *sshPub != "" {
		key := strings.TrimSpace(readFile(*sshPub))
		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", key)
	}
	if os.Getenv("USER") == "bradfitz" {
		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= [email protected]")
	}
	const maxCloudConfig = 32 << 10 // per compute API docs
	if len(cloudConfig) > maxCloudConfig {
		log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
	}

	instance := &compute.Instance{
		Name:        *instName,
		Description: "Go Builder",
		MachineType: machType,
		Disks:       []*compute.AttachedDisk{instanceDisk(computeService)},
		Tags: &compute.Tags{
			Items: []string{"http-server", "https-server"},
		},
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				{
					Key:   "user-data",
					Value: cloudConfig,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				AccessConfigs: []*compute.AccessConfig{
					&compute.AccessConfig{
						Type:  "ONE_TO_ONE_NAT",
						Name:  "External NAT",
						NatIP: natIP,
					},
				},
				Network: prefix + "/global/networks/default",
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			{
				Email: "default",
				Scopes: []string{
					compute.DevstorageFull_controlScope,
					compute.ComputeScope,
				},
			},
		},
	}

	log.Printf("Creating instance...")
	op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
	if err != nil {
		log.Fatalf("Failed to create instance: %v", err)
	}
	opName := op.Name
	log.Printf("Created. Waiting on operation %v", opName)
OpLoop:
	for {
		time.Sleep(2 * time.Second)
		op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
		if err != nil {
			log.Fatalf("Failed to get op %s: %v", opName, err)
		}
		switch op.Status {
		case "PENDING", "RUNNING":
			log.Printf("Waiting on operation %v", opName)
			continue
		case "DONE":
			if op.Error != nil {
				for _, operr := range op.Error.Errors {
					log.Printf("Error: %+v", operr)
				}
				log.Fatalf("Failed to start.")
			}
			log.Printf("Success. %+v", op)
			break OpLoop
		default:
			log.Fatalf("Unknown status %q: %+v", op.Status, op)
		}
	}

	inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
	if err != nil {
		log.Fatalf("Error getting instance after creation: %v", err)
	}
	ij, _ := json.MarshalIndent(inst, "", "    ")
	log.Printf("Instance: %s", ij)
}
Example 25
func main() {
	flag.Parse()

	if *staging {
		if *proj == "symbolic-datum-552" {
			*proj = "go-dashboard-dev"
		}
		if *coordinator == "https://storage.googleapis.com/go-builder-data/coordinator" {
			*coordinator = "https://storage.googleapis.com/dev-go-builder-data/coordinator"
		}
	}
	if *proj == "" {
		log.Fatalf("Missing --project flag")
	}
	if *staticIP == "" {
		// Hard-code this, since GCP doesn't let you rename an IP address, and so
		// this IP is still called "go-buidler-1-ip" in our project, from our old
		// naming convention plan.
		switch *proj {
		case "symbolic-datum-552":
			*staticIP = "107.178.219.46"
		case "go-dashboard-dev":
			*staticIP = "104.154.113.235"
		}
	}
	prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
	machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach

	oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource())

	computeService, err := compute.New(oauthClient)
	if err != nil {
		log.Fatalf("compute.New: %v", err)
	}

	natIP := *staticIP
	if natIP == "" {
		// Try to find it by name.
		aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
		if err != nil {
			log.Fatal(err)
		}
		// https://godoc.org/google.golang.org/api/compute/v1#AddressAggregatedList
	IPLoop:
		for _, asl := range aggAddrList.Items {
			for _, addr := range asl.Addresses {
				if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
					natIP = addr.Address
					break IPLoop
				}
			}
		}
	}

	cloudConfig := strings.Replace(baseConfig, "$COORDINATOR", *coordinator, 1)
	if *sshPub != "" {
		key := strings.TrimSpace(readFile(*sshPub))
		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", key)
	}
	if os.Getenv("USER") == "bradfitz" {
		cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n    - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= [email protected]")
	}
	const maxCloudConfig = 32 << 10 // per compute API docs
	if len(cloudConfig) > maxCloudConfig {
		log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
	}

	instance := &compute.Instance{
		Name:        *instName,
		Description: "Go Builder",
		MachineType: machType,
		Disks:       []*compute.AttachedDisk{instanceDisk(computeService)},
		Tags: &compute.Tags{
			Items: []string{"http-server", "https-server", "allow-ssh"},
		},
		Metadata: &compute.Metadata{
			Items: []*compute.MetadataItems{
				{
					Key:   "user-data",
					Value: googleapi.String(cloudConfig),
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			&compute.NetworkInterface{
				AccessConfigs: []*compute.AccessConfig{
					&compute.AccessConfig{
						Type:  "ONE_TO_ONE_NAT",
						Name:  "External NAT",
						NatIP: natIP,
					},
				},
				Network: prefix + "/global/networks/default",
			},
		},
		ServiceAccounts: []*compute.ServiceAccount{
			{
				Email: "default",
				Scopes: []string{
					compute.DevstorageFullControlScope,
					compute.ComputeScope,
					compute.CloudPlatformScope,
				},
			},
		},
	}

	log.Printf("Creating instance...")
	op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
	if err != nil {
		log.Fatalf("Failed to create instance: %v", err)
	}
	opName := op.Name
	log.Printf("Created. Waiting on operation %v", opName)
OpLoop:
	for {
		time.Sleep(2 * time.Second)
		op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
		if err != nil {
			log.Fatalf("Failed to get op %s: %v", opName, err)
		}
		switch op.Status {
		case "PENDING", "RUNNING":
			log.Printf("Waiting on operation %v", opName)
			continue
		case "DONE":
			if op.Error != nil {
				for _, operr := range op.Error.Errors {
					log.Printf("Error: %+v", operr)
				}
				log.Fatalf("Failed to start.")
			}
			log.Printf("Success. %+v", op)
			break OpLoop
		default:
			log.Fatalf("Unknown status %q: %+v", op.Status, op)
		}
	}

	inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
	if err != nil {
		log.Fatalf("Error getting instance after creation: %v", err)
	}
	ij, _ := json.MarshalIndent(inst, "", "    ")
	log.Printf("Instance: %s", ij)
}
Example 26
func newGCEService(storePath string) (*raw.Service, error) {
	client := newOauthClient(storePath)
	return raw.New(client)
}
Example 27
func doCache(c *cli.Context) {
	cache, err := LoadCache()
	if err != nil {
		panic(err)
	}
	cache.Projects = []*cloudresourcemanager.Project{}
	cache.Instances = []*compute.Instance{}

	ctx := oauth2.NoContext
	scopes := []string{compute.ComputeReadonlyScope}
	client, err := google.DefaultClient(ctx, scopes...)
	if err != nil {
		panic(err)
	}

	// gcloud beta projects list
	log.Println("loading projects...")
	service, err := cloudresourcemanager.New(client)
	if err != nil {
		panic(err)
	}

	projects_list_call := service.Projects.List()
	for {
		res, err := projects_list_call.Do()

		if err != nil {
			panic(err)
		}

		cache.Projects = append(cache.Projects, res.Projects...)

		if res.NextPageToken != "" {
			log.Printf("loading more projects with nextPageToken ...")
			projects_list_call.PageToken(res.NextPageToken)
		} else {
			break
		}
	}
	log.Printf("loaded projects, %d projects found.\n", len(cache.Projects))

	semaphore := make(chan int, maxParallelApiCalls)
	notify := make(chan []*compute.Instance)

	// gcloud compute instances list (in parallel)
	for _, project := range cache.Projects {
		go func(project *cloudresourcemanager.Project, notify chan<- []*compute.Instance) {
			semaphore <- 0
			var instances []*compute.Instance

			log.Printf("loading instances in %s (%s)...\n", project.Name, project.ProjectId)
			service, err := compute.New(client)
			if err != nil {
				log.Printf("error on loading instances in %s (%s), ignored: %s\n", project.Name, project.ProjectId, err)
				notify <- nil
				<-semaphore
				return
			}

			aggregated_list_call := service.Instances.AggregatedList(project.ProjectId)
			// aggregated_list_call.MaxResults(10)
			for {
				res, err := aggregated_list_call.Do()

				if err != nil {
					log.Printf("error on loading instances in %s (%s), ignored: %s\n", project.Name, project.ProjectId, err)
					notify <- nil
					<-semaphore
					return
				}

				for _, instances_scoped_list := range res.Items {
					instances = append(instances, instances_scoped_list.Instances...)
				}

				if res.NextPageToken != "" {
					log.Printf("loading more instances with nextPageToken in %s (%s) ...", project.Name, project.ProjectId)
					aggregated_list_call.PageToken(res.NextPageToken)
				} else {
					break
				}
			}

			<-semaphore
			notify <- instances

			log.Printf("loaded instances in %s (%s), %d instances found.\n", project.Name, project.ProjectId, len(instances))
		}(project, notify)
	}
	for range cache.Projects {
		instances := <-notify
		if instances != nil {
			cache.Instances = append(cache.Instances, instances...)
		}
	}

	// sort projects, instances
	sort.Sort(projectsById(cache.Projects))
	sort.Sort(instancesBySelfLink(cache.Instances))

	SaveCache(cache)
	log.Println("saved cache.")
}
Example 28
func newOrReuseVM(logf func(string, ...interface{}), cl *http.Client) (*ssh.Client, error) {
	c, err := compute.New(cl)
	if err != nil {
		return nil, err
	}

	user := "******"
	pub, auth, err := sshKey()
	if err != nil {
		return nil, err
	}
	sshPubKey := user + ":" + pub

	var op *compute.Operation

	if inst, err := c.Instances.Get(*project, *zone, *vmName).Do(); err != nil {
		logf("Creating new instance (getting instance %v in project %v and zone %v failed: %v)", *vmName, *project, *zone, err)
		instProto := &compute.Instance{
			Name:        *vmName,
			MachineType: "zones/" + *zone + "/machineTypes/g1-small",
			Disks: []*compute.AttachedDisk{{
				AutoDelete: true,
				Boot:       true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: *osImage,
					DiskSizeGb:  10,
				},
			}},
			NetworkInterfaces: []*compute.NetworkInterface{{
				Network:       "projects/" + *project + "/global/networks/default",
				AccessConfigs: []*compute.AccessConfig{{Name: "External NAT", Type: "ONE_TO_ONE_NAT"}},
			}},
			Metadata: &compute.Metadata{
				Items: []*compute.MetadataItems{{
					Key: "sshKeys", Value: &sshPubKey,
				}},
			},
			Tags: &compute.Tags{Items: []string{"ssh"}},
			ServiceAccounts: []*compute.ServiceAccount{{
				Email:  "default",
				Scopes: []string{proxybinary.SQLScope},
			}},
		}
		op, err = c.Instances.Insert(*project, *zone, instProto).Do()
		if err != nil {
			return nil, err
		}
	} else {
		logf("attempting to reuse instance %v (in project %v and zone %v)...", *vmName, *project, *zone)
		set := false
		md := inst.Metadata
		for _, v := range md.Items {
			if v.Key == "sshKeys" {
				v.Value = &sshPubKey
				set = true
				break
			}
		}
		if !set {
			md.Items = append(md.Items, &compute.MetadataItems{Key: "sshKeys", Value: &sshPubKey})
		}
		op, err = c.Instances.SetMetadata(*project, *zone, *vmName, md).Do()
		if err != nil {
			return nil, err
		}
	}

	for {
		if op.Error != nil && len(op.Error.Errors) > 0 {
			return nil, fmt.Errorf("errors: %v", op.Error.Errors)
		}

		log.Printf("%v %v (%v)", op.OperationType, op.TargetLink, op.Status)
		if op.Status == "DONE" {
			break
		}
		time.Sleep(5 * time.Second)

		op, err = c.ZoneOperations.Get(*project, *zone, op.Name).Do()
		if err != nil {
			return nil, err
		}
	}

	inst, err := c.Instances.Get(*project, *zone, *vmName).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting instance after it was created: %v", err)
	}
	ip := inst.NetworkInterfaces[0].AccessConfigs[0].NatIP

	ssh, err := ssh.Dial("tcp", ip+":22", &ssh.ClientConfig{
		User: user,
		Auth: []ssh.AuthMethod{auth},
	})
	if err != nil {
		return nil, fmt.Errorf("couldn't ssh to %v (IP=%v): %v", *vmName, ip, err)
	}
	return ssh, nil
}
Example 29
func TestInstanceCleaner_Run(t *testing.T) {
	mux := http.NewServeMux()
	mux.HandleFunc(
		"/foo-project/aggregated/instances",
		func(w http.ResponseWriter, req *http.Request) {
			body := map[string]interface{}{
				"items": map[string]interface{}{
					"zones/us-central1-a": map[string]interface{}{
						"instances": []interface{}{
							map[string]string{
								"name":              "test-vm-0",
								"status":            "RUNNING",
								"creationTimestamp": time.Now().Format(time.RFC3339),
								"zone":              "zones/us-central1-a",
							},
							map[string]string{
								"name":              "test-vm-1",
								"status":            "TERMINATED",
								"creationTimestamp": "2016-01-02T07:11:12.999-07:00",
								"zone":              "zones/us-central1-a",
							},
							map[string]string{
								"name":              "test-vm-2",
								"status":            "RUNNING",
								"creationTimestamp": time.Now().Add(-8 * time.Hour).Format(time.RFC3339),
								"zone":              "zones/us-central1-a",
							},
						},
					},
				},
			}
			err := json.NewEncoder(w).Encode(body)
			assert.Nil(t, err)
		})
	mux.HandleFunc(
		"/foo-project/zones/us-central1-a/instances/test-vm-1",
		func(w http.ResponseWriter, req *http.Request) {
			assert.Equal(t, req.Method, "DELETE")
			fmt.Fprintf(w, `{}`)
		})
	mux.HandleFunc(
		"/foo-project/zones/us-central1-a/instances/test-vm-2",
		func(w http.ResponseWriter, req *http.Request) {
			assert.Equal(t, req.Method, "DELETE")
			fmt.Fprintf(w, `{}`)
		})
	mux.HandleFunc("/",
		func(w http.ResponseWriter, req *http.Request) {
			t.Errorf("Unhandled URL: %s %v", req.Method, req.URL)
		})

	srv := httptest.NewServer(mux)

	defer srv.Close()

	cs, err := compute.New(&http.Client{})
	assert.Nil(t, err)
	cs.BasePath = srv.URL

	log := logrus.New()
	log.Level = logrus.FatalLevel
	if os.Getenv("GCLOUD_CLEANUP_TEST_DEBUG") != "" {
		log.Level = logrus.DebugLevel
	}
	rl := ratelimit.NewNullRateLimiter()
	cutoffTime := time.Now().Add(-1 * time.Hour)

	ic := newInstanceCleaner(cs, log, rl, 10, time.Second,
		cutoffTime, "foo-project",
		[]string{"name eq ^test.*"}, false)

	err = ic.Run()
	assert.Nil(t, err)
}
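The test above works because the generated client exposes an exported BasePath field, letting tests point API calls at a local httptest server instead of the real Compute API. The same trick extracted into a helper, as a sketch:

// newTestComputeService returns a compute.Service whose calls hit the given
// handler instead of the real Compute API. The caller must Close the server.
func newTestComputeService(handler http.Handler) (*compute.Service, *httptest.Server, error) {
	srv := httptest.NewServer(handler)
	cs, err := compute.New(&http.Client{})
	if err != nil {
		srv.Close()
		return nil, nil, err
	}
	cs.BasePath = srv.URL
	return cs, srv, nil
}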
Example 30
// makeRequests makes some requests.
// req is an incoming request used to construct the trace.  traceClient is the
// client used to upload the trace.  rt is the trace client's http client's
// transport.  This is used to retrieve the trace uploaded by the client, if
// any.  If expectTrace is true, we expect a trace will be uploaded.  If
// synchronous is true, the call to Finish is expected not to return before the
// client has uploaded any traces.
func makeRequests(t *testing.T, req *http.Request, traceClient *Client, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request {
	span := traceClient.SpanFromRequest(req)
	ctx := NewContext(context.Background(), span)

	// An HTTP request.
	{
		req2, err := http.NewRequest("GET", "http://example.com/bar", nil)
		if err != nil {
			t.Fatal(err)
		}
		resp := &http.Response{StatusCode: 200}
		s := span.NewRemoteChild(req2)
		s.Finish(WithResponse(resp))
	}

	// An autogenerated API call.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		computeClient, err := compute.New(hc)
		if err != nil {
			t.Fatal(err)
		}
		_, err = computeClient.Zones.List(testProjectID).Context(ctx).Do()
		if err != nil {
			t.Fatal(err)
		}
	}

	// A cloud library call that uses the autogenerated API.
	{
		rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)}
		hc := &http.Client{Transport: rt}
		storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc))
		if err != nil {
			t.Fatal(err)
		}
		var objAttrsList []*storage.ObjectAttrs
		it := storageClient.Bucket("testbucket").Objects(ctx, nil)
		for {
			objAttrs, err := it.Next()
			if err != nil && err != storage.Done {
				t.Fatal(err)
			}
			if err == storage.Done {
				break
			}
			objAttrsList = append(objAttrsList, objAttrs)
		}
	}

	done := make(chan struct{})
	go func() {
		if synchronous {
			err := span.FinishWait()
			if err != nil {
				t.Errorf("Unexpected error from span.FinishWait: %v", err)
			}
		} else {
			span.Finish()
		}
		done <- struct{}{}
	}()
	if !expectTrace {
		<-done
		select {
		case <-rt.reqc:
			t.Errorf("Got a trace, expected none.")
		case <-time.After(5 * time.Millisecond):
		}
		return nil
	} else if !synchronous {
		<-done
		return <-rt.reqc
	} else {
		select {
		case <-done:
			t.Errorf("Synchronous Finish didn't wait for trace upload.")
			return <-rt.reqc
		case <-time.After(5 * time.Millisecond):
			r := <-rt.reqc
			<-done
			return r
		}
	}
}