Example #1
File: deploy.go Project: sfrdmn/camlistore
// projectHasInstance checks for all the possible zones if there's already an instance for the project.
// It returns the name of the zone at the first instance it finds, if any.
func (d *Deployer) projectHasInstance() (zone string, err error) {
	computeService, err := compute.New(d.Client)
	if err != nil {
		return "", err
	}
	// TODO(mpl): make use of the handler's cached zones.
	zl, err := compute.NewZonesService(computeService).List(d.Conf.Project).Do()
	if err != nil {
		return "", fmt.Errorf("could not get a list of zones: %v", err)
	}
	// The per-zone listings below run concurrently; zoneOnce guarantees that at
	// most one zone name is ever sent on zonec (capacity 1), so the send never blocks.
	var zoneOnce sync.Once
	var grp syncutil.Group
	errc := make(chan error, 1)
	zonec := make(chan string, 1)
	timeout := time.NewTimer(30 * time.Second)
	defer timeout.Stop()
	for _, z := range zl.Items {
		z := z // capture the loop variable for the goroutine below
		grp.Go(func() error {
			list, err := computeService.Instances.List(d.Conf.Project, z.Name).Do()
			if err != nil {
				return fmt.Errorf("could not list existing instances: %v", err)
			}
			if len(list.Items) > 0 {
				zoneOnce.Do(func() {
					zonec <- z.Name
				})
			}
			return nil
		})
	}
	go func() {
		errc <- grp.Err()
	}()
	// We block until either an instance is found in a zone, all the instance
	// listing is done, or we time out.
	select {
	case err = <-errc:
		return "", err
	case zone = <-zonec:
		// We deliberately ignore any listing error if we found at least one instance,
		// because that instance is what we primarily want to report.
		return zone, nil
	case <-timeout.C:
		return "", errors.New("timed out")
	}
}
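
projectHasInstance fans the per-zone listings out to goroutines and then blocks until the first zone reporting an instance, the completion of every listing, or a timeout. Below is a minimal standalone sketch of that pattern using only the standard library; the zone names, the hasInstance helper, and the 30-second limit are placeholders for illustration, not part of the project.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// hasInstance stands in for the per-zone Instances.List call.
func hasInstance(zone string) (bool, error) {
	return zone == "us-central1-f", nil
}

// firstZoneWithInstance reports the first zone found to hold an instance,
// a listing error if nothing was found, or a timeout error.
func firstZoneWithInstance(zones []string) (string, error) {
	var once sync.Once
	var wg sync.WaitGroup
	errc := make(chan error, 1)   // keeps only the first listing error
	zonec := make(chan string, 1) // holds at most one zone name, thanks to once
	for _, z := range zones {
		z := z // capture the loop variable for the goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			found, err := hasInstance(z)
			if err != nil {
				select {
				case errc <- err:
				default:
				}
				return
			}
			if found {
				once.Do(func() { zonec <- z })
			}
		}()
	}
	done := make(chan struct{})
	go func() { wg.Wait(); close(done) }()
	select {
	case zone := <-zonec:
		return zone, nil // found one; listing errors are deliberately ignored
	case <-done:
		select {
		case zone := <-zonec:
			return zone, nil
		case err := <-errc:
			return "", err
		default:
			return "", nil
		}
	case <-time.After(30 * time.Second):
		return "", errors.New("timed out")
	}
}

func main() {
	zone, err := firstZoneWithInstance([]string{"us-central1-a", "us-central1-f"})
	fmt.Println(zone, err)
}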
Example #2
File: deploy.go Project: sfrdmn/camlistore
// Get returns the Instance corresponding to the Project, Zone, and Name defined in the
// Deployer's Conf.
func (d *Deployer) Get() (*compute.Instance, error) {
	computeService, err := compute.New(d.Client)
	if err != nil {
		return nil, err
	}
	return computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do()
}
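
For reference, the same lookup can be made directly against the compute API without a Deployer. The sketch below assumes Application Default Credentials are configured; the project, zone, and instance names are placeholders.

package main

import (
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	computeService, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	inst, err := computeService.Instances.Get("my-project", "us-central1-f", "camlistore-server").Do()
	if err != nil {
		log.Fatalf("could not get instance: %v", err)
	}
	log.Printf("instance %s is %s", inst.Name, inst.Status)
}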
Example #3
File: deploy.go Project: sfrdmn/camlistore
// Create sets up and starts a Google Compute Engine instance as defined in d.Conf. It
// creates the necessary Google Storage buckets beforehand.
func (d *Deployer) Create(ctx *context.Context) (*compute.Instance, error) {
	if err := d.checkProjectID(); err != nil {
		return nil, err
	}

	computeService, err := compute.New(d.Client)
	if err != nil {
		return nil, err
	}
	storageService, err := storage.New(d.Client)
	if err != nil {
		return nil, err
	}

	// Firewall setup runs concurrently; its result is collected from fwc just
	// before returning, once the instance itself has been created.
	fwc := make(chan error, 1)
	go func() {
		fwc <- d.setFirewall(ctx, computeService)
	}()

	config := cloudConfig(d.Conf)
	const maxCloudConfig = 32 << 10 // per compute API docs
	if len(config) > maxCloudConfig {
		return nil, fmt.Errorf("cloud config length of %d bytes is over %d byte limit", len(config), maxCloudConfig)
	}

	// TODO(mpl): maybe add a wipe mode where we erase other instances before attempting to create.
	if zone, err := d.projectHasInstance(); zone != "" {
		return nil, instanceExistsError{
			project: d.Conf.Project,
			zone:    zone,
		}
	} else if err != nil {
		return nil, fmt.Errorf("could not scan project for existing instances: %v", err)
	}

	if err := d.setBuckets(storageService, ctx); err != nil {
		return nil, fmt.Errorf("could not create buckets: %v", err)
	}

	if err := d.setupHTTPS(storageService); err != nil {
		return nil, fmt.Errorf("could not setup HTTPS: %v", err)
	}

	if err := d.createInstance(computeService, ctx); err != nil {
		return nil, fmt.Errorf("could not create compute instance: %v", err)
	}

	inst, err := computeService.Instances.Get(d.Conf.Project, d.Conf.Zone, d.Conf.Name).Do()
	if err != nil {
		return nil, fmt.Errorf("error getting instance after creation: %v", err)
	}
	if Verbose {
		ij, _ := json.MarshalIndent(inst, "", "    ")
		log.Printf("Instance: %s", ij)
	}

	if err = <-fwc; err != nil {
		return nil, fmt.Errorf("could not create firewall rules: %v", err)
	}
	return inst, nil
}
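
Create reports a pre-existing instance through instanceExistsError. The type's definition is not part of these examples; a plausible shape consistent with the composite literal above would be the following sketch.

// Hypothetical sketch; only the field names are taken from the literal in Create.
type instanceExistsError struct {
	project string
	zone    string
}

func (e instanceExistsError) Error() string {
	return fmt.Sprintf("project %q already has an instance in zone %q", e.project, e.zone)
}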
Example #4
func (h *DeployHandler) refreshZones() error {
	h.zonesMu.Lock()
	defer h.zonesMu.Unlock()
	defer func() {
		h.regions = make([]string, 0, len(h.zones))
		for r := range h.zones {
			h.regions = append(h.regions, r)
		}
	}()
	// TODO(mpl): get projectID and access tokens from metadata once camweb is on GCE.
	accountFile := os.Getenv("CAMLI_GCE_SERVICE_ACCOUNT")
	if accountFile == "" {
		h.Printf("No service account to query for the zones, using hard-coded ones instead.")
		h.zones = backupZones
		return nil
	}
	project := os.Getenv("CAMLI_GCE_PROJECT")
	if project == "" {
		h.Printf("No project we can query on to get the zones, using hard-coded ones instead.")
		h.zones = backupZones
		return nil
	}
	data, err := ioutil.ReadFile(accountFile)
	if err != nil {
		return err
	}
	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/compute.readonly")
	if err != nil {
		return err
	}
	s, err := compute.New(conf.Client(oauth2.NoContext))
	if err != nil {
		return err
	}
	rl, err := compute.NewRegionsService(s).List(project).Do()
	if err != nil {
		return fmt.Errorf("could not get a list of regions: %v", err)
	}
	h.zones = make(map[string][]string)
	for _, r := range rl.Items {
		zones := make([]string, 0, len(r.Zones))
		for _, z := range r.Zones {
			zone := path.Base(z)
			if zone == "europe-west1-a" {
				// Because even though the docs mark it as deprecated, it still shows up here, go figure.
				continue
			}
			zone = strings.Replace(zone, r.Name, "", 1)
			zones = append(zones, zone)
		}
		h.zones[r.Name] = zones
	}
	return nil
}
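
The inner loop reduces each fully qualified zone URL to a short suffix relative to its region (for example, us-central1-f becomes -f under us-central1). A small standalone illustration of that string handling, with a made-up zone URL:

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	regionName := "us-central1"
	zoneURL := "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f"
	zone := path.Base(zoneURL)                      // "us-central1-f"
	zone = strings.Replace(zone, regionName, "", 1) // "-f"
	fmt.Println(zone)
}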
Example #5
File: deploy.go Project: sfrdmn/camlistore
func (d *Deployer) checkProjectID() error {
	// TODO(mpl): cache the computeService in Deployer, instead of recreating a new one every time?
	s, err := compute.New(d.Client)
	if err != nil {
		return projectIDError{
			id:    d.Conf.Project,
			cause: err,
		}
	}
	project, err := compute.NewProjectsService(s).Get(d.Conf.Project).Do()
	if err != nil {
		return projectIDError{
			id:    d.Conf.Project,
			cause: err,
		}
	}
	if project.Name != d.Conf.Project {
		return projectIDError{
			id:    d.Conf.Project,
			cause: fmt.Errorf("project IDs do not match: got %q, wanted %q", project.Name, d.Conf.Project),
		}
	}
	return nil
}
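
As with instanceExistsError in Example #3, projectIDError is defined elsewhere in the project; a shape that matches the literals in checkProjectID would be the following sketch.

// Hypothetical sketch; the id and cause fields mirror the literals above.
type projectIDError struct {
	id    string
	cause error
}

func (e projectIDError) Error() string {
	return fmt.Sprintf("project ID error for %q: %v", e.id, e.cause)
}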