Example #1
func (r *remoteImpl) setRef(ref string, pin common.Pin) error {
	if err := common.ValidatePin(pin); err != nil {
		return err
	}
	endpoint, err := refEndpoint(pin.PackageName, ref)
	if err != nil {
		return err
	}

	var request struct {
		InstanceID string `json:"instance_id"`
	}
	request.InstanceID = pin.InstanceID

	var reply struct {
		Status       string `json:"status"`
		ErrorMessage string `json:"error_message"`
	}
	if err = r.makeRequest(endpoint, "POST", &request, &reply); err != nil {
		return err
	}
	switch reply.Status {
	case "SUCCESS":
		return nil
	case "PROCESSING_NOT_FINISHED_YET":
		return &pendingProcessingError{reply.ErrorMessage}
	case "ERROR", "PROCESSING_FAILED":
		return errors.New(reply.ErrorMessage)
	}
	return fmt.Errorf("Unexpected status when moving ref: %s", reply.Status)
}
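All the retrying examples on this page hinge on pendingProcessingError, a type that is not shown here. A minimal sketch, consistent with how it is constructed (&pendingProcessingError{reply.ErrorMessage}) and matched by the type assertions in the examples below:

// Sketch only: the real definition lives elsewhere in the codebase.
type pendingProcessingError struct {
	message string
}

func (e *pendingProcessingError) Error() string {
	return e.message
}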
Example #2
func (client *clientImpl) AttachTagsWhenReady(pin common.Pin, tags []string) error {
	err := common.ValidatePin(pin)
	if err != nil {
		return err
	}
	if len(tags) == 0 {
		return nil
	}
	for _, tag := range tags {
		client.Logger.Infof("cipd: attaching tag %s", tag)
	}
	deadline := client.clock.now().Add(TagAttachTimeout)
	for client.clock.now().Before(deadline) {
		err = client.remote.attachTags(pin, tags)
		if err == nil {
			client.Logger.Infof("cipd: all tags attached")
			return nil
		}
		if _, ok := err.(*pendingProcessingError); ok {
			client.Logger.Warningf("cipd: package instance is not ready yet - %s", err)
			client.clock.sleep(5 * time.Second)
		} else {
			client.Logger.Errorf("cipd: failed to attach tags - %s", err)
			return err
		}
	}
	client.Logger.Errorf("cipd: failed to attach tags - deadline exceeded")
	return ErrAttachTagsTimeout
}
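Note that the deadline loop reads time through client.clock instead of calling time.Now and time.Sleep directly, which lets tests drive the retries with a fake clock. The interface is not shown on this page; a minimal sketch matching the calls used above:

// Assumed shape, inferred from the now()/sleep() calls in the examples.
type clock interface {
	now() time.Time
	sleep(d time.Duration)
}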
Example #3
func (client *clientImpl) SetRefWhenReady(ref string, pin common.Pin) error {
	if err := common.ValidatePackageRef(ref); err != nil {
		return err
	}
	if err := common.ValidatePin(pin); err != nil {
		return err
	}
	client.Logger.Infof("cipd: setting ref of %q: %q => %q", pin.PackageName, ref, pin.InstanceID)
	deadline := client.clock.now().Add(SetRefTimeout)
	for client.clock.now().Before(deadline) {
		err := client.remote.setRef(ref, pin)
		if err == nil {
			return nil
		}
		if _, ok := err.(*pendingProcessingError); ok {
			client.Logger.Warningf("cipd: package instance is not ready yet - %s", err)
			client.clock.sleep(5 * time.Second)
		} else {
			client.Logger.Errorf("cipd: failed to set ref - %s", err)
			return err
		}
	}
	client.Logger.Errorf("cipd: failed set ref - deadline exceeded")
	return ErrSetRefTimeout
}
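Examples #2 and #3 share the same poll-until-ready shape. A hypothetical helper (not part of this codebase, logging omitted) that factors out the pattern, assuming the clock interface sketched above:

// retryUntilReady keeps calling op until it succeeds or the timeout
// elapses, retrying only on *pendingProcessingError.
func retryUntilReady(c clock, timeout time.Duration, op func() error) error {
	deadline := c.now().Add(timeout)
	for c.now().Before(deadline) {
		err := op()
		if err == nil {
			return nil
		}
		if _, ok := err.(*pendingProcessingError); !ok {
			return err // permanent failure, give up immediately
		}
		c.sleep(5 * time.Second) // backend still processing, poll again
	}
	return errors.New("deadline exceeded")
}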
Example #4
func instanceEndpoint(pin common.Pin) (string, error) {
	if err := common.ValidatePin(pin); err != nil {
		return "", err
	}
	params := url.Values{}
	params.Add("package_name", pin.PackageName)
	params.Add("instance_id", pin.InstanceID)
	return "repo/v1/instance?" + params.Encode(), nil
}
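Since url.Values.Encode sorts parameters by key, the helper yields a stable URL for a given pin. A hypothetical call (the values are made up; a real InstanceID must pass common.ValidatePin):

ep, err := instanceEndpoint(common.Pin{
	PackageName: "infra/tools/cipd",
	InstanceID:  "0123456789abcdef0123456789abcdef01234567",
})
// On success ep is:
// "repo/v1/instance?instance_id=0123456789abcdef0123456789abcdef01234567&package_name=infra%2Ftools%2Fcipd"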
Example #5
func tagsEndpoint(pin common.Pin, tags []string) (string, error) {
	if err := common.ValidatePin(pin); err != nil {
		return "", err
	}
	for _, tag := range tags {
		if err := common.ValidateInstanceTag(tag); err != nil {
			return "", err
		}
	}
	params := url.Values{}
	params.Add("package_name", pin.PackageName)
	params.Add("instance_id", pin.InstanceID)
	for _, tag := range tags {
		params.Add("tag", tag)
	}
	return "repo/v1/tags?" + params.Encode(), nil
}
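Each tag is added under the same "tag" key, so the query string carries one tag parameter per tag, in the order given. For example, with a valid pin and hypothetical tags:

ep, err := tagsEndpoint(pin, []string{"git_revision:deadbeef", "canary:yes"})
// On success ep ends with:
// "...&tag=git_revision%3Adeadbeef&tag=canary%3Ayes"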
Example #6
func (client *clientImpl) FetchInstance(pin common.Pin, output io.WriteSeeker) error {
	err := common.ValidatePin(pin)
	if err != nil {
		return err
	}
	client.Logger.Infof("cipd: resolving fetch URL for %s", pin)
	fetchInfo, err := client.remote.fetchInstance(pin)
	if err == nil {
		err = client.storage.download(fetchInfo.fetchURL, output)
	}
	if err != nil {
		client.Logger.Errorf("cipd: failed to fetch %s - %s", pin, err)
		return err
	}
	client.Logger.Infof("cipd: successfully fetched %s", pin)
	return nil
}
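The output parameter is an io.WriteSeeker, and *os.File satisfies it, so a caller can stream the fetched package straight into a local file. A hypothetical usage sketch:

f, err := os.Create("pkg.cipd") // *os.File implements io.WriteSeeker
if err != nil {
	return err
}
defer f.Close()
if err := client.FetchInstance(pin, f); err != nil {
	return err
}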
Example #7
func (client *clientImpl) FetchAndDeployInstance(pin common.Pin) error {
	err := common.ValidatePin(pin)
	if err != nil {
		return err
	}

	// Use temp file for storing package file. Delete it when done.
	var instance local.PackageInstance
	f, err := client.deployer.TempFile(pin.InstanceID)
	if err != nil {
		return err
	}
	defer func() {
		// Instance takes ownership of the file, no need to close it separately.
		if instance == nil {
			f.Close()
		}
		os.Remove(f.Name())
	}()

	// Fetch the package data to the provided storage.
	err = client.FetchInstance(pin, f)
	if err != nil {
		return err
	}

	// Open the instance, verify the instance ID.
	instance, err = local.OpenInstance(f, pin.InstanceID)
	if err != nil {
		return err
	}
	defer instance.Close()

	// Deploy it. 'defer' will take care of removing the temp file if needed.
	_, err = client.deployer.DeployInstance(instance)
	return err
}
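Note the ownership handoff in the deferred cleanup: once local.OpenInstance succeeds, the instance owns the temp file, so f is closed explicitly only while instance is still nil; the file itself is removed in either case.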
Example #8
func (d *deployerImpl) DeployInstance(inst PackageInstance) (common.Pin, error) {
	pin := inst.Pin()
	d.logger.Infof("Deploying %s into %s", pin, d.fs.Root())

	// Be paranoid.
	if err := common.ValidatePin(pin); err != nil {
		return common.Pin{}, err
	}
	if _, err := d.fs.EnsureDirectory(d.fs.Root()); err != nil {
		return common.Pin{}, err
	}

	// Extract new version to the .cipd/pkgs/* guts. For "symlink" install mode it
	// is the final destination. For "copy" install mode it's a temp destination
	// and files will be moved to the site root later (in addToSiteRoot call).
	// ExtractPackageInstance knows how to build full paths and how to atomically
	// extract a package. No need to delete garbage if it fails.
	pkgPath := d.packagePath(pin.PackageName)
	destPath := filepath.Join(pkgPath, pin.InstanceID)
	if err := ExtractInstance(inst, NewFileSystemDestination(destPath, d.fs)); err != nil {
		return common.Pin{}, err
	}
	newManifest, err := d.readManifest(destPath)
	if err != nil {
		return common.Pin{}, err
	}

	// Remember currently deployed version (to remove it later). Do not freak out
	// if it's not there (prevInstanceID == "") or broken (err != nil).
	prevInstanceID, err := d.getCurrentInstanceID(pkgPath)
	prevManifest := Manifest{}
	if err == nil && prevInstanceID != "" {
		prevManifest, err = d.readManifest(filepath.Join(pkgPath, prevInstanceID))
	}
	if err != nil {
		d.logger.Warningf("Previous version of the package is broken: %s", err)
		prevManifest = Manifest{} // to make sure prevManifest.Files == nil.
	}

	// Install all new files to the site root.
	err = d.addToSiteRoot(newManifest.Files, newManifest.InstallMode, pkgPath, destPath)
	if err != nil {
		d.fs.EnsureDirectoryGone(destPath)
		return common.Pin{}, err
	}

	// Mark installed instance as a current one. After this call the package is
	// considered installed and the function must not fail. All cleanup below is
	// best effort.
	if err = d.setCurrentInstanceID(pkgPath, pin.InstanceID); err != nil {
		d.fs.EnsureDirectoryGone(destPath)
		return common.Pin{}, err
	}

	// Wait for async cleanup to finish.
	wg := sync.WaitGroup{}
	defer wg.Wait()

	// Remove old instance directory completely.
	if prevInstanceID != "" && prevInstanceID != pin.InstanceID {
		wg.Add(1)
		go func() {
			defer wg.Done()
			d.fs.EnsureDirectoryGone(filepath.Join(pkgPath, prevInstanceID))
		}()
	}

	// Remove no longer present files from the site root directory.
	if len(prevManifest.Files) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			toKeep := map[string]bool{}
			for _, f := range newManifest.Files {
				toKeep[f.Name] = true
			}
			toKill := []FileInfo{}
			for _, f := range prevManifest.Files {
				if !toKeep[f.Name] {
					toKill = append(toKill, f)
				}
			}
			d.removeFromSiteRoot(toKill)
		}()
	}

	// Verify it's all right.
	newPin, err := d.CheckDeployed(pin.PackageName)
	if err == nil && newPin.InstanceID != pin.InstanceID {
		err = fmt.Errorf("Other instance (%s) was deployed concurrently", newPin.InstanceID)
	}
	if err == nil {
		d.logger.Infof("Successfully deployed %s", pin)
	} else {
		d.logger.Errorf("Failed to deploy %s: %s", pin, err)
	}
	return newPin, err
}
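The second cleanup goroutine computes a set difference: files present in the previous manifest but absent from the new one. Factored into a hypothetical standalone helper for clarity (behavior identical to the inline code above):

// filesToRemove returns the entries of prev whose names do not appear in cur.
func filesToRemove(prev, cur []FileInfo) []FileInfo {
	toKeep := make(map[string]bool, len(cur))
	for _, f := range cur {
		toKeep[f.Name] = true
	}
	var toKill []FileInfo
	for _, f := range prev {
		if !toKeep[f.Name] {
			toKill = append(toKill, f)
		}
	}
	return toKill
}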