Example No. 1
// createTestRelease creates the release via the controller client, filling in
// a default Docker artifact (and the legacy artifact ID) when none is given.
func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release {
	if len(in.ArtifactIDs) == 0 {
		in.ArtifactIDs = []string{s.createTestArtifact(c, &ct.Artifact{Type: host.ArtifactTypeDocker}).ID}
		in.LegacyArtifactID = in.ArtifactIDs[0]
	}
	c.Assert(s.c.CreateRelease(in), IsNil)
	return in
}
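A minimal sketch, not part of the original listing, of how a test might call this helper; it assumes the same gocheck suite S, the checkers from gopkg.in/check.v1 (Equals, HasLen) and the ct types used above:

func (s *S) TestCreateReleaseDefaults(c *C) {
	release := s.createTestRelease(c, &ct.Release{
		Env: map[string]string{"FOO": "bar"},
	})
	// the helper fills in exactly one default Docker artifact when none is given
	c.Assert(release.ArtifactIDs, HasLen, 1)
	c.Assert(release.LegacyArtifactID, Equals, release.ArtifactIDs[0])
	c.Assert(release.Env["FOO"], Equals, "bar")
}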
Example No. 2
// createTestRelease creates the release via the controller client, creating a
// default artifact first when the release does not reference one.
func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release {
	if in.ArtifactID == "" {
		in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID
	}
	c.Assert(s.c.CreateRelease(in), IsNil)
	return in
}
Example No. 3
// createTestRelease creates the release by POSTing it to the controller HTTP
// API, creating a default artifact first when the release does not reference one.
func (s *S) createTestRelease(c *C, in *ct.Release) *ct.Release {
	if in.ArtifactID == "" {
		in.ArtifactID = s.createTestArtifact(c, &ct.Artifact{}).ID
	}
	out := &ct.Release{}
	res, err := s.Post("/releases", in, out)
	c.Assert(err, IsNil)
	c.Assert(res.StatusCode, Equals, 200)
	return out
}
Example No. 4
// runImport imports an app from a Flynn export tarball: it recreates the app,
// release, image artifact, formation and routes, restores any database dumps,
// and uploads any exported slug or Docker image.
func runImport(args *docopt.Args, client controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app           *ct.App
		release       *ct.Release
		imageArtifact *ct.Artifact
		formation     *ct.Formation
		routes        []router.Route
		slug          io.Reader
		dockerImage   struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifact.json":
			imageArtifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(imageArtifact); err != nil {
				return fmt.Errorf("error decoding image artifact: %s", err)
			}
			imageArtifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			// clear IDs by index so the changes actually apply to the slice elements
			for i := range routes {
				routes[i].ID = ""
				routes[i].ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && imageArtifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		if id, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]; ok {
			imageArtifact, err = client.GetArtifact(id)
			if err != nil {
				return fmt.Errorf("unable to get slugrunner image artifact: %s", err)
			}
		} else if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
			imageArtifact = &ct.Artifact{
				Type: host.ArtifactTypeDocker,
				URI:  uri,
			}
		} else {
			return fmt.Errorf("gitreceive env missing slug runner image")
		}
	}

	if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:latest", host, app.Name)
		if out, err := exec.Command("docker", "tag", "--force", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if imageArtifact != nil {
		if imageArtifact.ID == "" {
			if err := client.CreateArtifact(imageArtifact); err != nil {
				return fmt.Errorf("error creating image artifact: %s", err)
			}
		}
		release.ArtifactIDs = []string{imageArtifact.ID}
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadSlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: host.ArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}
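The flags read above (--file, --name, --quiet and --routes) imply a docopt usage string roughly like the hypothetical sketch below; this is for illustration only and is not the actual flynn CLI help text:

const importUsage = `
usage: flynn import [options]

Options:
  -f, --file=<file>  read the export tarball from a file instead of stdin
  -n, --name=<name>  create the app under a different name
  -q, --quiet        don't show the upload progress bar
  -r, --routes       also import the exported routes
`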
Example No. 5
// runImport imports an app from a Flynn export tarball; this variant uses a
// single artifact ID on the release and a SLUG_URL environment variable.
func runImport(args *docopt.Args, client *controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app        *ct.App
		release    *ct.Release
		artifact   *ct.Artifact
		formation  *ct.Formation
		routes     []router.Route
		slug       io.Reader
		pgDump     io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactID = ""
		case "artifact.json":
			artifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(artifact); err != nil {
				return fmt.Errorf("error decoding artifact: %s", err)
			}
			artifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			// clear IDs by index so the changes actually apply to the slice elements
			for i := range routes {
				routes[i].ID = ""
				routes[i].ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && release.Env["SLUG_URL"] != "" && artifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		artifact = &ct.Artifact{
			Type: "docker",
			URI:  gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"],
		}
		if artifact.URI == "" {
			return fmt.Errorf("gitreceive env missing SLUGRUNNER_IMAGE_URI")
		}
		release.Env["SLUG_URL"] = fmt.Sprintf("http://blobstore.discoverd/%s.tgz", random.UUID())
	}

	if artifact != nil {
		if err := client.CreateArtifact(artifact); err != nil {
			return fmt.Errorf("error creating artifact: %s", err)
		}
		release.ArtifactID = artifact.ID
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && port.Service.Name == oldName+"-web" {
					proc.Ports[i].Service.Name = app.Name + "-web"
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadSlug {
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Entrypoint: []string{"curl"},
			Args:       []string{"--request", "PUT", "--upload-file", "-", release.Env["SLUG_URL"]},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}
Example No. 6
// Delete deletes any formations for the given app and release, then deletes
// the release and any associated file artifacts if there are no remaining
// formations for the release, enqueueing a worker job to delete any files
// stored in the blobstore
func (r *ReleaseRepo) Delete(app *ct.App, release *ct.Release) error {
	tx, err := r.db.Begin()
	if err != nil {
		return err
	}

	if err := tx.Exec("formation_delete", app.ID, release.ID); err != nil {
		tx.Rollback()
		return err
	}

	// if the release still has formations, don't remove it entirely, just
	// save a release deletion event and return
	rows, err := tx.Query("formation_list_by_release", release.ID)
	if err != nil {
		tx.Rollback()
		return err
	}
	formations, err := scanFormations(rows)
	if err != nil {
		tx.Rollback()
		return err
	}
	if len(formations) > 0 {
		apps := make([]string, len(formations))
		for i, f := range formations {
			apps[i] = f.AppID
		}
		event := ct.ReleaseDeletionEvent{
			ReleaseDeletion: &ct.ReleaseDeletion{
				RemainingApps: apps,
				ReleaseID:     release.ID,
			},
		}
		if err := createEvent(tx.Exec, &ct.Event{
			AppID:      app.ID,
			ObjectID:   release.ID,
			ObjectType: ct.EventTypeReleaseDeletion,
		}, event); err != nil {
			tx.Rollback()
			return err
		}
		return tx.Commit()
	}

	fileArtifacts, err := r.artifacts.ListIDs(release.FileArtifactIDs()...)
	if err != nil {
		tx.Rollback()
		return err
	}

	if err := tx.Exec("release_delete", release.ID); err != nil {
		tx.Rollback()
		return err
	}

	blobstoreFiles := make([]string, 0, len(fileArtifacts))
	for _, artifact := range fileArtifacts {
		if err := tx.Exec("release_artifacts_delete", release.ID, artifact.ID); err != nil {
			tx.Rollback()
			return err
		}

		// only delete artifacts which aren't still referenced by other releases
		var count int64
		if err := tx.QueryRow("artifact_release_count", artifact.ID).Scan(&count); err != nil {
			tx.Rollback()
			return err
		}
		if count > 0 {
			continue
		}

		if artifact.Blobstore() {
			blobstoreFiles = append(blobstoreFiles, artifact.URI)
		}
		if err := tx.Exec("artifact_delete", artifact.ID); err != nil {
			tx.Rollback()
			return err
		}
	}

	// if there are no blobstore files to delete, just save a release
	// deletion event and return
	if len(blobstoreFiles) == 0 {
		event := ct.ReleaseDeletionEvent{
			ReleaseDeletion: &ct.ReleaseDeletion{
				ReleaseID: release.ID,
			},
		}
		if err := createEvent(tx.Exec, &ct.Event{
			AppID:      app.ID,
			ObjectID:   release.ID,
			ObjectType: ct.EventTypeReleaseDeletion,
		}, event); err != nil {
			tx.Rollback()
			return err
		}
		return tx.Commit()
	}

	// enqueue a job to delete the blobstore files
	args, err := json.Marshal(struct {
		AppID     string
		ReleaseID string
		FileURIs  []string
	}{
		app.ID,
		release.ID,
		blobstoreFiles,
	})
	if err != nil {
		tx.Rollback()
		return err
	}
	job := &que.Job{
		Type: "release_cleanup",
		Args: args,
	}
	if err := r.que.EnqueueInTx(job, tx.Tx); err != nil {
		tx.Rollback()
		return err
	}

	return tx.Commit()
}
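A hypothetical sketch of the worker side of the "release_cleanup" job enqueued above; the struct mirrors the fields marshalled into the job args, while the handler name and the HTTP DELETE against the blobstore URIs are illustrative assumptions (encoding/json and net/http assumed imported):

type releaseCleanupArgs struct {
	AppID     string
	ReleaseID string
	FileURIs  []string
}

func handleReleaseCleanup(job *que.Job) error {
	var args releaseCleanupArgs
	if err := json.Unmarshal(job.Args, &args); err != nil {
		return err
	}
	// remove each blobstore file referenced by the deleted artifacts
	for _, uri := range args.FileURIs {
		req, err := http.NewRequest("DELETE", uri, nil)
		if err != nil {
			return err
		}
		res, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		res.Body.Close()
	}
	return nil
}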
Example No. 7
// runReleaseUpdate creates a new release by merging the JSON in <file> into
// the current (or the given) release, then deploys it to the app.
func runReleaseUpdate(args *docopt.Args, client controller.Client) error {
	var release *ct.Release
	var err error
	if args.String["<id>"] != "" {
		release, err = client.GetRelease(args.String["<id>"])
	} else {
		release, err = client.GetAppRelease(mustApp())
	}
	if err != nil {
		return err
	}

	updates := &ct.Release{}
	data, err := ioutil.ReadFile(args.String["<file>"])
	if err != nil {
		return err
	}
	if err := json.Unmarshal(data, updates); err != nil {
		return err
	}

	// There is no way to merge JSON that reliably clears values which are already
	// set, so the --clean flag starts from a largely empty Release instead.
	if args.Bool["--clean"] {
		updates.ArtifactIDs = release.ArtifactIDs
		release = updates
	} else {
		release.ID = ""
		for key, value := range updates.Env {
			release.Env[key] = value
		}
		for key, value := range updates.Meta {
			release.Meta[key] = value
		}
		for procKey, procUpdate := range updates.Processes {
			procRelease, ok := release.Processes[procKey]
			if !ok {
				release.Processes[procKey] = procUpdate
				continue
			}

			if len(procUpdate.Cmd) > 0 {
				procRelease.Cmd = procUpdate.Cmd
			}
			if len(procUpdate.Entrypoint) > 0 {
				procRelease.Entrypoint = procUpdate.Entrypoint
			}
			for key, value := range procUpdate.Env {
				procRelease.Env[key] = value
			}
			if len(procUpdate.Ports) > 0 {
				procRelease.Ports = procUpdate.Ports
			}
			if procUpdate.Data {
				procRelease.Data = true
			}
			if procUpdate.Omni {
				procRelease.Omni = true
			}
			if procUpdate.HostNetwork {
				procRelease.HostNetwork = true
			}
			if len(procUpdate.Service) > 0 {
				procRelease.Service = procUpdate.Service
			}
			if procUpdate.Resurrect {
				procRelease.Resurrect = true
			}
			for resKey, resValue := range procUpdate.Resources {
				procRelease.Resources[resKey] = resValue
			}

			release.Processes[procKey] = procRelease
		}
	}

	if err := client.CreateRelease(release); err != nil {
		return err
	}

	if err := client.DeployAppRelease(mustApp(), release.ID); err != nil {
		return err
	}

	log.Printf("Created release %s.", release.ID)

	return nil
}
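A minimal sketch, purely illustrative, of producing an updates file for this command by marshalling a ct.Release that contains only the fields to merge (here just Env); the function name, file name and environment variable are assumptions:

func writeExampleUpdates() error {
	updates := &ct.Release{
		Env: map[string]string{"LOG_LEVEL": "debug"},
	}
	data, err := json.Marshal(updates)
	if err != nil {
		return err
	}
	// the resulting file can then be passed as <file> to the command above
	return ioutil.WriteFile("updates.json", data, 0644)
}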
Example No. 8
// runImport imports an app from a Flynn export tarball, handling legacy slug
// and Docker exports as well as Flynn image artifacts with blobstore layers.
func runImport(args *docopt.Args, client controller.Client) error {
	jobs, err := strconv.Atoi(args.String["--jobs"])
	if err != nil {
		return err
	}
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app         *ct.App
		release     *ct.Release
		artifacts   []*ct.Artifact
		formation   *ct.Formation
		routes      []router.Route
		legacySlug  io.Reader
		dockerImage struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1
	layers := make(map[string]io.Reader)

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		filename := path.Base(header.Name)
		if strings.HasSuffix(filename, ".layer") {
			f, err := ioutil.TempFile("", "flynn-layer-")
			if err != nil {
				return fmt.Errorf("error creating layer tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading %s: %s", header.Name, err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking layer tempfile: %s", err)
			}
			layers[strings.TrimSuffix(filename, ".layer")] = f
			uploadSize += header.Size
			continue
		}

		switch filename {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifacts.json":
			if err := json.NewDecoder(tr).Decode(&artifacts); err != nil {
				return fmt.Errorf("error decoding artifacts: %s", err)
			}
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			// clear IDs by index so the changes actually apply to the slice elements
			for i := range routes {
				routes[i].ID = ""
				routes[i].ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			legacySlug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config, jobs); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	var uploadLegacySlug bool

	if legacySlug != nil {
		if err := func() error {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}

			// handle legacy clusters which reference Docker image URIs
			if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
				artifact := &ct.Artifact{
					Type: ct.DeprecatedArtifactTypeDocker,
					URI:  uri,
				}
				if err := client.CreateArtifact(artifact); err != nil {
					return fmt.Errorf("error creating image artifact: %s", err)
				}
				uploadLegacySlug = true
				release.ArtifactIDs = []string{artifact.ID}
				return nil
			}

			slugBuilderID, ok := gitreceiveRelease.Env["SLUGBUILDER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugbuilder image")
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}

			// handle legacy tarball based slugbuilders (which are Docker based)
			slugBuilderImage, err := client.GetArtifact(slugBuilderID)
			if err != nil {
				return fmt.Errorf("unable to get slugbuilder image artifact: %s", err)
			}
			if slugBuilderImage.Type == ct.DeprecatedArtifactTypeDocker {
				uploadLegacySlug = true
				release.ArtifactIDs = []string{slugRunnerID}
				return nil
			}

			// Use slugbuilder to convert the legacy slug to a
			// Flynn squashfs image
			slugImageID := random.UUID()
			config := runConfig{
				App:        app.ID,
				Release:    gitreceiveRelease.ID,
				ReleaseEnv: true,
				Artifacts:  []string{slugBuilderID},
				DisableLog: true,
				Args:       []string{"/bin/convert-legacy-slug.sh"},
				Stdin:      legacySlug,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
				Env:        map[string]string{"SLUG_IMAGE_ID": slugImageID},
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading slug: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = []string{slugRunnerID, slugImageID}
			if release.Meta == nil {
				release.Meta = make(map[string]string, 1)
			}
			release.Meta["git"] = "true"
			return nil
		}(); err != nil {
			return err
		}
	} else if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:flynn-import-%s", host, app.Name, random.String(8))
		if out, err := exec.Command("docker", "tag", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if len(artifacts) > 0 {
		// import blobstore Flynn artifacts
		blobstoreRelease, err := client.GetAppRelease("blobstore")
		if err != nil {
			return fmt.Errorf("unable to retrieve blobstore release: %s", err)
		}
		upload := func(id, url string) error {
			layer, ok := layers[id]
			if !ok {
				return fmt.Errorf("missing layer in export: %s", id)
			}
			config := runConfig{
				App:        app.ID,
				Release:    blobstoreRelease.ID,
				DisableLog: true,
				Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", url},
				Stdin:      layer,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading layer: %s", err)
			}
			return nil
		}

		release.ArtifactIDs = make([]string, len(artifacts))
		for i, artifact := range artifacts {
			if artifact.Type != ct.ArtifactTypeFlynn {
				continue
			}
			if !artifact.Blobstore() {
				continue
			}
			for _, rootfs := range artifact.Manifest().Rootfs {
				for _, layer := range rootfs.Layers {
					if err := upload(layer.ID, artifact.LayerURL(layer)); err != nil {
						return err
					}
				}
			}
			artifact.ID = ""
			if err := client.CreateArtifact(artifact); err != nil {
				return fmt.Errorf("error creating artifact: %s", err)
			}
			release.ArtifactIDs[i] = artifact.ID
		}

		// use the current slugrunner image for slug releases
		if release.IsGitDeploy() {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}
			release.ArtifactIDs[0] = slugRunnerID
		}
	}

	if release != nil {
		for t, proc := range release.Processes {
			// update legacy slug releases to use Args rather than the
			// deprecated Entrypoint and Cmd fields
			if release.IsGitDeploy() && len(proc.Args) == 0 {
				proc.Args = append([]string{"/runner/init"}, proc.DeprecatedCmd...)
				proc.DeprecatedCmd = nil
			}
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadLegacySlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      legacySlug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: ct.DeprecatedArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}