Example #1
func (p *AWSProvider) EnvironmentGet(app string) (structs.Environment, error) {
	a, err := p.AppGet(app)
	if err != nil {
		return nil, err
	}

	if a.Status == "creating" {
		return nil, fmt.Errorf("app is still being created: %s", app)
	}

	data, err := p.s3Get(a.Outputs["Settings"], "env")
	if err != nil {
		// If we get a 404 from AWS, just return an empty environment.
		if awsError, ok := err.(awserr.RequestFailure); ok && awsError.StatusCode() == 404 {
			return structs.Environment{}, nil
		}

		return nil, err
	}

	if a.Parameters["Key"] != "" {
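		// Decrypt the stored payload with the app's encryption key; if
		// decryption fails, fall back to the raw data.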
		cr := crypt.New(p.Region, p.Access, p.Secret)

		if d, err := cr.Decrypt(a.Parameters["Key"], data); err == nil {
			data = d
		}
	}

	env := structs.Environment{}
	env.LoadEnvironment(data)

	return env, nil
}
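A minimal usage sketch for the getter above, assuming it is called from the same provider package (imports omitted, matching the listing style); the provider value and app name are placeholders:

// Sketch only: p is an already-configured *AWSProvider and "web" is a
// placeholder app name. structs.Environment behaves like a map of
// variable names to values.
func printEnv(p *AWSProvider) error {
	env, err := p.EnvironmentGet("web")
	if err != nil {
		return err
	}
	for k, v := range env {
		fmt.Printf("%s=%s\n", k, v)
	}
	return nil
}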
Example #2
File: app.go Project: gmelika/rack
// RunDetached runs a command in the background (i.e. non-blocking).
func (a *App) RunDetached(process, command, releaseID string) error {
	resources, err := a.Resources()
	if err != nil {
		return err
	}

	taskDefinitionArn := resources[UpperName(process)+"ECSTaskDefinition"].Id

	if releaseID == "" {
		releaseID = a.Release
	}

	release, err := GetRelease(a.Name, releaseID)
	if err != nil {
		return err
	}

	// If the releaseID specified isn't the app's current release, we have to either:
	// - find the right existing task definition, or
	// - create a new/temporary task definition to run a release that hasn't been promoted.
	if releaseID != a.Release {
		task, err := ECS().DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
			TaskDefinition: aws.String(taskDefinitionArn),
		})
		if err != nil {
			return err
		}

		td, _, err := findAppDefinitions(process, releaseID, *task.TaskDefinition.Family, 20)
		if err != nil {
			return err

		} else if td != nil {
			taskDefinitionArn = *td.TaskDefinitionArn

		} else {
			// If we reach this point, the release exists but has no task definition
			// (it hasn't been promoted), so create a temporary one to run that release.

			var cd *ecs.ContainerDefinition
			for _, cd = range task.TaskDefinition.ContainerDefinitions {
				if *cd.Name == process {
					break
				}
				cd = nil
			}
			if cd == nil {
				return fmt.Errorf("unable to find container for process %s and release %s", process, releaseID)
			}

			env := structs.Environment{}
			env.LoadRaw(release.Env)

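			// Update the container definition's variables with the requested
			// release's values, and force RELEASE to the requested release ID.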
			for _, containerKV := range cd.Environment {
				for key, value := range env {

					if *containerKV.Name == "RELEASE" {
						*containerKV.Value = releaseID
						break

					}

					if *containerKV.Name == key {
						*containerKV.Value = value
						break
					}
				}
			}

			taskInput := &ecs.RegisterTaskDefinitionInput{
				ContainerDefinitions: []*ecs.ContainerDefinition{
					cd,
				},
				Family:  task.TaskDefinition.Family,
				Volumes: []*ecs.Volume{},
			}

			resp, err := ECS().RegisterTaskDefinition(taskInput)
			if err != nil {
				return err
			}

			taskDefinitionArn = *resp.TaskDefinition.TaskDefinitionArn
		}
	}

	req := &ecs.RunTaskInput{
		Cluster:        aws.String(os.Getenv("CLUSTER")),
		Count:          aws.Int64(1),
		StartedBy:      aws.String("convox"),
		TaskDefinition: aws.String(taskDefinitionArn),
	}

	if command != "" {
		req.Overrides = &ecs.TaskOverride{
			ContainerOverrides: []*ecs.ContainerOverride{
				&ecs.ContainerOverride{
					Name: aws.String(process),
					Command: []*string{
						aws.String("sh"),
						aws.String("-c"),
						aws.String(command),
					},
				},
			},
		}
	}

	_, err = ECS().RunTask(req)

	return err
}
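A hedged sketch of calling RunDetached from the same models package; the app name, process, command, and release ID are placeholders, and GetApp is assumed to be the package's loader for an App:

// Sketch only: all literal values below are placeholders.
func runDetachedSketch() error {
	a, err := GetApp("myapp")
	if err != nil {
		return err
	}
	// Non-blocking: the one-off ECS task is started and this call returns
	// immediately, pinned to a specific (possibly unpromoted) release.
	return a.RunDetached("web", "bin/migrate", "RABCDEFGHIJ")
}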
Example #3
File: app.go Project: gmelika/rack
// RunAttached runs a command in the foreground (i.e. blocking), writing the command's output to rw.
func (a *App) RunAttached(process, command, releaseID string, height, width int, rw io.ReadWriter) error {
	// TODO: A lot of logic in here should be moved to the provider interface.

	resources, err := a.Resources()
	if err != nil {
		return err
	}

	if releaseID == "" {
		releaseID = a.Release
	}

	release, err := GetRelease(a.Name, releaseID)
	if err != nil {
		return err
	}

	var container *ecs.ContainerDefinition
	unpromotedRelease := false

	task, err := ECS().DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{
		TaskDefinition: aws.String(resources[UpperName(process)+"ECSTaskDefinition"].Id),
	})
	if err != nil {
		return err
	}

	for _, c := range task.TaskDefinition.ContainerDefinitions {
		if *c.Name == process {
			container = c
			break
		}
	}

	// This check effectively forces an app to be promoted at least once before a process can be run.
	if container == nil {
		return fmt.Errorf("unable to find container for %s", process)
	}

	// If the release ID provided is not the app's active release, determine how to run it:
	// - For a previously promoted release, iterate over earlier TaskDefinition revisions (starting with the latest) looking for the specified releaseID.
	// - If the release has yet to be promoted, use the most recent TaskDefinition combined with the provided release's environment.
	if releaseID != a.Release {

		_, releaseContainer, err := findAppDefinitions(process, releaseID, *task.TaskDefinition.Family, 20)
		if err != nil {
			return err
		}

		// If releaseContainer is nil, the release most likely hasn't been promoted and thus has no TaskDefinition.
		if releaseContainer != nil {
			container = releaseContainer

		} else {
			fmt.Printf("Unable to find container for %s. Basing container off of most recent release: %s.\n", process, a.Release)
			unpromotedRelease = true
		}
	}

	var rawEnvs []string
	for _, env := range container.Environment {
		rawEnvs = append(rawEnvs, fmt.Sprintf("%s=%s", *env.Name, *env.Value))
	}
	containerEnvs := structs.Environment{}
	containerEnvs.LoadRaw(strings.Join(rawEnvs, "\n"))

	// Update any environment variables that might be part of the unpromoted release.
	if unpromotedRelease {

		releaseEnv := structs.Environment{}
		releaseEnv.LoadRaw(release.Env)

		for key, value := range releaseEnv {
			containerEnvs[key] = value
		}

		containerEnvs["RELEASE"] = release.Id
	}

	m, err := manifest.Load([]byte(release.Manifest))
	if err != nil {
		return err
	}

	me, ok := m.Services[process]
	if !ok {
		return fmt.Errorf("no such process: %s", process)
	}

	binds := []string{}
	host := ""

	pss, err := ListProcesses(a.Name)
	if err != nil {
		return err
	}

	for _, ps := range pss {
		if ps.Name == process {
			binds = ps.binds
			host = fmt.Sprintf("http://%s:2376", ps.Host)
			break
		}
	}

	var image, repository, tag, username, password, serverAddress string

	if registryId := a.Outputs["RegistryId"]; registryId != "" {
		image = fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s:%s.%s", registryId, os.Getenv("AWS_REGION"), a.Outputs["RegistryRepository"], me.Name, release.Build)
		repository = fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s", registryId, os.Getenv("AWS_REGION"), a.Outputs["RegistryRepository"])
		tag = fmt.Sprintf("%s.%s", me.Name, release.Build)

		resp, err := ECR().GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{
			RegistryIds: []*string{aws.String(a.Outputs["RegistryId"])},
		})

		if err != nil {
			return err
		}

		if len(resp.AuthorizationData) < 1 {
			return fmt.Errorf("no authorization data")
		}

		endpoint := *resp.AuthorizationData[0].ProxyEndpoint
		serverAddress = endpoint[8:]

		data, err := base64.StdEncoding.DecodeString(*resp.AuthorizationData[0].AuthorizationToken)

		if err != nil {
			return err
		}

		parts := strings.SplitN(string(data), ":", 2)

		username = parts[0]
		password = parts[1]
	} else {
		image = fmt.Sprintf("%s/%s-%s:%s", os.Getenv("REGISTRY_HOST"), a.Name, me.Name, release.Build)
		repository = fmt.Sprintf("%s/%s-%s", os.Getenv("REGISTRY_HOST"), a.Name, me.Name)
		tag = release.Build
		serverAddress = os.Getenv("REGISTRY_HOST")
		username = "******"
		password = os.Getenv("PASSWORD")
	}

	d, err := Docker(host)
	if err != nil {
		return err
	}

	err = d.PullImage(docker.PullImageOptions{
		Repository: repository,
		Tag:        tag,
	}, docker.AuthConfiguration{
		ServerAddress: serverAddress,
		Username:      username,
		Password:      password,
	})
	if err != nil {
		return err
	}

	res, err := d.CreateContainer(docker.CreateContainerOptions{
		Config: &docker.Config{
			AttachStdin:  true,
			AttachStdout: true,
			AttachStderr: true,
			Env:          containerEnvs.List(),
			OpenStdin:    true,
			Tty:          true,
			Cmd:          []string{"sh", "-c", command},
			Image:        image,
			Labels: map[string]string{
				"com.convox.rack.type":    "oneoff",
				"com.convox.rack.app":     a.Name,
				"com.convox.rack.process": process,
				"com.convox.rack.release": release.Id,
			},
		},
		HostConfig: &docker.HostConfig{
			Binds: binds,
		},
	})
	if err != nil {
		return err
	}

	ir, iw := io.Pipe()
	or, ow := io.Pipe()

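	// Attach in the background: ir/iw feed the caller's input into the
	// container, while or/ow carry its output (and errors) back to rw.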
	go d.AttachToContainer(docker.AttachToContainerOptions{
		Container:    res.ID,
		InputStream:  ir,
		OutputStream: ow,
		ErrorStream:  ow,
		Stream:       true,
		Stdin:        true,
		Stdout:       true,
		Stderr:       true,
		RawTerminal:  true,
	})

	go io.Copy(iw, rw)
	go io.Copy(rw, or)

	// hacky: give the attach goroutine a moment to connect before the container starts, so early output isn't lost
	time.Sleep(100 * time.Millisecond)

	err = d.StartContainer(res.ID, nil)
	if err != nil {
		return err
	}

	err = d.ResizeContainerTTY(res.ID, height, width)
	if err != nil {
		// In some cases, a container might finish and exit by the time ResizeContainerTTY is called.
		// Resizing the TTY shouldn't cause the call to error out for cases like that.
		fmt.Printf("fn=RunAttached level=warning msg=\"unable to resize container: %s\"", err)
	}

	code, err := d.WaitContainer(res.ID)
	if err != nil {
		return err
	}

	_, err = rw.Write([]byte(fmt.Sprintf("%s%d\n", StatusCodePrefix, code)))
	return err
}
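A similar hedged sketch for the attached variant; here a bytes.Buffer stands in for the client connection that would normally be passed as rw, and the TTY size is arbitrary:

// Sketch only: the app, process, command, release ID, and TTY size are
// placeholders; a *bytes.Buffer is used only to satisfy io.ReadWriter.
func runAttachedSketch() error {
	a, err := GetApp("myapp")
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	// Blocks until the one-off container exits, streaming its output into buf.
	return a.RunAttached("web", "ls -la", "RABCDEFGHIJ", 40, 120, &buf)
}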