Code Example #1
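// newTaskResponse maps a task and its known Docker containers to a TaskResponse,
// skipping internal containers and blanking the desired status when reporting it
// would contradict the known status.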
func newTaskResponse(task *api.Task, containerMap map[string]*api.DockerContainer) *TaskResponse {
	containers := []ContainerResponse{}
	for containerName, container := range containerMap {
		if container.Container.IsInternal {
			continue
		}
		containers = append(containers, ContainerResponse{container.DockerId, container.DockerName, containerName})
	}

	knownStatus := task.GetKnownStatus()
	knownBackendStatus := knownStatus.BackendStatus()
	desiredStatusInAgent := task.GetDesiredStatus()
	desiredStatus := desiredStatusInAgent.BackendStatus()

	if (knownBackendStatus == "STOPPED" && desiredStatus != "STOPPED") || (knownBackendStatus == "RUNNING" && desiredStatus == "PENDING") {
		desiredStatus = ""
	}

	return &TaskResponse{
		Arn:           task.Arn,
		DesiredStatus: desiredStatus,
		KnownStatus:   knownBackendStatus,
		Family:        task.Family,
		Version:       task.Version,
		Containers:    containers,
	}
}
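For reference, the response types populated above would have roughly the following shape. This is a sketch inferred from the fields the function sets; the project's actual definitions (and any JSON tags) may differ.

type ContainerResponse struct {
	DockerId   string
	DockerName string
	Name       string // field name assumed; it receives the map key (the ECS container name)
}

type TaskResponse struct {
	Arn           string
	DesiredStatus string
	KnownStatus   string
	Family        string
	Version       string
	Containers    []ContainerResponse
}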
Code Example #2
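// pullContainer serializes image pulls behind ImagePullDeleteLock and skips the
// pull entirely if the task's desired status is already stopped.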
func (engine *DockerTaskEngine) pullContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Pulling container", "task", task, "container", container)
	seelog.Debugf("Attempting to obtain ImagePullDeleteLock to pull image - %s", container.Image)

	ImagePullDeleteLock.Lock()
	seelog.Debugf("Obtained ImagePullDeleteLock to pull image - %s", container.Image)
	defer seelog.Debugf("Released ImagePullDeleteLock after pulling image - %s", container.Image)
	defer ImagePullDeleteLock.Unlock()

	// If the task sat blocked here for some time and its desired status was set
	// to stopped before the pull began, skip pulling the image.
	if task.GetDesiredStatus() == api.TaskStopped {
		seelog.Infof("Task desired status is stopped, skip pull container: %v, task %v", container, task)
		container.SetDesiredStatus(api.ContainerStopped)
		return DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
	}

	metadata := engine.client.PullImage(container.Image, container.RegistryAuthentication)
	err := engine.imageManager.AddContainerReferenceToImageState(container)
	if err != nil {
		seelog.Errorf("Error adding container reference to image state: %v", err)
	}
	imageState := engine.imageManager.GetImageStateFromImageName(container.Image)
	engine.state.AddImageState(imageState)
	engine.saver.Save()
	return metadata
}
Code Example #3
File: task_equal.go  Project: bmanas/amazon-ecs-agent
// TasksEqual determines whether the lhs and rhs tasks are equal, defined as
// having the same ARN, family, version, statuses, and equal containers.
func TasksEqual(lhs, rhs *api.Task) bool {
	if lhs == rhs {
		return true
	}
	if lhs.Arn != rhs.Arn {
		return false
	}

	if lhs.Family != rhs.Family || lhs.Version != rhs.Version {
		return false
	}

	for _, left := range lhs.Containers {
		right, ok := rhs.ContainerByName(left.Name)
		if !ok {
			return false
		}
		if !ContainersEqual(left, right) {
			return false
		}
	}

	if lhs.DesiredStatus != rhs.DesiredStatus {
		return false
	}
	if lhs.KnownStatus != rhs.KnownStatus {
		return false
	}
	return true
}
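A hypothetical call site (not taken from the project) could use TasksEqual to drop redundant updates before queuing any transition; engine.state.TaskByArn follows the usage shown in the AddTask example below.

// Illustrative fragment only; the surrounding function and error handling are assumed.
if existing, ok := engine.state.TaskByArn(incoming.Arn); ok && TasksEqual(existing, incoming) {
	return nil // incoming task matches what the engine already tracks; nothing to apply
}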
Code Example #4
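// createContainer resolves the container's Docker config and host config (forcing
// host networking in this variant), records a name mapping in engine state, and
// creates the container through the Docker client.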
func (engine *DockerTaskEngine) createContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Creating container", "task", task, "container", container)
	client := engine.client
	if container.DockerConfig.Version != nil {
		client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
	}

	// Resolve HostConfig
	// we have to do this in create, not start, because docker no longer handles
	// merging create config with start hostconfig the same; e.g. memory limits
	// get lost
	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		containerMap = make(map[string]*api.DockerContainer)
	}

	hostConfig, hcerr := task.DockerHostConfig(container, containerMap)
	if hcerr != nil {
		return DockerContainerMetadata{Error: api.NamedError(hcerr)}
	}
	// Check the error before touching hostConfig; this variant then forces host
	// networking for every container it creates.
	hostConfig.NetworkMode = "host"

	config, err := task.DockerConfig(container)
	if err != nil {
		return DockerContainerMetadata{Error: api.NamedError(err)}
	}

	name := ""
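	// Keep only ASCII letters, digits, and hyphens from the ECS container name.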
	for i := 0; i < len(container.Name); i++ {
		c := container.Name[i]
		if !((c <= '9' && c >= '0') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c == '-')) {
			continue
		}
		name += string(c)
	}

	containerName := "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()

	// Pre-add the container in case we stop before the next, more useful,
	// AddContainer call. This ensures we have a way to get the container if
	// we die before 'createContainer' returns because we can inspect by
	// name
	engine.state.AddContainer(&api.DockerContainer{DockerName: containerName, Container: container}, task)

	metadata := client.CreateContainer(config, hostConfig, containerName)
	if metadata.Error != nil {
		return metadata
	}
	engine.state.AddContainer(&api.DockerContainer{DockerId: metadata.DockerId, DockerName: containerName, Container: container}, task)

	log.Info("Created container successfully", "task", task, "container", container)
	return metadata
}
Code Example #5
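// AddTask starts managing a new task, or routes it as an update to the task the
// engine already tracks under the same ARN.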
func (engine *DockerTaskEngine) AddTask(task *api.Task) error {
	task.PostUnmarshalTask(engine.credentialsManager)

	engine.processTasks.Lock()
	defer engine.processTasks.Unlock()

	existingTask, exists := engine.state.TaskByArn(task.Arn)
	if !exists {
		engine.state.AddTask(task)
		engine.startTask(task)
	} else {
		engine.updateTask(existingTask, task)
	}

	return nil
}
Code Example #6
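// emitTaskEvent forwards a task state change to the engine's event channel,
// unless the status is not recognized by the backend or has already been sent.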
func (engine *DockerTaskEngine) emitTaskEvent(task *api.Task, reason string) {
	taskKnownStatus := task.GetKnownStatus()
	if !taskKnownStatus.BackendRecognized() {
		return
	}
	if task.SentStatus >= taskKnownStatus {
		log.Debug("Already sent task event; no need to re-send", "task", task.Arn, "event", taskKnownStatus.String())
		return
	}
	event := api.TaskStateChange{
		TaskArn:    task.Arn,
		Status:     taskKnownStatus,
		Reason:     reason,
		SentStatus: &task.SentStatus,
	}
	log.Info("Task change event", "event", event)
	engine.taskEvents <- event
}
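The event constructed here implies a TaskStateChange shape roughly like the following; the field types are inferred from how the values are used above and may not match the project's exact definition.

type TaskStateChange struct {
	TaskArn    string
	Status     TaskStatus  // the task's known status being reported
	Reason     string
	SentStatus *TaskStatus // pointer back into the task, presumably so the sender can record what was submitted
}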
Code Example #7
// updateTask determines if a new transition needs to be applied to the
// referenced task, and if needed applies it. It should not be called anywhere
// but from 'AddTask' and is protected by the processTasks lock there.
func (engine *DockerTaskEngine) updateTask(task *api.Task, update *api.Task) {
	managedTask, ok := engine.managedTasks[task.Arn]
	if !ok {
		log.Crit("ACS message for a task we thought we managed, but don't!", "arn", task.Arn)
		// Is this the right thing to do?
		// Calling startTask should overwrite our bad 'state' data with the new
		// task which we do manage.. but this is still scary and shouldn't have happened
		engine.startTask(update)
		return
	}
	// Keep the lock because sequence numbers cannot be correct unless they are
	// also read in the order AddTask was called
	// This does block the engine's ability to ingest any new events (including
	// stops for past tasks, ack!), but this is necessary for correctness
	updateDesiredStatus := update.GetDesiredStatus()
	log.Debug("Putting update on the acs channel", "task", task.Arn, "status", updateDesiredStatus, "seqnum", update.StopSequenceNumber)
	transition := acsTransition{desiredStatus: updateDesiredStatus}
	transition.seqnum = update.StopSequenceNumber
	managedTask.acsMessages <- transition
	log.Debug("Update was taken off the acs channel", "task", task.Arn, "status", updateDesiredStatus)
}
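The value sent over acsMessages suggests a small transition struct along these lines (field types are assumptions based on the assignments above):

type acsTransition struct {
	desiredStatus TaskStatus // from update.GetDesiredStatus()
	seqnum        int64      // from update.StopSequenceNumber
}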
Code Example #8
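// createContainer (a second variant) also stamps agent metadata labels onto the
// container config and force-saves engine state before asking Docker to create
// the container.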
func (engine *DockerTaskEngine) createContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Creating container", "task", task, "container", container)
	client := engine.client
	if container.DockerConfig.Version != nil {
		client = client.WithVersion(dockerclient.DockerVersion(*container.DockerConfig.Version))
	}

	// Resolve HostConfig
	// we have to do this in create, not start, because docker no longer handles
	// merging create config with start hostconfig the same; e.g. memory limits
	// get lost
	containerMap, ok := engine.state.ContainerMapByArn(task.Arn)
	if !ok {
		containerMap = make(map[string]*api.DockerContainer)
	}

	hostConfig, hcerr := task.DockerHostConfig(container, containerMap)
	if hcerr != nil {
		return DockerContainerMetadata{Error: api.NamedError(hcerr)}
	}

	config, err := task.DockerConfig(container)
	if err != nil {
		return DockerContainerMetadata{Error: api.NamedError(err)}
	}

	// Augment labels with some metadata from the agent. Explicitly do this last
	// such that it will always override duplicates in the provided raw config
	// data.
	config.Labels[labelPrefix+"task-arn"] = task.Arn
	config.Labels[labelPrefix+"container-name"] = container.Name
	config.Labels[labelPrefix+"task-definition-family"] = task.Family
	config.Labels[labelPrefix+"task-definition-version"] = task.Version
	config.Labels[labelPrefix+"cluster"] = engine.cfg.Cluster

	name := ""
	for i := 0; i < len(container.Name); i++ {
		c := container.Name[i]
		if !((c <= '9' && c >= '0') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c == '-')) {
			continue
		}
		name += string(c)
	}

	containerName := "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()

	// Pre-add the container in case we stop before the next, more useful,
	// AddContainer call. This ensures we have a way to get the container if
	// we die before 'createContainer' returns because we can inspect by
	// name
	engine.state.AddContainer(&api.DockerContainer{DockerName: containerName, Container: container}, task)
	seelog.Infof("Created container name mapping for task %s - %s -> %s", task, container, containerName)
	engine.saver.ForceSave()

	metadata := client.CreateContainer(config, hostConfig, containerName)
	if metadata.DockerId != "" {
		engine.state.AddContainer(&api.DockerContainer{DockerId: metadata.DockerId, DockerName: containerName, Container: container}, task)
	}
	seelog.Infof("Created docker container for task %s: %s -> %s", task, container, metadata.DockerId)
	return metadata
}
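Both createContainer variants build the Docker container name by stripping everything except letters, digits, and hyphens from the ECS container name. Extracted as a stand-alone helper, the same logic looks like this (a sketch; the helper name is ours, not the project's):

// sanitizeContainerName keeps only the characters the loop above accepts:
// ASCII letters, digits, and hyphens.
func sanitizeContainerName(raw string) string {
	name := ""
	for i := 0; i < len(raw); i++ {
		c := raw[i]
		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '-' {
			name += string(c)
		}
	}
	return name
}

// Example: sanitizeContainerName("my_app.v2") returns "myappv2".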