func newTaskResponse(task *api.Task, containerMap map[string]*api.DockerContainer) *TaskResponse { containers := []ContainerResponse{} for containerName, container := range containerMap { if container.Container.IsInternal { continue } containers = append(containers, ContainerResponse{container.DockerId, container.DockerName, containerName}) } knownStatus := task.GetKnownStatus() knownBackendStatus := knownStatus.BackendStatus() desiredStatusInAgent := task.GetDesiredStatus() desiredStatus := desiredStatusInAgent.BackendStatus() if (knownBackendStatus == "STOPPED" && desiredStatus != "STOPPED") || (knownBackendStatus == "RUNNING" && desiredStatus == "PENDING") { desiredStatus = "" } return &TaskResponse{ Arn: task.Arn, DesiredStatus: desiredStatus, KnownStatus: knownBackendStatus, Family: task.Family, Version: task.Version, Containers: containers, } }
// pullContainer pulls the container's image, serializing all image pulls and
// deletes behind the global ImagePullDeleteLock, then records the image state
// with the engine and persists it via the saver. The pull is skipped (and the
// container marked for stop) if the task was stopped while waiting on the lock.
func (engine *DockerTaskEngine) pullContainer(task *api.Task, container *api.Container) DockerContainerMetadata {
	log.Info("Pulling container", "task", task, "container", container)
	seelog.Debugf("Attempting to obtain ImagePullDeleteLock to pull image - %s", container.Image)
	ImagePullDeleteLock.Lock()
	seelog.Debugf("Obtained ImagePullDeleteLock to pull image - %s", container.Image)
	// Defers run LIFO: the Unlock (deferred second) executes first, then the
	// "Released" message is logged — so the log line truthfully follows the unlock.
	defer seelog.Debugf("Released ImagePullDeleteLock after pulling image - %s", container.Image)
	defer ImagePullDeleteLock.Unlock()

	// If a task is blocked here for some time, and before it starts pulling image,
	// the task's desired status is set to stopped, then don't pull the image
	if task.GetDesiredStatus() == api.TaskStopped {
		seelog.Infof("Task desired status is stopped, skip pull container: %v, task %v", container, task)
		container.SetDesiredStatus(api.ContainerStopped)
		return DockerContainerMetadata{Error: TaskStoppedBeforePullBeginError{task.Arn}}
	}

	metadata := engine.client.PullImage(container.Image, container.RegistryAuthentication)
	// Record that this container references the pulled image. A failure here is
	// logged but deliberately does not fail the pull itself.
	// NOTE(review): this runs even when metadata carries a pull error — confirm
	// that adding a reference for a failed pull is intended.
	err := engine.imageManager.AddContainerReferenceToImageState(container)
	if err != nil {
		seelog.Errorf("Error adding container reference to image state: %v", err)
	}
	// NOTE(review): presumably GetImageStateFromImageName can return nil for an
	// unknown image — confirm AddImageState tolerates a nil state.
	imageState := engine.imageManager.GetImageStateFromImageName(container.Image)
	engine.state.AddImageState(imageState)
	// Persist the updated state before reporting the pull result.
	engine.saver.Save()
	return metadata
}
// updateTask determines if a new transition needs to be applied to the // referenced task, and if needed applies it. It should not be called anywhere // but from 'AddTask' and is protected by the processTasks lock there. func (engine *DockerTaskEngine) updateTask(task *api.Task, update *api.Task) { managedTask, ok := engine.managedTasks[task.Arn] if !ok { log.Crit("ACS message for a task we thought we managed, but don't!", "arn", task.Arn) // Is this the right thing to do? // Calling startTask should overwrite our bad 'state' data with the new // task which we do manage.. but this is still scary and shouldn't have happened engine.startTask(update) return } // Keep the lock because sequence numbers cannot be correct unless they are // also read in the order addtask was called // This does block the engine's ability to ingest any new events (including // stops for past tasks, ack!), but this is necessary for correctness updateDesiredStatus := update.GetDesiredStatus() log.Debug("Putting update on the acs channel", "task", task.Arn, "status", updateDesiredStatus, "seqnum", update.StopSequenceNumber) transition := acsTransition{desiredStatus: updateDesiredStatus} transition.seqnum = update.StopSequenceNumber managedTask.acsMessages <- transition log.Debug("Update was taken off the acs channel", "task", task.Arn, "status", updateDesiredStatus) }