func (e *UnregisterSupervisorExecutor) Execute(t *Task) error {
	if e.arg.Host == "" {
		return errors.New("Please specify a host to unregister")
	}
	// TODO(edanaher): With appropriate confirmation (--force?), tear down containers on the host and
	// delete metadata. Until then, cowardly refuse to tear down supervisors with containers.
	listResponse, err := supervisor.List(e.arg.Host)
	if err != nil {
		return err
	}
	containerCount := len(listResponse.Containers)
	if containerCount > 0 {
		plural := ""
		if containerCount > 1 {
			plural = "s"
		}
		return fmt.Errorf("Supervisor still has %d running container%s", containerCount, plural)
	}
	// Best-effort teardown of the now-empty supervisor; its reply and error are
	// intentionally ignored before the zookeeper record is deleted.
	supervisor.Teardown(e.arg.Host, []string{}, true)
	if err := datamodel.Supervisor(e.arg.Host).Delete(); err != nil {
		return err
	}
	e.reply.Status = StatusOk
	return nil
}
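// Behavior sketch (illustrative, not from this file): if supervisor.List reports
// any containers on the host, Execute fails fast and leaves all metadata intact:
//
//	// supervisor.List("host1") -> reply with 2 entries in Containers
//	// Execute returns: "Supervisor still has 2 running containers"
//	// and the supervisor's zookeeper record is not deleted.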
// cleanup kills all references to the deployed containers as well as the
// containers themselves.
func cleanup(removeContainerFromHost bool, deployedContainers []*Container, t *Task) {
	for _, container := range deployedContainers {
		supervisor.Teardown(container.Host, []string{container.ID}, false)
		if instance, err := datamodel.GetInstance(container.ID); err == nil {
			instance.Delete()
		} else {
			t.Log(fmt.Sprintf("Failed to clean up instance %s: %s", container.ID, err.Error()))
		}
		DeleteAppShaFromEnv(container.App, container.Sha, container.Env)
		if removeContainerFromHost {
			datamodel.Supervisor(container.Host).RemoveContainer(container.ID)
		}
	}
}
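// Usage sketch (an assumption for illustration; deployContainers is hypothetical
// and not part of this file): a deploy executor would typically call cleanup when
// a rollout fails partway through, so half-deployed containers don't leak:
//
//	deployed, err := deployContainers(t, e.arg)
//	if err != nil {
//		cleanup(true, deployed, t)
//		return err
//	}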
func (e *TeardownExecutor) Execute(t *Task) error {
	hostMap, err := getContainerIDsToTeardown(t, e.arg)
	if err != nil {
		return err
	}
	// Take a teardown lock scoped as narrowly as the arguments allow:
	// everything, app+sha+env, app+sha, or just app.
	if e.arg.All {
		tl := datamodel.NewTeardownLock(t.ID)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.Env != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App, e.arg.Sha, e.arg.Env)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.Sha != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App, e.arg.Sha)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.App != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	}
	tornContainers := []string{}
	for host, containerIDs := range hostMap {
		if e.arg.All {
			t.LogStatus("Tearing Down * from %s", host)
		} else {
			t.LogStatus("Tearing Down %v from %s", containerIDs, host)
		}
		ihReply, err := supervisor.Teardown(host, containerIDs, e.arg.All)
		if err != nil {
			return fmt.Errorf("Error Tearing Down %v from %s : %s", containerIDs, host, err.Error())
		}
		tornContainers = append(tornContainers, ihReply.ContainerIDs...)
		for _, tornContainerID := range ihReply.ContainerIDs {
			t.LogStatus("%s has been removed from host %s; removing zookeeper record about the container",
				tornContainerID, host)
			if err := datamodel.DeleteFromPool([]string{tornContainerID}); err != nil {
				t.Log("Error removing %s from pool: %v", tornContainerID, err)
			}
			datamodel.Supervisor(host).RemoveContainer(tornContainerID)
			instance, err := datamodel.GetInstance(tornContainerID)
			if err != nil {
				// No instance record to clean up; move on to the next container.
				continue
			}
			last, _ := instance.Delete()
			if last {
				t.LogStatus("%s is the last one of its kind [app: %s SHA: %s Env: %s]",
					tornContainerID, instance.App, instance.Sha, instance.Env)
				DeleteAppShaFromEnv(instance.App, instance.Sha, instance.Env)
			}
			t.LogStatus("Successfully tore down %s", tornContainerID)
		}
	}
	e.reply.ContainerIDs = tornContainers
	return nil
}
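// Lock-scoping sketch (an assumption about NewTeardownLock's semantics, not
// confirmed by this file): the variadic arguments presumably narrow the lock so
// that disjoint teardowns can proceed concurrently, along the lines of:
//
//	datamodel.NewTeardownLock(id)                 // serializes all teardowns
//	datamodel.NewTeardownLock(id, app)            // one app's teardowns
//	datamodel.NewTeardownLock(id, app, sha)       // one build of that app
//	datamodel.NewTeardownLock(id, app, sha, env)  // one environment of that build
//
// The actual path construction lives in the datamodel package; this only
// illustrates the cascade of lock scopes taken in Execute above.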