func (e *ContainerMaintenanceExecutor) Authorize() error {
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		return err
	}
	return AuthorizeApp(&e.arg.ManagerAuthArg, instance.App)
}
func (e *AuthorizeSSHExecutor) Authorize() error {
	// get app for container ID
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		return err
	}
	return AuthorizeApp(&e.arg.ManagerAuthArg, instance.App)
}
func (e *ContainerMaintenanceExecutor) Execute(t *Task) error {
	if e.arg.ContainerID == "" {
		return errors.New("Please specify a container id.")
	}
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		return err
	}
	ihReply, err := supervisor.ContainerMaintenance(instance.Host, e.arg.ContainerID, e.arg.Maintenance)
	if err != nil {
		// check the error before touching ihReply, which may be nil on failure
		e.reply.Status = StatusError
		return err
	}
	e.reply.Status = ihReply.Status
	return nil
}
func (e *DeauthorizeSSHExecutor) Execute(t *Task) error {
	if e.arg.ContainerID == "" {
		return errors.New("Please specify a container id.")
	}
	if e.arg.User == "" {
		return errors.New("Please specify a user.")
	}
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		return err
	}
	ihReply, err := supervisor.DeauthorizeSSH(instance.Host, e.arg.ContainerID, e.arg.User)
	if err != nil {
		// guard against a nil ihReply when the supervisor call fails
		e.reply.Status = StatusError
		return err
	}
	e.reply.Status = ihReply.Status
	return nil
}
func cleanup(removeContainerFromHost bool, deployedContainers []*Container, t *Task) {
	// kill all references to deployed containers as well as the container itself
	for _, container := range deployedContainers {
		// best-effort teardown: errors are intentionally ignored during cleanup
		supervisor.Teardown(container.Host, []string{container.ID}, false)
		if instance, err := datamodel.GetInstance(container.ID); err == nil {
			instance.Delete()
		} else {
			t.Log(fmt.Sprintf("Failed to clean up instance %s: %s", container.ID, err.Error()))
		}
		DeleteAppShaFromEnv(container.App, container.Sha, container.Env)
		if removeContainerFromHost {
			datamodel.Supervisor(container.Host).RemoveContainer(container.ID)
		}
	}
}
func (e *ListContainersExecutor) Execute(t *Task) error {
	var err error
	if e.arg.App == "" && e.arg.Sha == "" && e.arg.Env == "" {
		// try to list all instances
		allContainerIDs, err := datamodel.ListAllInstances()
		if err != nil {
			e.reply.Status = StatusError
			return err
		}
		e.reply.Status = StatusOk
		// filter by allowed app
		if err := AuthorizeSuperUser(&e.arg.ManagerAuthArg); err == nil {
			// if superuser, show everything
			e.reply.ContainerIDs = allContainerIDs
		} else {
			// else only show what is allowed
			allowedApps := GetAllowedApps(&e.arg.ManagerAuthArg, e.arg.ManagerAuthArg.User)
			e.reply.ContainerIDs = []string{}
			for _, cid := range allContainerIDs {
				// only keep containers whose instance resolves and whose app is allowed
				if inst, err := datamodel.GetInstance(cid); err == nil && allowedApps[inst.App] {
					e.reply.ContainerIDs = append(e.reply.ContainerIDs, cid)
				}
			}
		}
		sort.Strings(e.reply.ContainerIDs)
		return nil
	}
	if e.arg.App == "" {
		return errors.New("App is empty")
	}
	if e.arg.Sha == "" {
		return errors.New("Sha is empty")
	}
	if e.arg.Env == "" {
		return errors.New("Environment is empty")
	}
	e.reply.ContainerIDs, err = datamodel.ListInstances(e.arg.App, e.arg.Sha, e.arg.Env)
	if err != nil {
		e.reply.Status = StatusError
	} else {
		sort.Strings(e.reply.ContainerIDs)
		e.reply.Status = StatusOk
	}
	return err
}
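// visibleContainerIDs is a hypothetical helper (not part of this file): it
// extracts the superuser/allowed-apps filter above into one testable function.
// It assumes, as the code above does, that GetAllowedApps returns a set keyed
// by app name and that AuthorizeSuperUser returns nil for superusers.
func visibleContainerIDs(auth *ManagerAuthArg, all []string) []string {
	if err := AuthorizeSuperUser(auth); err == nil {
		return all // superusers see everything
	}
	allowedApps := GetAllowedApps(auth, auth.User)
	visible := []string{}
	for _, cid := range all {
		if inst, err := datamodel.GetInstance(cid); err == nil && allowedApps[inst.App] {
			visible = append(visible, cid)
		}
	}
	return visible
}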
func (e *DeployContainerExecutor) Execute(t *Task) error {
	if e.arg.ContainerID == "" {
		return errors.New("Container ID is empty")
	}
	if e.arg.Instances <= 0 {
		return errors.New("Instances should be > 0")
	}
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		return err
	}
	ihReply, err := supervisor.Get(instance.Host, instance.ID)
	if err != nil {
		return err
	}
	e.reply.Containers, err = deployContainer(&e.arg.ManagerAuthArg, ihReply.Container, e.arg.Instances, t)
	return err
}
func copyContainer(auth *ManagerAuthArg, cid, toHost string, t *Task) (*Container, error) {
	// get old instance
	inst, err := datamodel.GetInstance(cid)
	if err != nil {
		return nil, err
	}
	// get manifest
	manifest := inst.Manifest
	if manifest == nil {
		// if we don't have the manifest in zk, try to get it from the supervisor
		ihReply, err := supervisor.Get(inst.Host, inst.ID)
		if err != nil {
			return nil, err
		}
		manifest = ihReply.Container.Manifest
	}
	manifest.Instances = 1
	// validate and get deps
	deps, err := validateDeploy(auth, manifest, inst.Sha, inst.Env, t)
	if err != nil {
		return nil, err
	}
	// get zone of toHost
	zone, err := supervisor.GetZone(toHost)
	if err != nil {
		return nil, err
	}
	deployed, err := deployToHostsInZones(deps, manifest, inst.Sha, inst.Env,
		map[string][]string{zone: {toHost}}, []string{zone}, t)
	if err != nil {
		return nil, err
	}
	// should only deploy 1 since we're only moving 1
	if len(deployed) != 1 {
		cleanup(true, deployed, t)
		return nil, fmt.Errorf("Didn't deploy 1 container. Deployed %d", len(deployed))
	}
	return deployed[0], nil
}
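// moveContainerExample is a hypothetical caller of copyContainer, included only
// as a usage sketch: copyContainer pins manifest.Instances to 1 and targets a
// single zone, so it moves exactly one container at a time. The function name
// and arguments here are illustrative, not part of the codebase.
func moveContainerExample(auth *ManagerAuthArg, cid, toHost string, t *Task) error {
	moved, err := copyContainer(auth, cid, toHost, t)
	if err != nil {
		return err
	}
	// the old container would then be torn down separately via the teardown path
	t.Log(fmt.Sprintf("Copied %s to host %s as %s", cid, moved.Host, moved.ID))
	return nil
}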
func (e *GetContainerExecutor) Execute(t *Task) (err error) {
	if e.arg.ContainerID == "" {
		return errors.New("Container ID is empty")
	}
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		e.reply.Status = StatusError
		return err
	}
	var ihReply *SupervisorGetReply
	ihReply, err = supervisor.Get(instance.Host, e.arg.ContainerID)
	if err != nil {
		e.reply.Status = StatusError
		return err
	}
	ihReply.Container.Host = instance.Host
	e.reply.Container = ihReply.Container
	e.reply.Status = StatusOk
	return err
}
func (e *AuthorizeSSHExecutor) Execute(t *Task) error {
	if e.arg.PublicKey == "" {
		return errors.New("Please specify an SSH public key.")
	}
	if e.arg.ContainerID == "" {
		return errors.New("Please specify a container id.")
	}
	if e.arg.User == "" {
		return errors.New("Please specify a user.")
	}
	instance, err := datamodel.GetInstance(e.arg.ContainerID)
	if err != nil {
		return err
	}
	ihReply, err := supervisor.AuthorizeSSH(instance.Host, e.arg.ContainerID, e.arg.User, e.arg.PublicKey)
	if err != nil {
		e.reply.Status = StatusError
		return err
	}
	e.reply.Host = instance.Host
	e.reply.Port = ihReply.Port
	e.reply.Status = ihReply.Status
	return nil
}
func (e *TeardownExecutor) Execute(t *Task) error {
	hostMap, err := getContainerIDsToTeardown(t, e.arg)
	if err != nil {
		return err
	}
	// take a teardown lock scoped to the most specific arguments provided
	if e.arg.All {
		tl := datamodel.NewTeardownLock(t.ID)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.Env != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App, e.arg.Sha, e.arg.Env)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.Sha != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App, e.arg.Sha)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.App != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	}
	tornContainers := []string{}
	for host, containerIDs := range hostMap {
		if e.arg.All {
			t.LogStatus("Tearing Down * from %s", host)
		} else {
			t.LogStatus("Tearing Down %v from %s", containerIDs, host)
		}
		ihReply, err := supervisor.Teardown(host, containerIDs, e.arg.All)
		if err != nil {
			return fmt.Errorf("Error Tearing Down %v from %s : %s", containerIDs, host, err.Error())
		}
		tornContainers = append(tornContainers, ihReply.ContainerIDs...)
		for _, tornContainerID := range ihReply.ContainerIDs {
			t.LogStatus("%s has been removed from host %s; removing zookeeper record about the container",
				tornContainerID, host)
			if err := datamodel.DeleteFromPool([]string{tornContainerID}); err != nil {
				t.Log("Error removing %s from pool: %v", tornContainerID, err)
			}
			datamodel.Supervisor(host).RemoveContainer(tornContainerID)
			instance, err := datamodel.GetInstance(tornContainerID)
			if err != nil {
				continue
			}
			last, _ := instance.Delete()
			if last {
				t.LogStatus("%s is the last one of its kind [app: %s SHA: %s Env: %s]",
					tornContainerID, instance.App, instance.Sha, instance.Env)
				DeleteAppShaFromEnv(instance.App, instance.Sha, instance.Env)
			}
			t.LogStatus("Successfully tore down %s", tornContainerID)
		}
	}
	e.reply.ContainerIDs = tornContainers
	return nil
}
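// teardownLockParts is a hypothetical consolidation (not part of this file) of
// the four lock branches in TeardownExecutor.Execute above, which differ only
// in the path components handed to datamodel.NewTeardownLock. It assumes
// NewTeardownLock is variadic over string path parts, which matches the four
// call shapes above; the caller would distinguish a nil result (nothing to
// lock) from an empty one (global lock):
//
//	if parts := teardownLockParts(e.arg); parts != nil {
//		tl := datamodel.NewTeardownLock(t.ID, parts...)
//		...
//	}
func teardownLockParts(arg ManagerTeardownArg) []string {
	switch {
	case arg.All:
		return []string{} // global teardown: lock with no extra parts
	case arg.Env != "":
		return []string{arg.App, arg.Sha, arg.Env}
	case arg.Sha != "":
		return []string{arg.App, arg.Sha}
	case arg.App != "":
		return []string{arg.App}
	}
	return nil // no arguments specific enough to lock on
}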
func getContainerIDsToTeardown(t *Task, arg ManagerTeardownArg) (hostMap map[string][]string, err error) {
	hostMap = map[string][]string{} // map of host -> container ids
	// TODO(edanaher,2014-07-02): This pile of conditionals is braindead and caused us to ignore an
	// environment with no sha, tearing down all of an app instead of just one environment.
	// We really need to fix this to be reasonable, but for the moment, to fix it, I'm just adding another case.
	if arg.All {
		var hosts []string
		hosts, err = datamodel.ListSupervisors()
		if err != nil {
			return nil, errors.New("Error listing hosts: " + err.Error())
		}
		for _, host := range hosts {
			hostMap[host] = []string{}
		}
		return
	} else if arg.ContainerID != "" {
		var instance *datamodel.ZkInstance
		instance, err = datamodel.GetInstance(arg.ContainerID)
		if err != nil {
			return
		}
		hostMap[instance.Host] = []string{arg.ContainerID}
		return
	} else if arg.App != "" {
		containerIDs := []string{}
		if arg.Sha != "" {
			if arg.Env != "" {
				if containerIDs, err = getContainerIDsOfShaEnv(t, arg.App, arg.Sha, arg.Env); err != nil {
					return nil, err
				}
			} else {
				if containerIDs, err = getContainerIDsOfSha(t, arg.App, arg.Sha); err != nil {
					return nil, err
				}
			}
		} else {
			if arg.Env != "" {
				if containerIDs, err = getContainerIDsOfEnv(t, arg.App, arg.Env); err != nil {
					return nil, err
				}
			} else {
				if containerIDs, err = getContainerIDsOfApp(t, arg.App); err != nil {
					return nil, err
				}
			}
		}
		for _, containerID := range containerIDs {
			instance, err := datamodel.GetInstance(containerID)
			if err != nil {
				continue
			}
			// append works on a missing map entry (nil slice), so no explicit check is needed
			hostMap[instance.Host] = append(hostMap[instance.Host], containerID)
		}
		return
	}
	return nil, errors.New("Invalid Arguments")
}
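// containerIDsForApp is a hypothetical flattening (not part of this file) of
// the nested sha/env conditionals the TODO above complains about: the same
// four lookups, dispatched by one switch from most to least specific. The
// getContainerIDsOf* signatures are taken from the calls above.
func containerIDsForApp(t *Task, arg ManagerTeardownArg) ([]string, error) {
	switch {
	case arg.Sha != "" && arg.Env != "":
		return getContainerIDsOfShaEnv(t, arg.App, arg.Sha, arg.Env)
	case arg.Sha != "":
		return getContainerIDsOfSha(t, arg.App, arg.Sha)
	case arg.Env != "":
		return getContainerIDsOfEnv(t, arg.App, arg.Env)
	default:
		return getContainerIDsOfApp(t, arg.App)
	}
}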