Example #1
func (e *TeardownExecutor) Execute(t *Task) error {
	hostMap, err := getContainerIDsToTeardown(t, e.arg)
	if err != nil {
		return err
	}
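	// Acquire the narrowest teardown lock that matches the request:
	// global (All), app+sha+env, app+sha, or app-wide.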
	if e.arg.All {
		tl := datamodel.NewTeardownLock(t.ID)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.Env != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App, e.arg.Sha, e.arg.Env)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.Sha != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App, e.arg.Sha)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	} else if e.arg.App != "" {
		tl := datamodel.NewTeardownLock(t.ID, e.arg.App)
		if err := tl.Lock(); err != nil {
			return err
		}
		defer tl.Unlock()
	}
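	// Tear down each host's containers, then clean up their pool, supervisor,
	// and instance records as the supervisor replies come back.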
	tornContainers := []string{}
	for host, containerIDs := range hostMap {
		if e.arg.All {
			t.LogStatus("Tearing Down * from %s", host)
		} else {
			t.LogStatus("Tearing Down %v from %s", containerIDs, host)
		}

		ihReply, err := supervisor.Teardown(host, containerIDs, e.arg.All)
		if err != nil {
		return fmt.Errorf("Error Tearing Down %v from %s : %s", containerIDs, host, err.Error())
		}
		tornContainers = append(tornContainers, ihReply.ContainerIDs...)
		for _, tornContainerID := range ihReply.ContainerIDs {
			t.LogStatus("%s has been removed from host %s; removing the ZooKeeper record for the container",
				tornContainerID, host)
			err := datamodel.DeleteFromPool([]string{tornContainerID})
			if err != nil {
				t.Log("Error removing %s from pool: %v", tornContainerID, err)
			}
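			// Drop the supervisor's record of the container in ZooKeeper.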
			datamodel.Supervisor(host).RemoveContainer(tornContainerID)
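			// If the instance record is already gone, there is nothing more to clean up.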
			instance, err := datamodel.GetInstance(tornContainerID)
			if err != nil {
				continue
			}
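			// Delete the instance record; Delete's first result reports whether this
			// was the last instance of this app+sha+env (its second result is discarded).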
			last, _ := instance.Delete()
			if last {
				t.LogStatus("%s is the last one of its kind [app: %s SHA: %s Env: %s]",
					tornContainerID, instance.App, instance.Sha, instance.Env)

				DeleteAppShaFromEnv(instance.App, instance.Sha, instance.Env)
			}
			t.LogStatus("Successfully teardown %s", tornContainerID)
		}
	}
	e.reply.ContainerIDs = tornContainers
	return nil
}
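
// A minimal sketch of the result type that deployToHostsInZones collects,
// inferred from its call sites below; the field names appear in the code,
// while the exact types are assumptions:
//
//	type DeployZoneResult struct {
//		Zone       string       // zone this result describes
//		Containers []*Container // containers deployed in that zone (possibly partial on failure)
//		Error      error        // non-nil if the zone failed to deploy
//	}
//
// deployToZone is assumed to send exactly one DeployZoneResult on respCh per
// zone, which is why the collection loop stops after len(zones) results.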
func deployToHostsInZones(deps map[string]DepsType, manifest *Manifest, sha, env string,
	hosts map[string][]string, zones []string, t *Task) ([]*Container, error) {
	deployedContainers := []*Container{}
	// fetch the app
	zkApp, err := datamodel.GetApp(manifest.Name)
	if err != nil {
		return nil, err
	}
	// first check if zones have enough hosts
	for _, zone := range zones {
		// fail if zone has no hosts
		if len(hosts[zone]) == 0 { // len of a nil slice is 0, so this also covers nil
			return nil, fmt.Errorf("No hosts available for app %s in zone %s", manifest.Name, zone)
		}
	}
	// now that we know enough hosts are available, fan out one goroutine per zone
	t.LogStatus("Deploying to zones: %v", zones)
	respCh := make(chan *DeployZoneResult, len(zones))
	for _, zone := range zones {
		go deployToZone(respCh, deps, manifest, sha, env, hosts[zone], zone)
	}
	// collect exactly one result per zone; err keeps the last zone failure, if any
	status := "Deployed to zones: "
	for i := 0; i < len(zones); i++ {
		result := <-respCh
		deployedContainers = append(deployedContainers, result.Containers...)
		if result.Error != nil {
			err = result.Error
			t.Log(err.Error())
			status += result.Zone + ":FAIL "
		} else {
			status += result.Zone + ":SUCCESS "
		}
		t.LogStatus(status)
	}
	if err != nil {
		cleanup(false, deployedContainers, t)
		return nil, err
	}

	// set ports on the zk supervisor - we can't do this in parallel: we may deploy to the same host
	// at the same time, and since we don't lock zookeeper (maybe we should), parallel writes would race
	t.LogStatus("Updating Zookeeper")
	for _, cont := range deployedContainers {
		datamodel.Supervisor(cont.Host).SetContainerAndPort(cont.ID, cont.PrimaryPort)
	}

	// we're good now, so let's move on
	t.LogStatus("Updating Router")
	deployedIDs := make([]string, len(deployedContainers))
	for i, cont := range deployedContainers {
		deployedIDs[i] = cont.ID
	}
	err = datamodel.AddToPool(deployedIDs)
	if err != nil { // if we can't add to the pool, clean up and fail
		cleanup(true, deployedContainers, t)
		return nil, errors.New("Update Pool Error: " + err.Error())
	}
	if zkApp.Internal {
		// reserve router port if needed and add app+env
		_, _, err = datamodel.ReserveRouterPortAndUpdateTrie(zkApp.Internal, manifest.Name, sha, env)
		if err != nil {
			datamodel.DeleteFromPool(deployedIDs)
			cleanup(true, deployedContainers, t)
			return nil, errors.New("Reserve Router Port Error: " + err.Error())
		}
	} else {
		// only update trie
		_, err = datamodel.UpdateAppEnvTrie(zkApp.Internal, manifest.Name, sha, env)
		if err != nil {
			datamodel.DeleteFromPool(deployedIDs)
			cleanup(true, deployedContainers, t)
			return nil, errors.New("Reserve Router Port Error: " + err.Error())
		}
	}
	return deployedContainers, nil
}
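
// A note on the cleanup calls above, based only on how they are used here:
// cleanup(false, ...) runs before the containers' ports are registered with
// the ZooKeeper supervisors, and cleanup(true, ...) runs after, so the boolean
// plausibly tells cleanup whether that ZooKeeper state must be rolled back as
// well. A signature consistent with the call sites (the parameter name is an
// assumption) would be:
//
//	func cleanup(removeZkState bool, containers []*Container, t *Task)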