Example 1
0
// waitForPodLabeledWithRC watches the pod label tree for matches against the
// given selector and returns nil once exactly one matching pod named "hello"
// is observed. It returns an error if the watch yields more than one match,
// the watch channel closes, or 30 seconds elapse without a match.
//
// NOTE(review): rcID is not referenced in this body — confirm callers need it.
func waitForPodLabeledWithRC(selector klabels.Selector, rcID fields.ID) error {
	applicator := labels.NewConsulApplicator(kp.NewConsulClient(kp.Options{}), 1)

	// This hostname must be labeled as permitted to run tests before watching.
	hostname, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("Could not get hostname: %s", err)
	}
	if err = applicator.SetLabel(labels.NODE, hostname, "test", "yes"); err != nil {
		return fmt.Errorf("Could not set node selector label on %s: %v", hostname, err)
	}

	quit := make(chan struct{})
	defer close(quit)
	matches := applicator.WatchMatches(selector, labels.POD, quit)
	timeout := time.After(30 * time.Second)
	for {
		select {
		case <-timeout:
			return fmt.Errorf("Label selector %v wasn't matched before timeout: %s", selector, targetLogs())
		case res, open := <-matches:
			if !open {
				return fmt.Errorf("Label selector watch unexpectedly terminated")
			}
			switch {
			case len(res) > 1:
				return fmt.Errorf("Too many results found, should only have 1: %v", res)
			case len(res) == 1:
				_, podID, err := labels.NodeAndPodIDFromPodLabel(res[0])
				if err != nil {
					return err
				}
				if podID.String() != "hello" {
					return fmt.Errorf("Should have found the hello pod, instead found %s", podID)
				}
				return nil
			}
		}
	}
}
Example 2
0
// CurrentPods returns the location (node and pod ID) of every pod currently
// labeled with this replication controller's ID.
func (rc *replicationController) CurrentPods() (types.PodLocations, error) {
	selector := klabels.Everything().Add(RCIDLabel, klabels.EqualsOperator, []string{rc.ID().String()})

	matches, err := rc.podApplicator.GetMatches(selector, labels.POD)
	if err != nil {
		return nil, err
	}

	locations := make(types.PodLocations, len(matches))
	for idx := range matches {
		// Each match ID has the form <nodename>/<podid>; split it apart.
		node, podID, parseErr := labels.NodeAndPodIDFromPodLabel(matches[idx])
		if parseErr != nil {
			return nil, parseErr
		}
		locations[idx].Node = node
		locations[idx].PodID = podID
	}
	return locations, nil
}
Example 3
0
// CurrentPods returns the location (node and pod ID) of every pod currently
// labeled with this replication controller's ID.
func (rc *replicationController) CurrentPods() (types.PodLocations, error) {
	selector := klabels.Everything().Add(RCIDLabel, klabels.EqualsOperator, []string{rc.ID().String()})

	// replication controllers can only pass cachedMatch = false because their operations for matching
	// replica counts are not necessarily idempotent.
	matches, err := rc.podApplicator.GetMatches(selector, labels.POD, false)
	if err != nil {
		return nil, err
	}

	locations := make(types.PodLocations, len(matches))
	for idx := range matches {
		// Each match ID has the form <nodename>/<podid>; split it apart.
		node, podID, parseErr := labels.NodeAndPodIDFromPodLabel(matches[idx])
		if parseErr != nil {
			return nil, parseErr
		}
		locations[idx].Node = node
		locations[idx].PodID = podID
	}
	return locations, nil
}
Example 4
0
// CurrentPods returns the location (node and pod ID) of every pod this daemon
// set has scheduled, found by matching the DSIDLabel against the daemon set's
// ID. Because DaemonSet.ID is immutable, the selector stays valid for the
// lifetime of the daemon set (barring a uuid collision).
func (ds *daemonSet) CurrentPods() (types.PodLocations, error) {
	selector := klabels.Everything().Add(DSIDLabel, klabels.EqualsOperator, []string{ds.ID().String()})

	matches, err := ds.applicator.GetMatches(selector, labels.POD, ds.cachedPodMatch)
	if err != nil {
		return nil, util.Errorf("Unable to get matches on pod tree: %v", err)
	}

	locations := make(types.PodLocations, len(matches))
	for idx := range matches {
		// Each match ID has the form <node>/<PodID>; split it apart.
		node, podID, parseErr := labels.NodeAndPodIDFromPodLabel(matches[idx])
		if parseErr != nil {
			return nil, parseErr
		}
		locations[idx].Node = node
		locations[idx].PodID = podID
	}

	return locations, nil
}
Example 5
0
File: farm.go Project: drcapulet/p2
// This function removes all pods with a DSIDLabel where the daemon set id does
// not exist in the store at every interval specified because it is possible
// that the farm will unexpectedly crash or someone deletes or modifies a node
func (dsf *Farm) cleanupDaemonSetPods(quitCh <-chan struct{}) {
	// Zero-duration timer fires immediately, so the first cleanup pass runs
	// right away; subsequent passes wait cleanupInterval (see Reset below).
	timer := time.NewTimer(time.Duration(0))

	for {
		select {
		case <-quitCh:
			return
		case <-timer.C:
		}
		// Re-arm for the next pass before doing the (possibly slow) work.
		timer.Reset(cleanupInterval)

		// Snapshot all daemon sets known to the store for this pass; on any
		// error we skip the pass rather than risk deleting live pods.
		allDaemonSets, err := dsf.dsStore.List()
		if err != nil {
			dsf.logger.Errorf("Unable to get daemon sets from intent tree in daemon set farm: %v", err)
			continue
		}

		// Index daemon sets by ID for O(1) existence checks below.
		dsIDMap := make(map[fields.ID]ds_fields.DaemonSet)
		for _, dsFields := range allDaemonSets {
			dsIDMap[dsFields.ID] = dsFields
		}

		// Match every pod that carries a DSIDLabel, regardless of its value.
		dsIDLabelSelector := klabels.Everything().
			Add(DSIDLabel, klabels.ExistsOperator, []string{})

		allPods, err := dsf.applicator.GetMatches(dsIDLabelSelector, labels.POD)
		if err != nil {
			dsf.logger.Errorf("Unable to get matches for daemon sets in pod store: %v", err)
			continue
		}

		for _, podLabels := range allPods {
			// Only check if it is a pod scheduled by a daemon set
			dsID := podLabels.Labels.Get(DSIDLabel)

			// Check if the daemon set exists, if it doesn't unschedule the pod
			if _, ok := dsIDMap[fields.ID(dsID)]; ok {
				continue
			}

			nodeName, podID, err := labels.NodeAndPodIDFromPodLabel(podLabels)
			if err != nil {
				dsf.logger.NoFields().Error(err)
				continue
			}

			// TODO: Since this mirrors the unschedule function in daemon_set.go,
			// We should find a nice way to couple them together
			dsf.logger.NoFields().Infof("Unscheduling '%v' in node '%v' with dangling daemon set uuid '%v'", podID, nodeName, dsID)

			// Remove the pod from the intent tree first; if that fails, keep
			// the label so a later pass retries the whole unschedule.
			_, err = dsf.kpStore.DeletePod(kp.INTENT_TREE, nodeName, podID)
			if err != nil {
				dsf.logger.NoFields().Errorf("Unable to delete pod id '%v' in node '%v', from intent tree: %v", podID, nodeName, err)
				continue
			}

			// Then drop the DSIDLabel so this pod no longer matches the selector.
			id := labels.MakePodLabelKey(nodeName, podID)
			err = dsf.applicator.RemoveLabel(labels.POD, id, DSIDLabel)
			if err != nil {
				dsf.logger.NoFields().Errorf("Error removing ds pod id label '%v': %v", id, err)
			}
		}
	}
}