Example 1
func main() {
	kingpin.Version(version.VERSION)
	_, opts, _ := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	podStore := podstore.NewConsul(client.KV())

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}

	if *manifestPath == "" {
		kingpin.Usage()
		log.Fatalln("No manifest given")
	}

	podManifest, err := manifest.FromPath(*manifestPath)
	if err != nil {
		log.Fatalf("Could not read manifest at %s: %s\n", *manifestPath, err)
	}

	out := schedule.Output{
		PodID: podManifest.ID(),
	}
	if *uuidPod {
		out.PodUniqueKey, err = podStore.Schedule(podManifest, types.NodeName(*nodeName))
		if err != nil {
			log.Fatalf("Could not schedule pod: %s", err)
		}
	} else {

		// Legacy pod
		podPrefix := kp.INTENT_TREE
		if *hookGlobal {
			podPrefix = kp.HOOK_TREE
		}
		_, err := store.SetPod(podPrefix, types.NodeName(*nodeName), podManifest)
		if err != nil {
			log.Fatalf("Could not write manifest %s to intent store: %s\n", podManifest.ID(), err)
		}
	}

	outBytes, err := json.Marshal(out)
	if err != nil {
		log.Fatalf("Successfully scheduled manifest but couldn't marshal JSON output")
	}

	fmt.Println(string(outBytes))
}
Example 2
func TestStartWatchBasic(t *testing.T) {
	quitCh := make(chan struct{})
	defer close(quitCh)
	hs, checker := NewFakeHealthStore()
	go hs.StartWatch(quitCh)

	node := types.NodeName("abc01.sjc1")
	podID1 := types.PodID("podID1")
	podID2 := types.PodID("podID2")

	result := hs.Fetch(podID1, node)
	if result != nil {
		t.Errorf("expected cache to start empty, found %v", result)
	}

	checker.Send([]*health.Result{
		{ID: podID1, Node: node},
		{ID: podID2, Node: node},
	})

	result = hs.Fetch(podID1, node)
	if result == nil {
		t.Errorf("expected health store to have %s", podID1)
	}

	result = hs.Fetch(podID2, node)
	if result == nil {
		t.Errorf("expected health store to have %s", podID2)
	}
}
Example 3
func TestWriteRealityIndex(t *testing.T) {
	node := types.NodeName("some_node")
	key := types.NewPodUUID()

	realityIndexPath := fmt.Sprintf("reality/%s/%s", node, key.ID)

	store, fakeKV := storeWithFakeKV(t, make(map[string]Pod), make(map[string]PodIndex))

	// confirm that the reality index doesn't exist
	pair, _, err := fakeKV.Get(realityIndexPath, nil)
	if err != nil {
		t.Fatalf("Initial conditions were not met: error fetching %s: %s", realityIndexPath, err)
	}

	if pair != nil {
		t.Fatalf("Initial conditions were not met: expected key %s to not exist", realityIndexPath)
	}

	err = store.WriteRealityIndex(key, node)
	if err != nil {
		t.Fatal(err)
	}

	pair, _, err = fakeKV.Get(realityIndexPath, nil)
	if err != nil {
		t.Fatalf("Unable to fetch the deleted key (%s): %s", realityIndexPath, err)
	}

	if pair == nil {
		t.Fatalf("%s should have been written but it wasn't", realityIndexPath)
	}
}
Example 4
func TestFakeServiceHealth(t *testing.T) {
	fake := FakePodStore{}
	targetService := "paladin"
	targetHost := types.NodeName("aaa2.dfw.square")
	targetStatus := "healthy"

	fake.healthResults = map[string]kp.WatchResult{
		kp.HealthPath("shrimpy", "aaa1.dfw.square"): {
			Service: "shrimpy",
			Status:  "critical",
		},
		kp.HealthPath(targetService, targetHost): {
			Service: targetService,
			Status:  targetStatus,
		},
	}

	serviceRes, err := fake.GetServiceHealth(targetService)
	if err != nil {
		t.Fatal(err)
	}
	if len(serviceRes) != 1 {
		t.Fatalf("Expected %v to have a single health entry, found %v", targetService, len(serviceRes))
	}
	watchResult, ok := serviceRes[kp.HealthPath(targetService, targetHost)]
	if !ok {
		t.Fatalf("Expected to find a result for %v", targetHost)
	}
	if watchResult.Status != targetStatus {
		t.Fatalf("Status didn't match expected: %v", watchResult.Status)
	}
}
Example 5
// UnmarshalConfig reads the preparer's configuration from its bytes.
func UnmarshalConfig(config []byte) (*PreparerConfig, error) {
	appConfig := AppConfig{}
	err := yaml.Unmarshal(config, &appConfig)
	if err != nil {
		return nil, util.Errorf("The config file %s was malformed - %s", config, err)
	}
	preparerConfig := appConfig.P2PreparerConfig

	if preparerConfig.NodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return nil, util.Errorf("Couldn't determine hostname: %s", err)
		}

		preparerConfig.NodeName = types.NodeName(hostname)
	}
	if preparerConfig.ConsulAddress == "" {
		preparerConfig.ConsulAddress = DefaultConsulAddress
	}
	if preparerConfig.HooksDirectory == "" {
		preparerConfig.HooksDirectory = hooks.DEFAULT_PATH
	}
	if preparerConfig.PodRoot == "" {
		preparerConfig.PodRoot = pods.DefaultPath
	}
	return &preparerConfig, nil
}
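UnmarshalConfig unwraps the preparer section from a larger app config and then back-fills defaults for any fields left empty. Below is a minimal sketch of that wrap-then-default pattern with illustrative struct and YAML field names (not the preparer's actual schema):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type innerConfig struct {
	NodeName      string `yaml:"node_name"`
	ConsulAddress string `yaml:"consul_address"`
}

type outerConfig struct {
	Preparer innerConfig `yaml:"preparer"`
}

func main() {
	raw := []byte("preparer:\n  node_name: abc01.sjc1\n")

	var outer outerConfig
	if err := yaml.Unmarshal(raw, &outer); err != nil {
		fmt.Println("malformed config:", err)
		return
	}

	// Pull out the nested section, then apply defaults for anything unset.
	cfg := outer.Preparer
	if cfg.ConsulAddress == "" {
		cfg.ConsulAddress = "127.0.0.1:8500" // illustrative default, not p2's
	}
	fmt.Printf("%+v\n", cfg)
}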
Example 6
func schedule(client client.Client, logger logging.Logger) {
	m, err := manifest.FromPath(*manifestFile)
	if err != nil {
		logger.Fatalf("Could not read manifest: %s", err)
	}

	podUniqueKey, err := client.Schedule(m, types.NodeName(*node))
	if err != nil {
		logger.Fatalf("Could not schedule: %s", err)
	}

	output := struct {
		PodID        types.PodID        `json:"pod_id"`
		PodUniqueKey types.PodUniqueKey `json:"pod_unique_key"`
	}{
		PodID:        m.ID(),
		PodUniqueKey: podUniqueKey,
	}

	outBytes, err := json.Marshal(output)
	if err != nil {
		logger.Infof("Scheduled pod with key: %s", podUniqueKey)
		return
	}

	fmt.Println(string(outBytes))
}
Example 7
File: main.go Project: rudle/p2
// Returns nodes to be removed and nodes to be added
func makeNodeChanges(oldNodeLabels []labels.Labeled, newNodeLabels []labels.Labeled) ([]types.NodeName, []types.NodeName) {
	var oldNodeNames []types.NodeName
	var newNodeNames []types.NodeName

	for _, node := range oldNodeLabels {
		oldNodeNames = append(oldNodeNames, types.NodeName(node.ID))
	}

	for _, node := range newNodeLabels {
		newNodeNames = append(newNodeNames, types.NodeName(node.ID))
	}

	toRemove := types.NewNodeSet(oldNodeNames...).Difference(types.NewNodeSet(newNodeNames...)).ListNodes()
	toAdd := types.NewNodeSet(newNodeNames...).Difference(types.NewNodeSet(oldNodeNames...)).ListNodes()

	return toRemove, toAdd
}
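The diff above is a set difference taken in both directions. Here is a minimal, self-contained sketch of the same idea using plain string slices; the difference helper is illustrative and not part of p2:

package main

import "fmt"

// difference returns the elements of a that are not present in b.
func difference(a, b []string) []string {
	inB := make(map[string]struct{}, len(b))
	for _, n := range b {
		inB[n] = struct{}{}
	}
	var out []string
	for _, n := range a {
		if _, ok := inB[n]; !ok {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	oldNodes := []string{"node1", "node2", "node3"}
	newNodes := []string{"node2", "node3", "node4"}

	toRemove := difference(oldNodes, newNodes) // [node1]
	toAdd := difference(newNodes, oldNodes)    // [node4]
	fmt.Println(toRemove, toAdd)
}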
Example 8
func TestSchedule(t *testing.T) {
	store, kv := storeWithFakeKV(t, nil, nil)

	node := types.NodeName("some_node")
	key, err := store.Schedule(testManifest(), node)
	if err != nil {
		t.Fatalf("Unexpected error scheduling pod: %s", err)
	}

	// Now reach into the consul impl to make sure it was stored with an index
	// and a main pod
	podPath := fmt.Sprintf("pods/%s", key.ID)
	indexPath := fmt.Sprintf("intent/%s/%s", node, key.ID)

	if kv.Entries[podPath] == nil {
		t.Fatalf("Key '%s' wasn't set as expected", podPath)
	}

	var pod Pod
	err = json.Unmarshal(kv.Entries[podPath].Value, &pod)
	if err != nil {
		t.Fatal(err)
	}

	if pod.Node != node {
		t.Errorf("Pod wasn't stored with correct node, wanted '%s' was '%s'", node, pod.Node)
	}

	expectedManifestSha, err := testManifest().SHA()
	if err != nil {
		t.Fatal(err)
	}

	podManifestSha, err := pod.Manifest.SHA()
	if err != nil {
		t.Fatal(err)
	}

	if expectedManifestSha != podManifestSha {
		t.Error("Pod was scheduled with the wrong manifest")
	}

	if kv.Entries[indexPath] == nil {
		t.Fatalf("Index '%s' wasn't set as expected", indexPath)
	}

	var index PodIndex
	err = json.Unmarshal(kv.Entries[indexPath].Value, &index)
	if err != nil {
		t.Fatal(err)
	}

	if index.PodKey.ID != key.ID {
		t.Errorf("Index didn't have expected key, wanted '%s' was '%s'", key.ID, index.PodKey)
	}
}
Example 9
func scheduleForThisHost(manifest manifest.Manifest, alsoReality bool) error {
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	_, err = store.SetPod(kp.INTENT_TREE, types.NodeName(hostname), manifest)
	if err != nil {
		return err
	}

	if alsoReality {
		_, err = store.SetPod(kp.REALITY_TREE, types.NodeName(hostname), manifest)
		return err
	}
	return nil
}
Example 10
func (sel *applicatorScheduler) EligibleNodes(_ manifest.Manifest, selector klabels.Selector) ([]types.NodeName, error) {
	nodes, err := sel.applicator.GetMatches(selector, labels.NODE, false)
	if err != nil {
		return nil, err
	}

	result := make([]types.NodeName, len(nodes))
	for i, node := range nodes {
		result[i] = types.NodeName(node.ID)
	}
	return result, nil
}
Example 11
func NodeAndPodIDFromPodLabel(labeled Labeled) (types.NodeName, types.PodID, error) {
	if labeled.LabelType != POD {
		return "", "", util.Errorf("Label was not a pod label, was %s", labeled.LabelType)
	}

	parts := strings.SplitN(labeled.ID, "/", 2)
	if len(parts) < 2 {
		return "", "", util.Errorf("malformed pod label %s", labeled.ID)
	}

	return types.NodeName(parts[0]), types.PodID(parts[1]), nil
}
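A pod label ID has the form <node>/<pod_id>, so the function is essentially a two-way SplitN plus a label-type check. A standard-library-only sketch with a made-up label value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	labelID := "abc01.sjc1/mysql" // hypothetical pod label ID in <node>/<pod_id> form

	parts := strings.SplitN(labelID, "/", 2)
	if len(parts) < 2 {
		fmt.Println("malformed pod label:", labelID)
		return
	}
	node, podID := parts[0], parts[1]
	fmt.Printf("node=%s pod=%s\n", node, podID) // node=abc01.sjc1 pod=mysql
}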
Example 12
// Initializes a pod from the current_manifest.yaml file in the pod's home directory. This function
// will error or return an old manifest if run during an inappropriate hook event, so its use is
// discouraged in most cases.
func (h *HookEnv) PodFromDisk() (*pods.Pod, error) {
	node, err := h.Node()
	if err != nil {
		return nil, err
	}
	podHome, err := h.PodHome()
	if err != nil {
		return nil, err
	}

	return pods.PodFromPodHome(types.NodeName(node), podHome)
}
Example 13
func updateWithUniformHealth(t *testing.T, numNodes int, status health.HealthState) (update, map[types.NodeName]health.Result) {
	current := map[types.NodeName]bool{}
	checks := map[types.NodeName]health.Result{}

	for i := 0; i < numNodes; i++ {
		node := types.NodeName(fmt.Sprintf("node%d", i))
		current[node] = true
		checks[node] = health.Result{Status: status}
	}

	upd, _, _ := updateWithHealth(t, numNodes, 0, current, nil, nil)
	return upd, checks
}
Example 14
func (h *HookEnv) Node() (types.NodeName, error) {
	node := os.Getenv(HOOKED_NODE_ENV_VAR)
	if node == "" {
		// TODO: This can't be a hard error right now. It would mean hooks built with this
		// change couldn't run under preparers without this change. We'll fix it later.
		// (At the present time, hooks shouldn't care about their node name.)
		hostname, err := os.Hostname()
		if err != nil {
			return "", util.Errorf("Error getting node name: %v", err)
		}
		node = hostname
	}
	return types.NodeName(node), nil
}
Example 15
func (hs *healthStore) cache(results []*health.Result) {
	tmpCache := make(map[cacheKey]*health.Result, len(results))

	// behavior is undefined if results contains duplicate (pod ID, node) pairs
	for _, result := range results {
		result := result
		tmpCache[newCacheKey(result.ID, types.NodeName(result.Node))] = result
	}

	hs.lock.Lock()
	defer hs.lock.Unlock()

	hs.cachedHealth = tmpCache
}
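The pattern here is to build the replacement map without holding the lock and then swap it in while locked, keeping the critical section short. A standalone sketch of that pattern with illustrative names (not p2's healthStore API):

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu      sync.Mutex
	entries map[string]int
}

// rebuild constructs the new map outside the lock, then swaps it in
// under the lock so readers (which would also take mu) never observe
// a half-built map.
func (c *cache) rebuild(results map[string]int) {
	tmp := make(map[string]int, len(results))
	for k, v := range results {
		tmp[k] = v
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries = tmp
}

func main() {
	c := &cache{}
	c.rebuild(map[string]int{"podID1": 1, "podID2": 2})
	fmt.Println(len(c.entries)) // 2
}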
Example 16
File: main.go Project: drcapulet/p2
func main() {
	// CLI takes a hostname, a token and recursively deletes all pods
	// in the reality tree. This is useful if any pods on a host have
	// been manually altered in some way and need to be restored to
	// a known state.
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	pods, _, err := store.ListPods(kp.REALITY_TREE, types.NodeName(*nodeName))
	if err != nil {
		log.Fatalf("Could not list pods for node %v: %v", *nodeName, err)
	}
	for _, pod := range pods {
		log.Printf("Deleting %v from reality\n", pod.Manifest.ID())
		_, err := store.DeletePod(kp.REALITY_TREE, types.NodeName(*nodeName), pod.Manifest.ID())
		if err != nil {
			log.Fatalf("Could not remove %v from pod reality tree: %v", err)
		}
	}
}
Example 17
func TestReadPodFromIndex(t *testing.T) {
	node := types.NodeName("some_node")
	key := types.NewPodUUID()

	podPath := fmt.Sprintf("pods/%s", key.ID)
	indexPath := fmt.Sprintf("intent/%s/%s", node, key.ID)

	pod := Pod{
		Manifest: testManifest(),
		Node:     node,
	}

	index := PodIndex{
		PodKey: key,
	}

	// Initialize the store with entries at the pod path and index path
	pods := map[string]Pod{
		podPath: pod,
	}

	// This test doesn't actually care if there's an index, but let's keep it hygienic
	indices := map[string]PodIndex{
		indexPath: index,
	}
	store, _ := storeWithFakeKV(t, pods, indices)

	outPod, err := store.ReadPod(key)
	if err != nil {
		t.Fatalf("Unexpected error reading pod: %s", err)
	}

	if pod.Node != outPod.Node {
		t.Errorf("Pod node, didn't match expected, wanted %+v was %+v", pod.Node, outPod.Node)
	}

	expectedManifestSha, err := testManifest().SHA()
	if err != nil {
		t.Fatal(err)
	}

	podManifestSha, err := outPod.Manifest.SHA()
	if err != nil {
		t.Fatal(err)
	}

	if expectedManifestSha != podManifestSha {
		t.Error("Pod returned with the wrong manifest")
	}
}
Example 18
File: main.go Project: rudle/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			fmt.Fprintf(os.Stderr, "error getting hostname. use --node to specify a node: %v\n", err)
			os.Exit(1)
		}
		*nodeName = hostname
	}

	rm := NewP2RM(kp.NewConsulClient(opts), *podName, types.NodeName(*nodeName))

	podIsManagedByRC, rcID, err := rm.checkForManagingReplicationController()
	if err != nil {
		os.Exit(2)
	}

	if !podIsManagedByRC {
		err = rm.deletePod()
		if err != nil {
			os.Exit(2)
		}
	}

	if podIsManagedByRC && !*deallocation {
		fmt.Fprintf(
			os.Stderr,
			"error: %s is managed by replication controller: %s\n"+
				"It's possible you meant you deallocate this pod on this node. If so, please confirm your intention with --deallocate\n", *nodeName, rcID)
		os.Exit(2)
	}

	if podIsManagedByRC && *deallocation {
		err = rm.decrementDesiredCount(rcID)
		if err != nil {
			fmt.Fprintf(os.Stderr,
				"Encountered error deallocating from the RC %s. You may attempt this command again or use `p2-rctl` to cleanup manually.\n%v",
				rcID,
				err)
		}
	}

	fmt.Printf("%s: successfully removed %s\n", rm.NodeName, rm.PodName)
}
Example 19
File: main.go Project: rudle/p2
func main() {
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("Could not get the hostname to do scheduling: %s", err)
		}
		*nodeName = hostname
	}
	if *podClusters {
		watchPodClusters(client)
	} else {
		podPrefix := kp.INTENT_TREE
		if *watchReality {
			podPrefix = kp.REALITY_TREE
		} else if *hooks {
			podPrefix = kp.HOOK_TREE
		}
		log.Printf("Watching manifests at %s/%s/\n", podPrefix, *nodeName)

		quit := make(chan struct{})
		errChan := make(chan error)
		podCh := make(chan []kp.ManifestResult)
		go store.WatchPods(podPrefix, types.NodeName(*nodeName), quit, errChan, podCh)
		for {
			select {
			case results := <-podCh:
				if len(results) == 0 {
					fmt.Printf("No manifests exist for %s under %s (they may have been deleted)\n", *nodeName, podPrefix)
				} else {
					for _, result := range results {
						if err := result.Manifest.Write(os.Stdout); err != nil {
							log.Fatalf("write error: %v", err)
						}
					}
				}
			case err := <-errChan:
				log.Fatalf("Error occurred while listening to pods: %s", err)
			}
		}
	}
}
Example 20
func handlePodRemoval(consulClient consulutil.ConsulClient, labeler labels.ApplicatorWithoutWatches) error {
	var rm *P2RM
	if *podUniqueKey != "" {
		rm = NewUUIDP2RM(consulClient, types.PodUniqueKey(*podUniqueKey), types.PodID(*podName), labeler)
	} else {
		if *nodeName == "" {
			hostname, err := os.Hostname()
			if err != nil {
				return fmt.Errorf("error getting hostname. use --node to specify a node: %v\n", err)
			}
			*nodeName = hostname
		}

		rm = NewLegacyP2RM(consulClient, types.PodID(*podName), types.NodeName(*nodeName), labeler)
	}

	podIsManagedByRC, rcID, err := rm.checkForManagingReplicationController()
	if err != nil {
		return err
	}

	if !podIsManagedByRC {
		err = rm.deletePod()
		if err != nil {
			return err
		}
	}

	if podIsManagedByRC && !*deallocation {
		return fmt.Errorf("error: %s is managed by replication controller: %s\n"+
			"It's possible you meant you deallocate this pod on this node. If so, please confirm your intention with --deallocate\n", *nodeName, rcID)
	}

	if podIsManagedByRC && *deallocation {
		err = rm.decrementDesiredCount(rcID)
		if err != nil {
			return fmt.Errorf("Encountered error deallocating from the RC %s. You may attempt this command again or use `p2-rctl` to cleanup manually.\n%v",
				rcID,
				err)
		}
	}

	fmt.Printf("%s: successfully removed %s\n", rm.NodeName, rm.PodID)
	return nil
}
Example 21
func verifyHealthChecks(config *preparer.PreparerConfig, services []string) error {
	client, err := config.GetConsulClient()
	if err != nil {
		return err
	}
	store := kp.NewConsulStore(client)

	time.Sleep(30 * time.Second)
	// check consul for health information for each app
	name, err := os.Hostname()
	if err != nil {
		return err
	}

	node := types.NodeName(name)
	for _, sv := range services {
		res, err := store.GetHealth(sv, node)
		if err != nil {
			return err
		} else if (res == kp.WatchResult{}) {
			return fmt.Errorf("No results for %s: \n\n %s", sv, targetLogs())
		} else if res.Status != string(health.Passing) {
			return fmt.Errorf("%s did not pass health check: \n\n %s", sv, targetLogs())
		} else {
			fmt.Println(res)
		}
	}

	for _, sv := range services {
		res, err := store.GetServiceHealth(sv)
		getres, _ := store.GetHealth(sv, node)
		if err != nil {
			return err
		}
		val := res[kp.HealthPath(sv, node)]
		if getres.Id != val.Id || getres.Service != val.Service || getres.Status != val.Status {
			return fmt.Errorf("GetServiceHealth failed %+v: \n\n%s", res, targetLogs())
		}
	}

	// if it reaches here it means health checks
	// are being written to the KV store properly
	return nil
}
Example 22
File: kv.go Project: drcapulet/p2
func extractNodeFromKey(key string) (types.NodeName, error) {
	keyParts := strings.Split(key, "/")

	if len(keyParts) == 0 {
		return "", util.Errorf("Malformed key '%s'", key)
	}

	if keyParts[0] == "hooks" {
		// Hooks are allowed to not have a node, everything else should
		return "", nil
	}

	// A key should look like intent/<node>/<pod_id> OR intent/<node>/<uuid> for example
	if len(keyParts) != 3 {
		return "", util.Errorf("Malformed key '%s'", key)
	}

	return types.NodeName(keyParts[1]), nil
}
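As the comment notes, intent and reality keys look like <tree>/<node>/<pod>, while hook keys carry no node. A standard-library-only sketch with a made-up key:

package main

import (
	"fmt"
	"strings"
)

func main() {
	key := "intent/abc01.sjc1/mysql" // hypothetical key in <tree>/<node>/<pod> form

	parts := strings.Split(key, "/")
	switch {
	case len(parts) > 0 && parts[0] == "hooks":
		fmt.Println("hook key, no node") // hooks are not namespaced by node
	case len(parts) != 3:
		fmt.Println("malformed key:", key)
	default:
		fmt.Println("node:", parts[1]) // prints: node: abc01.sjc1
	}
}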
Example 23
File: main.go Project: drcapulet/p2
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	dir := hooks.Hooks(*hookDir, &logging.DefaultLogger)

	hookType, err := hooks.AsHookType(*hookType)
	if err != nil {
		log.Fatalln(err)
	}

	pod, err := pods.PodFromPodHome(types.NodeName(*nodeName), *podDir)
	if err != nil {
		log.Fatalln(err)
	}

	var podManifest manifest.Manifest
	if *manifestPath != "" {
		podManifest, err = manifest.FromPath(*manifestPath)
		if err != nil {
			log.Fatalln(err)
		}
	} else {
		podManifest, err = pod.CurrentManifest()
		if err != nil {
			log.Fatalln(err)
		}
	}

	log.Printf("About to run %s hooks for pod %s\n", hookType, pod.Home())
	err = dir.RunHookType(hookType, pod, podManifest)
	if err != nil {
		log.Fatalln(err)
	}
}
Example 24
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	hookFactory := pods.NewHookFactory(filepath.Join(*podRoot, "hooks", *hookType), types.NodeName(*nodeName))

	// /data/pods/hooks/<event>/<id>
	// if the event is the empty string (global hook), then that path segment
	// will be cleaned out
	pod := hookFactory.NewHookPod(manifest.ID())

	// for now use noop verifier in this CLI
	err = pod.Install(manifest, auth.NopVerifier(), artifact.NewRegistry(*registryURI, uri.DefaultFetcher, osversion.DefaultDetector))
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}
	// hooks write their current manifest manually since it's normally done at
	// launch time
	_, err = pod.WriteCurrentManifest(manifest)
	if err != nil {
		log.Fatalf("Could not write current manifest for %s: %s", manifest.ID(), err)
	}

	err = hooks.InstallHookScripts(*hookRoot, pod, manifest, logging.DefaultLogger)
	if err != nil {
		log.Fatalf("Could not write hook scripts: %s", err)
	}
}
Example 25
func verifyReality(waitTime time.Duration, consulID types.PodID, agentID types.PodID) error {
	quit := make(chan struct{})
	defer close(quit)
	store := kp.NewConsulStore(kp.NewConsulClient(kp.Options{
		Token: *consulToken,
	}))
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}
	waitChan := time.After(waitTime)
	hasConsul := false
	hasPreparer := false
	for {
		select {
		case <-waitChan:
			return util.Errorf(
				"Consul and/or Preparer weren't in the reality store within %s (consul=%t, preparer=%t)",
				waitTime, hasConsul, hasPreparer)
		case <-time.After(100 * time.Millisecond):
			results, _, err := store.ListPods(kp.REALITY_TREE, types.NodeName(hostname))
			if err != nil {
				log.Printf("Error looking for pods: %s\n", err)
				continue
			}
			for _, res := range results {
				if res.Manifest.ID() == consulID {
					hasConsul = true
				} else if res.Manifest.ID() == agentID {
					hasPreparer = true
				}
			}
			if hasConsul && hasPreparer {
				return nil
			}
		}
	}
}
Example 26
func main() {
	kingpin.Version(version.VERSION)
	kingpin.Parse()

	if *nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Fatalf("error getting node name: %v", err)
		}
		*nodeName = hostname
	}

	manifest, err := manifest.FromURI(*manifestURI)
	if err != nil {
		log.Fatalf("%s", err)
	}

	err = authorize(manifest)
	if err != nil {
		log.Fatalf("%s", err)
	}

	podFactory := pods.NewFactory(*podRoot, types.NodeName(*nodeName))
	pod := podFactory.NewLegacyPod(manifest.ID())
	err = pod.Install(manifest, auth.NopVerifier(), artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector))
	if err != nil {
		log.Fatalf("Could not install manifest %s: %s", manifest.ID(), err)
	}

	success, err := pod.Launch(manifest)
	if err != nil {
		log.Fatalf("Could not launch manifest %s: %s", manifest.ID(), err)
	}
	if !success {
		log.Fatalln("Unsuccessful launch of one or more things in the manifest")
	}
}
Example 27
func (s store) SchedulePod(_ context.Context, req *podstore_protos.SchedulePodRequest) (*podstore_protos.SchedulePodResponse, error) {
	if req.NodeName == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, "node_name must be provided")
	}

	if req.Manifest == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, "manifest must be provided")
	}

	manifest, err := manifest.FromBytes([]byte(req.Manifest))
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "could not parse passed manifest: %s", err)
	}

	podUniqueKey, err := s.scheduler.Schedule(manifest, types.NodeName(req.NodeName))
	if err != nil {
		return nil, grpc.Errorf(codes.Unavailable, "could not schedule pod: %s", err)
	}

	resp := &podstore_protos.SchedulePodResponse{
		PodUniqueKey: podUniqueKey.String(),
	}
	return resp, nil
}
Example 28
func TestUnschedule(t *testing.T) {
	node := types.NodeName("some_node")
	key := types.NewPodUUID()

	podPath := fmt.Sprintf("pods/%s", key.ID)
	indexPath := fmt.Sprintf("intent/%s/%s", node, key.ID)

	// Initialize the store with entries at the pod path and index path
	pods := map[string]Pod{
		podPath: {
			Manifest: testManifest(),
			Node:     node,
		},
	}

	indices := map[string]PodIndex{
		indexPath: {
			PodKey: key,
		},
	}
	store, kv := storeWithFakeKV(t, pods, indices)

	// Now delete the pod entry
	err := store.Unschedule(key)
	if err != nil {
		t.Fatalf("Unexpected error deleting pod: %s", err)
	}

	if kv.Entries[podPath] != nil {
		t.Fatalf("Key '%s' was deleted as expected", podPath)
	}

	if kv.Entries[indexPath] != nil {
		t.Fatalf("Index '%s' was deleted as expected", indexPath)
	}
}
Example 29
func main() {
	kingpin.Version(version.VERSION)
	_, opts, _ := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)

	var intents []kp.ManifestResult
	var realities []kp.ManifestResult
	var err error
	filterNodeName := types.NodeName(*nodeArg)
	filterPodID := types.PodID(*podArg)

	if filterNodeName != "" {
		intents, _, err = store.ListPods(kp.INTENT_TREE, filterNodeName)
	} else {
		intents, _, err = store.AllPods(kp.INTENT_TREE)
	}
	if err != nil {
		message := "Could not list intent kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	if filterNodeName != "" {
		realities, _, err = store.ListPods(kp.REALITY_TREE, filterNodeName)
	} else {
		realities, _, err = store.AllPods(kp.REALITY_TREE)
	}

	if err != nil {
		message := "Could not list reality kvpairs: %s"
		if kvErr, ok := err.(consulutil.KVError); ok {
			log.Fatalf(message, kvErr.UnsafeError)
		} else {
			log.Fatalf(message, err)
		}
	}

	statusMap := make(map[types.PodID]map[types.NodeName]inspect.NodePodStatus)

	for _, kvp := range intents {
		if err = inspect.AddKVPToMap(kvp, inspect.INTENT_SOURCE, filterNodeName, filterPodID, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	for _, kvp := range realities {
		if err = inspect.AddKVPToMap(kvp, inspect.REALITY_SOURCE, filterNodeName, filterPodID, statusMap); err != nil {
			log.Fatal(err)
		}
	}

	hchecker := checker.NewConsulHealthChecker(client)
	for podID := range statusMap {
		resultMap, err := hchecker.Service(podID.String())
		if err != nil {
			log.Fatalf("Could not retrieve health checks for pod %s: %s", podID, err)
		}

		for node, result := range resultMap {
			if filterNodeName != "" && node != filterNodeName {
				continue
			}

			old := statusMap[podID][node]
			old.Health = result.Status
			statusMap[podID][node] = old
		}
	}

	// Keep this switch in sync with the enum options for the "format" flag. Rethink this
	// design once there are many different formats.
	switch *format {
	case "tree":
		// Native data format is already a "tree"
		enc := json.NewEncoder(os.Stdout)
		err = enc.Encode(statusMap)
	case "list":
		// "List" format is a flattened version of "tree"
		var output []inspect.NodePodStatus
		for podID, nodes := range statusMap {
			for node, status := range nodes {
				status.PodId = podID
				status.NodeName = node
				output = append(output, status)
			}
		}
		enc := json.NewEncoder(os.Stdout)
		err = enc.Encode(output)
	default:
		err = fmt.Errorf("unrecognized format: %s", *format)
	}
	if err != nil {
		log.Fatal(err)
	}
}
Example 30
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a third unschedulable node and verify it doesn't get scheduled by anything
	applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined")

	labeled, err = waitForPodLabel(applicator, false, "node3/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	// Now add 10 new nodes and verify that they are scheduled by the first daemon set
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1")
		Assert(t).IsNil(err, "expected no error labeling node")
	}
	for i := 0; i < 10; i++ {
		podPath := fmt.Sprintf("good_node%v/testPod", i)
		labeled, err = waitForPodLabel(applicator, true, podPath)
		Assert(t).IsNil(err, "Expected pod to have a dsID label")
		dsID := labeled.Labels.Get(DSIDLabel)
		Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	}

	//
	// Update a daemon set's node selector and expect a node to be unscheduled
	//
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	//
	// Now update the node selector to schedule node2 again and verify
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Disabling a daemon set should not unschedule any nodes
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true

		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify it is disabled
	condition := func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if !anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be disabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be disabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Enabling a daemon set should make the daemon set resume its regular activities
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")

	// Verify it is enabled
	condition = func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be enabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be enabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error enabling daemon set!")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")
}