Example #1
func TestList(t *testing.T) {
	store := consulStoreWithFakeKV()

	// Create first DaemonSet
	firstPodID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(firstPodID)

	firstManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	firstDS, err := store.Create(firstManifest, minHealth, clusterName, selector, firstPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	// Create second DaemonSet
	secondPodID := types.PodID("different_pod_id")

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(secondPodID)

	secondManifest := manifestBuilder.GetManifest()
	secondDS, err := store.Create(secondManifest, minHealth, clusterName, selector, secondPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	daemonSetList, err := store.List()
	if err != nil {
		t.Fatalf("Error getting list of daemon sets: %s", err)
	}

	Assert(t).AreEqual(len(daemonSetList), 2, "Unexpected number of daemon sets listed")

	for _, daemonSet := range daemonSetList {
		if daemonSet.ID == firstDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, firstPodID, "Listed daemon set pod ids were not equal")

		} else if daemonSet.ID == secondDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, secondPodID, "Listed daemon set pod ids were not equal")

		} else {
			t.Errorf("Unexpected daemon set listed: %v", daemonSet)
		}
	}
}
Example #2
func getConsulManifest(dir string) (string, error) {
	consulTar := fmt.Sprintf(
		"file://%s",
		util.From(runtime.Caller(0)).ExpandPath("../hoisted-consul_052.tar.gz"),
	)
	builder := manifest.NewBuilder()
	builder.SetID("consul")
	stanzas := map[launch.LaunchableID]launch.LaunchableStanza{
		"consul": {
			LaunchableType: "hoist",
			Location:       consulTar,
		},
	}
	builder.SetLaunchables(stanzas)
	manifest := builder.GetManifest()

	consulPath := path.Join(dir, "consul.yaml")
	f, err := os.OpenFile(consulPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", err
	}
	defer f.Close()

	err = manifest.Write(f)
	if err != nil {
		return "", err
	}
	return consulPath, f.Close()
}
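A minimal usage sketch for the helper above, under two assumptions: the snippet lives next to getConsulManifest with the same imports, and manifest.FromBytes accepts the unsigned YAML that manifest.Write produces (the examples here only show it parsing clearsigned bytes). The function name exampleConsulManifestRoundTrip is illustrative, not part of the project.
// Hypothetical round-trip check for getConsulManifest: write the manifest to
// a temp dir, read the YAML back, parse it, and confirm the pod ID survived.
func exampleConsulManifestRoundTrip() error {
	dir, err := ioutil.TempDir("", "consul-manifest")
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	manifestPath, err := getConsulManifest(dir)
	if err != nil {
		return err
	}

	raw, err := ioutil.ReadFile(manifestPath)
	if err != nil {
		return err
	}

	parsed, err := manifest.FromBytes(raw) // assumption: FromBytes handles unsigned YAML
	if err != nil {
		return err
	}
	if parsed.ID() != "consul" {
		return fmt.Errorf("unexpected pod id %q in %s", parsed.ID(), manifestPath)
	}
	return nil
}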
Example #3
// Writes a pod manifest for the hello pod with the specified name in the
// specified dir, configured to run on the specified port. Returns the path to
// the signed manifest.
func writeHelloManifest(dir string, manifestName string, port int) (string, error) {
	hello := fmt.Sprintf("file://%s", util.From(runtime.Caller(0)).ExpandPath("../hoisted-hello_def456.tar.gz"))
	builder := manifest.NewBuilder()
	builder.SetID("hello")
	builder.SetStatusPort(port)
	builder.SetStatusHTTP(true)
	stanzas := map[launch.LaunchableID]launch.LaunchableStanza{
		"hello": {
			LaunchableType: "hoist",
			Location:       hello,
		},
	}
	builder.SetLaunchables(stanzas)
	builder.SetConfig(map[interface{}]interface{}{
		"port": port,
	})
	manifest := builder.GetManifest()

	manifestPath := filepath.Join(dir, manifestName)
	f, err := os.OpenFile(manifestPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", err
	}
	defer f.Close()
	err = manifest.Write(f)
	if err != nil {
		return "", err
	}

	return signManifest(manifestPath, dir)
}
Example #4
func testManifest() manifest.Manifest {
	builder := manifest.NewBuilder()

	// bare minimum manifest, we don't care what's in it
	builder.SetID("some_pod")
	return builder.GetManifest()
}
Example #5
func newManifestResult(id types.PodID) kp.ManifestResult {
	builder := manifest.NewBuilder()
	builder.SetID(id)
	builder.SetStatusPort(1) // StatusPort must != 0 for updatePods to use it
	return kp.ManifestResult{
		Manifest: builder.GetManifest(),
	}
}
Example #6
func testHookListener(t *testing.T) (HookListener, string, <-chan struct{}) {
	hookPrefix := kp.HOOK_TREE
	destDir, _ := ioutil.TempDir("", "pods")
	defer os.RemoveAll(destDir)
	execDir, err := ioutil.TempDir("", "exec")
	defer os.RemoveAll(execDir)
	Assert(t).IsNil(err, "should not have erred creating a tempdir")

	current, err := user.Current()
	Assert(t).IsNil(err, "test setup: could not get the current user")
	builder := manifest.NewBuilder()
	builder.SetID("users")
	builder.SetRunAsUser(current.Username)
	builder.SetLaunchables(map[launch.LaunchableID]launch.LaunchableStanza{
		"create": {
			Location:       util.From(runtime.Caller(0)).ExpandPath("hoisted-hello_def456.tar.gz"),
			LaunchableType: "hoist",
			LaunchableId:   "create",
		},
	})
	podManifest := builder.GetManifest()
	manifestBytes, err := podManifest.Marshal()
	Assert(t).IsNil(err, "manifest bytes error should have been nil")

	fakeSigner, err := openpgp.NewEntity("p2", "p2-test", "*****@*****.**", nil)
	Assert(t).IsNil(err, "NewEntity error should have been nil")

	var buf bytes.Buffer
	sigWriter, err := clearsign.Encode(&buf, fakeSigner.PrivateKey, nil)
	Assert(t).IsNil(err, "clearsign encode error should have been nil")

	sigWriter.Write(manifestBytes)
	sigWriter.Close()

	podManifest, err = manifest.FromBytes(buf.Bytes())
	Assert(t).IsNil(err, "should have generated manifest from signed bytes")

	fakeIntent := fakeStoreWithManifests(kp.ManifestResult{
		Manifest: podManifest,
	})

	hookFactory := pods.NewHookFactory(destDir, "testNode")

	listener := HookListener{
		Intent:           fakeIntent,
		HookPrefix:       hookPrefix,
		ExecDir:          execDir,
		HookFactory:      hookFactory,
		Logger:           logging.DefaultLogger,
		authPolicy:       auth.FixedKeyringPolicy{openpgp.EntityList{fakeSigner}, nil},
		artifactVerifier: auth.NopVerifier(),
		artifactRegistry: artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector),
	}

	return listener, destDir, fakeIntent.quit
}
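The signing steps in the middle of this helper (marshal, clearsign with a throwaway entity, re-parse) form a self-contained pattern; here is that portion pulled out as a sketch, assuming the same openpgp, clearsign, bytes, and manifest imports as the test above. The helper name and the placeholder email are illustrative.
// clearsignManifest signs a manifest with a throwaway GPG entity and parses
// the clearsigned output back into a manifest, mirroring the steps above.
func clearsignManifest(m manifest.Manifest) (manifest.Manifest, *openpgp.Entity, error) {
	signer, err := openpgp.NewEntity("p2", "p2-test", "p2-test@example.com", nil) // placeholder identity
	if err != nil {
		return nil, nil, err
	}

	raw, err := m.Marshal()
	if err != nil {
		return nil, nil, err
	}

	var buf bytes.Buffer
	sigWriter, err := clearsign.Encode(&buf, signer.PrivateKey, nil)
	if err != nil {
		return nil, nil, err
	}
	if _, err := sigWriter.Write(raw); err != nil {
		return nil, nil, err
	}
	if err := sigWriter.Close(); err != nil {
		return nil, nil, err
	}

	signed, err := manifest.FromBytes(buf.Bytes())
	if err != nil {
		return nil, nil, err
	}
	return signed, signer, nil
}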
Example #7
File: pod_test.go Project: rudle/p2
func TestInstall(t *testing.T) {
	fetcher := uri.NewLoggedFetcher(nil)
	testContext := util.From(runtime.Caller(0))

	currentUser, err := user.Current()
	Assert(t).IsNil(err, "test setup: couldn't get current user")

	testLocation := testContext.ExpandPath("hoisted-hello_3c021aff048ca8117593f9c71e03b87cf72fd440.tar.gz")

	launchables := map[launch.LaunchableID]launch.LaunchableStanza{
		"hello": {
			LaunchableId:   "hello",
			Location:       testLocation,
			LaunchableType: "hoist",
		},
	}

	builder := manifest.NewBuilder()
	builder.SetID("hello")
	builder.SetLaunchables(launchables)
	builder.SetRunAsUser(currentUser.Username)
	manifest := builder.GetManifest()

	testPodDir, err := ioutil.TempDir("", "testPodDir")
	Assert(t).IsNil(err, "Got an unexpected error creating a temp directory")
	defer os.RemoveAll(testPodDir)

	pod := Pod{
		Id:      "testPod",
		home:    testPodDir,
		logger:  Log.SubLogger(logrus.Fields{"pod": "testPod"}),
		Fetcher: fetcher,
	}

	err = pod.Install(manifest, auth.NopVerifier(), artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector))
	Assert(t).IsNil(err, "there should not have been an error when installing")

	Assert(t).AreEqual(
		fetcher.SrcUri.String(),
		testLocation,
		"The correct url wasn't set for the curl library",
	)

	hoistedHelloUnpacked := filepath.Join(testPodDir, "hello", "installs", "hello_3c021aff048ca8117593f9c71e03b87cf72fd440")
	if info, err := os.Stat(hoistedHelloUnpacked); err != nil || !info.IsDir() {
		t.Fatalf("Expected %s to be the unpacked artifact location", hoistedHelloUnpacked)
	}
	helloLaunch := filepath.Join(hoistedHelloUnpacked, "bin", "launch")
	if info, err := os.Stat(helloLaunch); err != nil || info.IsDir() {
		t.Fatalf("Expected %s to be a the launch script for hello", helloLaunch)
	}
}
Example #8
func createDaemonSet(store *consulStore, t *testing.T) ds_fields.DaemonSet {
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)

	manifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	if ds.ID == "" {
		t.Error("daemon set should have an id")
	}

	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(ds.PodID, "", "Daemon set should have a pod id")

	Assert(t).AreEqual(ds.PodID, podID, "Daemon set pod id was not set correctly")
	Assert(t).AreEqual(ds.MinHealth, minHealth, "Daemon set minimum health was not set correctly")
	Assert(t).AreEqual(ds.Name, clusterName, "Daemon set cluster name was not set correctly")
	Assert(t).IsFalse(ds.Disabled, "Daemon set disabled field was not set correctly")

	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := ds.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := ds.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set manifest not set correctly")

	return ds
}
Example #9
File: check.go Project: rudle/p2
func createHelloReplicationController(dir string) (fields.ID, error) {
	hello := fmt.Sprintf("file://%s", util.From(runtime.Caller(0)).ExpandPath("../hoisted-hello_def456.tar.gz"))
	builder := manifest.NewBuilder()
	builder.SetID("hello")
	builder.SetStatusPort(43770)
	stanzas := map[launch.LaunchableID]launch.LaunchableStanza{
		"hello": {
			LaunchableId:   "hello",
			LaunchableType: "hoist",
			Location:       hello,
		},
	}
	builder.SetLaunchables(stanzas)
	manifest := builder.GetManifest()
	manifestPath := path.Join(dir, "hello.yaml")

	f, err := os.OpenFile(manifestPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return fields.ID(""), err
	}
	defer f.Close()
	err = manifest.Write(f)
	if err != nil {
		return fields.ID(""), err
	}
	f.Close()

	manifestPath, err = signManifest(manifestPath, dir)
	if err != nil {
		return fields.ID(""), err
	}

	cmd := exec.Command("p2-rctl", "--log-json", "create", "--manifest", manifestPath, "--node-selector", "test=yes")
	out := bytes.Buffer{}
	cmd.Stdout = &out
	cmd.Stderr = &out
	err = cmd.Run()
	if err != nil {
		return fields.ID(""), fmt.Errorf("Couldn't create replication controller for hello: %s %s", out.String(), err)
	}
	var rctlOut struct {
		ID string `json:"id"`
	}

	err = json.Unmarshal(out.Bytes(), &rctlOut)
	if err != nil {
		return fields.ID(""), fmt.Errorf("Couldn't read RC ID out of p2-rctl invocation result: %v", err)
	}

	return fields.ID(rctlOut.ID), exec.Command("p2-rctl", "set-replicas", rctlOut.ID, "1").Run()
}
Example #10
func main() {
	bin2pod.Version(version.VERSION)
	kingpin.MustParse(bin2pod.Parse(os.Args[1:]))

	res := result{}
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID())

	stanza := launch.LaunchableStanza{}
	stanza.LaunchableType = "hoist"

	workingDir := activeDir()

	err := addManifestConfig(manifestBuilder)
	if err != nil {
		log.Fatalln(err.Error())
	}

	tarLocation, err := makeTar(workingDir)
	if err != nil {
		log.Fatalln(err.Error())
	}
	res.TarPath = tarLocation

	if *location != "" {
		stanza.Location = *location
		res.FinalLocation = *location
	} else {
		stanza.Location = fmt.Sprintf("file://%v", tarLocation)
		res.FinalLocation = tarLocation
	}

	manifestBuilder.SetLaunchables(map[launch.LaunchableID]launch.LaunchableStanza{
		launch.LaunchableID(podID()): stanza,
	})

	manifest := manifestBuilder.GetManifest()
	res.ManifestPath, err = writeManifest(workingDir, manifest)
	if err != nil {
		log.Fatalf("Couldn't write manifest: %s", err)
	}

	b, err := json.MarshalIndent(res, "", "  ")
	if err != nil {
		log.Fatalf("Couldn't marshal output: %s", err)
	}
	_, err = os.Stdout.Write(b)
	if err != nil {
		log.Fatalf("Couldn't write to stdout: %s", err)
	}
}
Example #11
func TestCreate(t *testing.T) {
	store := consulStoreWithFakeKV()
	createDaemonSet(store, t)

	// Create a bad DaemonSet
	podID := types.PodID("")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID("")

	podManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad pod id")
	}

	podID = types.PodID("pod_id")
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad manifest pod id")
	}

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID("different_pod_id")

	podManifest = manifestBuilder.GetManifest()
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on pod id and manifest pod id mismatch")
	}
}
Example #12
File: pod_test.go Project: rudle/p2
func TestBuildRunitServices(t *testing.T) {
	fakeSB := runit.FakeServiceBuilder()
	defer fakeSB.Cleanup()
	serviceBuilder := &fakeSB.ServiceBuilder

	pod := Pod{
		P2Exec:         "/usr/bin/p2-exec",
		Id:             "testPod",
		home:           "/data/pods/testPod",
		ServiceBuilder: serviceBuilder,
		LogExec:        runit.DefaultLogExec(),
		FinishExec:     DefaultFinishExec,
	}
	hl, sb := hoist.FakeHoistLaunchableForDir("multiple_script_test_hoist_launchable")
	defer hoist.CleanupFakeLaunchable(hl, sb)
	hl.RunAs = "testPod"
	executables, err := hl.Executables(serviceBuilder)
	outFilePath := filepath.Join(serviceBuilder.ConfigRoot, "testPod.yaml")

	Assert(t).IsNil(err, "Got an unexpected error when attempting to start runit services")
	testManifest := manifest.NewBuilder()
	testManifest.SetRestartPolicy(runit.RestartPolicyAlways)
	testLaunchable := hl.If()
	pod.buildRunitServices([]launch.Launchable{testLaunchable}, testManifest.GetManifest())

	f, err := os.Open(outFilePath)
	Assert(t).IsNil(err, "Got an unexpected error opening the servicebuilder yaml file")
	defer f.Close()
	bytes, err := ioutil.ReadAll(f)
	Assert(t).IsNil(err, "Got an unexpected error reading the servicebuilder yaml file")

	expectedMap := map[string]runit.ServiceTemplate{
		executables[0].Service.Name: {
			Run:    executables[0].Exec,
			Log:    runit.DefaultLogExec(),
			Finish: pod.FinishExecForLaunchable(testLaunchable),
		},
		executables[1].Service.Name: {
			Run:    executables[1].Exec,
			Log:    runit.DefaultLogExec(),
			Finish: pod.FinishExecForLaunchable(testLaunchable),
		},
	}
	expected, err := yaml.Marshal(expectedMap)
	Assert(t).IsNil(err, "Got error marshalling expected map to yaml")

	Assert(t).AreEqual(string(bytes), string(expected), "Servicebuilder yaml file didn't have expected contents")
}
Example #13
func TestInstallHooks(t *testing.T) {
	destDir, _ := ioutil.TempDir("", "pods")
	defer os.RemoveAll(destDir)
	execDir, err := ioutil.TempDir("", "exec")
	defer os.RemoveAll(execDir)
	Assert(t).IsNil(err, "should not have erred creating a tempdir")

	current, err := user.Current()
	Assert(t).IsNil(err, "test setup: could not get the current user")
	builder := manifest.NewBuilder()
	builder.SetID("users")
	builder.SetRunAsUser(current.Username)
	builder.SetLaunchables(map[launch.LaunchableID]launch.LaunchableStanza{
		"create": {
			Location:       util.From(runtime.Caller(0)).ExpandPath("testdata/hoisted-hello_def456.tar.gz"),
			LaunchableType: "hoist",
		},
	})

	podManifest := builder.GetManifest()

	hookFactory := pods.NewHookFactory(destDir, "testNode")
	hooksPod := hookFactory.NewHookPod(podManifest.ID())

	preparer := Preparer{
		hooksManifest:    podManifest,
		hooksPod:         hooksPod,
		hooksExecDir:     execDir,
		Logger:           logging.DefaultLogger,
		artifactRegistry: artifact.NewRegistry(nil, uri.DefaultFetcher, osversion.DefaultDetector),
		artifactVerifier: auth.NopVerifier(),
	}

	err = preparer.InstallHooks()
	Assert(t).IsNil(err, "There should not have been an error in the call to SyncOnce()")
	currentAlias := filepath.Join(destDir, "users", "create", "current", "bin", "launch")
	_, err = os.Stat(currentAlias)
	Assert(t).IsNil(err, fmt.Sprintf("%s should have been created", currentAlias))

	hookFile := filepath.Join(execDir, "users__create__launch")
	_, err = os.Stat(hookFile)
	Assert(t).IsNil(err, "should have created the user launch script")
}
Example #14
func rcsWithIDs(t *testing.T, id string, num int) api.KVPairs {
	var pairs api.KVPairs
	builder := manifest.NewBuilder()
	builder.SetID("slug")
	manifest := builder.GetManifest()
	for i := 0; i < num; i++ {
		rc := fields.RC{
			ID:       fields.ID(id),
			Manifest: manifest,
		}

		jsonRC, err := json.Marshal(rc)
		if err != nil {
			t.Fatalf("Unable to marshal test RC as json: %s", err)
		}

		pairs = append(pairs, &api.KVPair{
			Value: jsonRC,
		})
	}

	return pairs
}
Example #15
func setup(t *testing.T) (
	rcStore rcstore.Store,
	kpStore fakeKpStore,
	applicator labels.Applicator,
	rc *replicationController,
	alerter *alertingtest.AlertRecorder,
) {

	rcStore = rcstore.NewFake()

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID("testPod")
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	podLabels := map[string]string{"podTest": "successful"}

	rcData, err := rcStore.Create(podManifest, nodeSelector, podLabels)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore = fakeKpStore{manifests: make(map[string]manifest.Manifest)}
	applicator = labels.NewFakeApplicator()
	alerter = alertingtest.NewRecorder()

	rc = New(
		rcData,
		&kpStore,
		rcStore,
		scheduler.NewApplicatorScheduler(applicator),
		applicator,
		logging.DefaultLogger,
		alerter,
	).(*replicationController)

	return
}
Example #16
// TestSchedule checks consecutive scheduling and unscheduling for:
//	- creation of a daemon set
//	- different node selectors
//	- changes to node allocations
//	- mutations to a daemon set
//	- deleting a daemon set
func TestSchedule(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "nodeOk")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")

	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")

	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that the pod has been scheduled
	//
	numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled")
	Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Add 10 good nodes and 10 bad nodes then verify
	//
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "good")
		Assert(t).IsNil(err, "expected no error labeling node")
	}

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "bad")
		Assert(t).IsNil(err, "expected no error labeling node")
	}

	// The node watch should automatically notice a change
	numNodes = waitForNodes(t, ds, 11, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 11, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 11, "expected a lot of nodes to have been labeled")

	//
	// Add a node with the labels nodeQuality=good and cherry=pick
	//
	err = applicator.SetLabel(labels.NODE, "nodeOk", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling nodeOk")
	err = applicator.SetLabel(labels.NODE, "nodeOk", "cherry", "pick")
	Assert(t).IsNil(err, "expected no error labeling nodeOk")
	fmt.Println(applicator.GetLabels(labels.NODE, "nodeOk"))

	numNodes = waitForNodes(t, ds, 12, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 12, "took too long to schedule")

	// Schedule only a node that is both nodeQuality=good and cherry=pick
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"good"}).
			Add("cherry", klabels.EqualsOperator, []string{"pick"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "nodeOk", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Disabling the daemon set and making a change should not do anything
	//
	mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.Disabled = true
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to unschedule")

	//
	// Now re-enable it and try to schedule everything
	//
	mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything()
		dsToChange.Disabled = false
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	// 11 good nodes, 11 bad nodes, and 1 good cherry-picked node = 23 nodes
	numNodes = waitForNodes(t, ds, 23, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 23, "took too long to schedule")

	//
	// Deleting the daemon set should unschedule all of its nodes
	//
	ds.logger.NoFields().Info("Deleting daemon set...")
	err = dsStore.Delete(ds.ID())
	if err != nil {
		t.Fatalf("Unable to delete daemon set: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected all nodes to have been unlabeled")

	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")
}
Example #17
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a third unschedulable node and verify it doesn't get scheduled by anything
	applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined")

	labeled, err = waitForPodLabel(applicator, false, "node3/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	// Now add 10 new nodes and verify that they are scheduled by the first daemon set
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1")
		Assert(t).IsNil(err, "expected no error labeling node")
	}
	for i := 0; i < 10; i++ {
		podPath := fmt.Sprintf("good_node%v/testPod", i)
		labeled, err = waitForPodLabel(applicator, true, podPath)
		Assert(t).IsNil(err, "Expected pod to have a dsID label")
		dsID := labeled.Labels.Get(DSIDLabel)
		Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	}

	//
	// Update a daemon set's node selector and expect a node to be unscheduled
	//
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	//
	// Now update the node selector to schedule node2 again and verify
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Disabling a daemon set should not unschedule any nodes
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true

		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify it is disabled
	condition := func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if !anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be disabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be disabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Enabling a daemon set should make the daemon set resume its regular activities
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")

	// Verify it is enabled
	condition = func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be enabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be enabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error enabling daemon set!")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")
}
Example #18
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, instantiate a farm, and expect it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error added pod to intent tree")
	}

	// Assert that precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure there are no nodes left
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
Example #19
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force contention,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}

	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")
}
Example #20
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and then there should be contention on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}

	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")

	// Enable second ds and verify it is enabled
	_, err = dsStore.MutateDS(secondDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling second daemon set")

	// Enable third ds and verify it is disabled because it contends with second ds
	_, err = dsStore.MutateDS(thirdDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Test equivalent selectors, fifth ds should contend with fourth
	//
	anotherPodID := types.PodID("anotherPodID")

	anotherManifestBuilder := manifest.NewBuilder()
	anotherManifestBuilder.SetID(anotherPodID)
	anotherPodManifest := anotherManifestBuilder.GetManifest()

	equalSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})

	fourthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fourthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify the fourth daemon set is enabled
	err = waitForDisabled(dsf, dsStore, fourthDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling fourth daemon set")

	fifthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fifthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that the fifth daemon set contends and gets disabled
	err = waitForDisabled(dsf, dsStore, fifthDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling fifth daemon set")
}
Example #21
File: kv_test.go Project: rudle/p2
func testManifest(id types.PodID) manifest.Manifest {
	builder := manifest.NewBuilder()
	builder.SetID(id)
	return builder.GetManifest()
}
Example #22
func podWithIDAndPort(id string, port int) manifest.Manifest {
	builder := manifest.NewBuilder()
	builder.SetID(types.PodID(id))
	builder.SetStatusPort(port)
	return builder.GetManifest()
}
Example #23
func TestPublishToReplication(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 1
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")

	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that 2 pods have been scheduled
	//
	numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 2, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 2, "expected a node to have been labeled")

	// Mutate the daemon set so that the node is unscheduled, this should not produce an error
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	select {
	case <-time.After(1 * time.Second):
	case err := <-desiresErrCh:
		t.Fatalf("Unexpected error unscheduling pod: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
Example #24
func TestWatchAll(t *testing.T) {
	store := consulStoreWithFakeKV()
	//
	// Create a new daemon set
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Create another new daemon set
	//
	someOtherPodID := types.PodID("some_other_pod_id")

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()

	someOtherDS, err := store.Create(someOtherManifest, minHealth, clusterName, selector, someOtherPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Watch for create and verify
	//
	quitCh := make(chan struct{})
	inCh := store.WatchAll(quitCh, 0)
	defer close(quitCh)

	var watched WatchedDaemonSetList
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}

	Assert(t).AreEqual(len(watched.DaemonSets), 2, "Unexpected number of daemon sets watched")

	for _, watchedDS := range watched.DaemonSets {
		if watchedDS.ID == ds.ID {
			Assert(t).AreEqual(watchedDS.PodID, ds.PodID, "Daemon sets should have equal pod ids")
		} else if watchedDS.ID == someOtherDS.ID {
			Assert(t).AreEqual(watchedDS.PodID, someOtherDS.PodID, "Daemon sets should have equal pod ids")
		} else {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", watchedDS.ID)
		}
	}

	//
	// Watch for delete and verify
	//
	err = store.Delete(someOtherDS.ID)
	if err != nil {
		t.Error("Unable to delete daemon set")
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}

	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")

	//
	// Watch for update and verify
	//
	mutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = !dsToMutate.Disabled
		return dsToMutate, nil
	}

	ds, err = store.MutateDS(ds.ID, mutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}

	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}

	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")
	Assert(t).AreEqual(ds.PodID, watched.DaemonSets[0].PodID, "Daemon sets should have equal pod ids")
}
Example #25
func TestMutate(t *testing.T) {
	store := consulStoreWithFakeKV()

	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Invalid mutates
	//
	errorMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		return dsToMutate, util.Errorf("This is an error")
	}
	_, err = store.MutateDS(ds.ID, errorMutator)
	if err == nil {
		t.Error("Expected error when mutator produces an error")
	}

	badIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.ID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set ID")
	}

	badPodIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.PodID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badPodIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set PodID to mismatch manifest")
	}
	//
	// A valid mutate followed by validation
	//
	someOtherDisabled := !ds.Disabled
	someOtherMinHealth := 42
	someOtherName := ds_fields.ClusterName("some_other_name")
	someOtherPodID := types.PodID("some_other_pod_id")

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()

	someOtherAZLabel := pc_fields.AvailabilityZone("some_other_zone")
	someOtherSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{someOtherAZLabel.String()})

	goodMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = someOtherDisabled
		dsToMutate.Manifest = someOtherManifest
		dsToMutate.MinHealth = someOtherMinHealth
		dsToMutate.Name = someOtherName
		dsToMutate.NodeSelector = someOtherSelector
		dsToMutate.PodID = someOtherPodID
		return dsToMutate, nil
	}
	someOtherDS, err := store.MutateDS(ds.ID, goodMutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}

	Assert(t).AreEqual(someOtherDS.ID, ds.ID, "Daemon sets should be equal ids")
	Assert(t).AreEqual(someOtherDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(someOtherDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(someOtherDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(someOtherDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")

	someOtherLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := someOtherDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	someOtherSHA, err := someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err := someOtherDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal")

	//
	// Validate daemon set from a get function
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Unable to get daemon set: %s", err)
	}
	Assert(t).AreEqual(getDS.ID, ds.ID, "Daemon sets should be equal ids")
	Assert(t).AreEqual(getDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(getDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(getDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(getDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")

	someOtherLabels = klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	someOtherSHA, err = someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err = getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal")
}
Example #26
func podWithID(id string) manifest.Manifest {
	builder := manifest.NewBuilder()
	builder.SetID(types.PodID(id))
	return builder.GetManifest()
}
Example #27
func testManifest() manifest.Manifest {
	builder := manifest.NewBuilder()
	builder.SetID("slug")
	return builder.GetManifest()
}
Example #28
func TestGet(t *testing.T) {
	store := consulStoreWithFakeKV()
	//
	// Create DaemonSet
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)

	manifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")

	//
	// Get DaemonSet and verify it is the same
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Error retrieving created daemon set: %s", err)
	}

	Assert(t).AreNotEqual(getDS.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(getDS.PodID, "", "Daemon set should have a pod id")

	Assert(t).AreEqual(ds.ID, getDS.ID, "Daemon set should be equal ids")
	Assert(t).AreEqual(ds.PodID, getDS.PodID, "Daemon set should have equal pod ids")
	Assert(t).AreEqual(ds.MinHealth, getDS.MinHealth, "Daemon set should have equal minimum healths")
	Assert(t).AreEqual(ds.Name, getDS.Name, "Daemon set should have equal names")
	Assert(t).AreEqual(ds.Disabled, getDS.Disabled, "Daemon set should have same disabled fields")

	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set shas were not equal")

	// Invalid get operation
	_, _, err = store.Get("bad_id")
	if err == nil {
		t.Error("Expected get operation to fail when getting a daemon set which does not exist")
	}
}
Example #29
func basicManifest() manifest.Manifest {
	builder := manifest.NewBuilder()
	builder.SetID(testPodId)
	return builder.GetManifest()
}
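Across these examples the builder surface boils down to manifest.NewBuilder, a handful of Set* mutators, and GetManifest, with Marshal, Write, SHA, and ID on the result and manifest.FromBytes for parsing. As a closing sketch, here is a hypothetical round trip through that surface; the pod ID, status port, and launchable location are made up, and it assumes FromBytes parses unsigned manifests and that SHA returns comparable string digests, as the SHA comparisons in the tests above suggest.
// buildAndReparse builds a small manifest with the calls used throughout the
// examples above, serializes it, parses it back, and checks the SHAs match.
func buildAndReparse() (manifest.Manifest, error) {
	builder := manifest.NewBuilder()
	builder.SetID(types.PodID("example_pod")) // illustrative pod id
	builder.SetStatusPort(8080)               // illustrative status port
	builder.SetLaunchables(map[launch.LaunchableID]launch.LaunchableStanza{
		"example": {
			LaunchableType: "hoist",
			Location:       "file:///tmp/example.tar.gz", // illustrative location
		},
	})
	built := builder.GetManifest()

	raw, err := built.Marshal()
	if err != nil {
		return nil, err
	}
	parsed, err := manifest.FromBytes(raw) // assumption: FromBytes handles unsigned YAML
	if err != nil {
		return nil, err
	}

	builtSHA, err := built.SHA()
	if err != nil {
		return nil, err
	}
	parsedSHA, err := parsed.SHA()
	if err != nil {
		return nil, err
	}
	if builtSHA != parsedSHA {
		return nil, fmt.Errorf("SHA mismatch after round trip: %s vs %s", builtSHA, parsedSHA)
	}
	return parsed, nil
}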