Example 1
func TestLabelsOnCreate(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	matches, err := store.applicator.GetMatches(selector, labels.PC)
	if err != nil {
		t.Fatalf("Unable to check for label match on new pod cluster: %s", err)
	}

	if len(matches) != 1 {
		t.Errorf("Expected one pod cluster to match label selector")
	}

	if fields.ID(matches[0].ID) != pc.ID {
		t.Errorf("The pod cluster selector didn't match the new pod cluster")
	}
}
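These tests rely on a consulStoreWithFakeKV helper that is not part of this listing. A minimal sketch of what it might look like, assuming the store only needs an in-memory KV and a fake label applicator; the fake-KV constructor and the struct's field names are guesses, while labels.NewFakeApplicator does appear in the farm examples below:

func consulStoreWithFakeKV() *consulStore {
	// Hypothetical wiring: back the store with in-memory fakes so tests
	// never talk to a real Consul agent. Field names are assumptions.
	return &consulStore{
		kv:         consulutil.NewFakeKV(), // assumed fake-KV constructor
		applicator: labels.NewFakeApplicator(),
	}
}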
Example 2
func TestLockForSync(t *testing.T) {
	id := fields.ID("abc123")
	store := consulStoreWithFakeKV()
	syncerType := ConcreteSyncerType("some_syncer")
	session := kptest.NewSession()

	unlocker, err := store.LockForSync(id, syncerType, session)
	if err != nil {
		t.Fatalf("Unexpected error locking pod cluster for sync: %s", err)
	}

	_, err = store.LockForSync(id, syncerType, session)
	if err == nil {
		t.Fatal("Expected an error locking the same cluster for the same syncer type, but there wasn't one")
	} else {
		if !consulutil.IsAlreadyLocked(err) {
			t.Errorf("Expected error to be an already locked error, was %s", err)
		}
	}

	err = unlocker.Unlock()
	if err != nil {
		t.Errorf("Error unlocking the sync lock: %s", err)
	}

	_, err = store.LockForSync(id, syncerType, session)
	if err != nil {
		t.Fatalf("Unexpected error re-locking pod cluster for sync: %s", err)
	}
}
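The test shows that a given (pod cluster ID, syncer type) pair can be locked only once at a time, and can be locked again after unlocking. The lock key layout is not shown anywhere in this listing; one purely hypothetical derivation, using the standard path package:

func syncLockPath(id fields.ID, syncerType ConcreteSyncerType) string {
	// Assumed layout: one lock key per (cluster, syncer type) pair, so
	// different syncer types can hold locks on the same cluster.
	return path.Join("locks", "pod_cluster_sync", id.String(), string(syncerType))
}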
Example 3
func TestCreate(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := kptest.NewSession()
	pcstore := pcstoretest.NewFake()

	pcController := NewPodCluster(testAZ, testCN, testPodID, pcstore, selector, session)

	annotations := map[string]string{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	}

	buf, err := json.Marshal(annotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}

	var testAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &testAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}

	pc, err := pcController.Create(fields.Annotations(testAnnotations))
	if err != nil {
		t.Errorf("got error during creation: %v", err)
	}
	if pc.ID == "" {
		t.Error("got empty pc ID")
	}

	if pc.PodID != testPodID {
		t.Errorf("Expected to get %s, got: %v", testPodID, pc.PodID)
	}

	if pc.Name != testCN {
		t.Errorf("Expected to get %s, got: %v", testCN, pc.Name)
	}

	if pc.AvailabilityZone != testAZ {
		t.Errorf("Expected to get %s, got: %v", testAZ, pc.AvailabilityZone)
	}

	if pc.PodSelector.String() != selector.String() {
		t.Errorf("Expected to get %s, got: %v", selector, pc.PodSelector)
	}

	if pc.Annotations["load_balancer_info"] != testAnnotations["load_balancer_info"] {
		t.Errorf("Expected to get %s, got: %v", testAnnotations, pc.Annotations)
	}

	if pc.Annotations["pager_information"] != testAnnotations["pager_information"] {
		t.Errorf("Expected to get %s, got: %v", testAnnotations, pc.Annotations)
	}
}
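The JSON round trip above only exists to turn a plain map into a fields.Annotations value. Since the store tests build fields.Annotations directly from a map[string]interface{}, an equivalent and shorter setup would be:

	testAnnotations := fields.Annotations(map[string]interface{}{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	})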
Example 4
func TestList(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything()

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	// Create a pod cluster
	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	// Create another one
	pc2, err := store.Create(podID+"2", az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	// Now test List() and make sure we get both back
	pcs, err := store.List()
	if err != nil {
		t.Fatalf("Unable to list pod clusters: %s", err)
	}

	if len(pcs) != 2 {
		t.Fatalf("Expected 2 results but there were %d", len(pcs))
	}

	for _, foundPC := range pcs {
		found := false
		for _, expectedPC := range []fields.ID{pc.ID, pc2.ID} {
			if foundPC.ID == expectedPC {
				found = true
			}
		}

		if !found {
			t.Errorf("Didn't find one of the pod clusters in the list")
		}
	}
}
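The nested loop at the end grows quadratically with the number of clusters. A set-based sketch of the same membership check:

	expected := map[fields.ID]bool{pc.ID: true, pc2.ID: true}
	for _, foundPC := range pcs {
		if !expected[foundPC.ID] {
			t.Errorf("Unexpected pod cluster in the list: %s", foundPC.ID)
		}
	}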
Example 5
func TestWatch(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	var watched WatchedPodClusters
	session := kptest.NewSession()
	pc, err := store.Create(podID, az, clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create first pod cluster: %s", err)
	}

	pc2, err := store.Create(podID, "us-east", clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create second pod cluster: %s", err)
	}

	quit := make(chan struct{})
	defer close(quit)
	watch := store.Watch(quit)

	select {
	case watchedPC := <-watch:
		watched = watchedPC
	case <-time.After(5 * time.Second):
		t.Fatal("nothing on the channel")
	}

	if len(watched.Clusters) != 2 {
		t.Fatalf("Expected to get two watched PodClusters, but did not: got %v", len(watched.Clusters))
	}

	expectedIDs := []fields.ID{pc.ID, pc2.ID}
	for _, id := range expectedIDs {
		found := false
		for _, pc := range watched.Clusters {
			if id == pc.ID {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", id)
		}
	}
}
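This test and Example 8 read two watch payload types that are not defined in this listing. Shapes consistent with how the tests use them, offered as assumptions only:

type WatchedPodClusters struct {
	// Clusters holds the full set of pod clusters seen by a watch pass.
	Clusters []*fields.PodCluster
}

type WatchedPodCluster struct {
	// PodCluster is the single watched cluster; Err would carry watch
	// failures. Both fields are inferred from the call sites.
	PodCluster *fields.PodCluster
	Err        error
}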
Example 6
func createPodCluster(store *consulStore, t *testing.T) fields.PodCluster {
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	session := kptest.NewSession()
	pc, err := store.Create(podID, az, clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	if pc.ID == "" {
		t.Errorf("pod cluster should have an id")
	}

	if pc.PodID == "" {
		t.Errorf("pod cluster should have a pod id")
	}

	if pc.PodID != podID {
		t.Errorf("pod id wasn't properly set. Wanted '%s' got '%s'", podID, pc.PodID)
	}

	if pc.AvailabilityZone != az {
		t.Errorf("availability zone wasn't properly set. Wanted '%s' got '%s'", az, pc.AvailabilityZone)
	}

	if pc.Name != clusterName {
		t.Errorf("cluster name wasn't properly set. Wanted '%s' got '%s'", clusterName, pc.Name)
	}

	testLabels := klabels.Set{
		fields.PodIDLabel:            podID.String(),
		fields.AvailabilityZoneLabel: az.String(),
		fields.ClusterNameLabel:      clusterName.String(),
	}

	if matches := pc.PodSelector.Matches(testLabels); !matches {
		t.Errorf("the pod cluster has a bad pod selector")
	}

	if pc.Annotations["foo"] != "bar" {
		t.Errorf("Annotations didn't match expected")
	}
	return pc
}
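createPodCluster is a shared fixture rather than a test: it creates a cluster, asserts the basic invariants itself, and hands the result back. A hypothetical caller would look like this:

func TestGetAfterCreate(t *testing.T) {
	store := consulStoreWithFakeKV()
	// The helper fails the test on its own if creation goes wrong, so the
	// returned cluster can be used directly.
	pc := createPodCluster(store, t)

	if _, err := store.Get(pc.ID); err != nil {
		t.Fatalf("Unable to fetch the cluster the helper created: %s", err)
	}
}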
Example 7
func TestDelete(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	// Create a pod cluster
	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	pc, err = store.Get(pc.ID)
	if err != nil {
		t.Fatalf("Unable to get pod cluster: %s", err)
	}

	err = store.Delete(pc.ID)
	if err != nil {
		t.Fatalf("Unexpected error deleting pod cluster: %s", err)
	}

	_, err = store.Get(pc.ID)
	if err == nil {
		t.Fatalf("Should have gotten an error fetching a deleted pod cluster")
	}

	if !IsNotExist(err) {
		t.Errorf("The error should have been a pocstore.IsNotExist but was '%s'", err)
	}

	labels, err := store.applicator.GetLabels(labels.PC, pc.ID.String())
	if err != nil {
		t.Fatalf("Got error when trying to confirm label deletion: %s", err)
	}

	if len(labels.Labels) != 0 {
		t.Errorf("Labels were not deleted along with the pod cluster")
	}
}
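TestDelete relies on an IsNotExist predicate, and Example 9 shows the store also exposes a pcstore.NoPodCluster sentinel. A sketch consistent with both, assuming a plain sentinel comparison (the error text and the errors import are assumptions):

// NoPodCluster is the sentinel returned when a pod cluster is missing.
var NoPodCluster = errors.New("no pod cluster found")

// IsNotExist reports whether err is the missing-cluster sentinel.
func IsNotExist(err error) bool {
	return err == NoPodCluster
}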
Example 8
func TestWatchPodCluster(t *testing.T) {
	store := consulStoreWithFakeKV()
	pod := fields.ID("pod_id")
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{pod.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	session := kptest.NewSession()
	pc, err := store.Create(podID, az, clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	quit := make(chan struct{})
	watch := store.WatchPodCluster(pc.ID, quit)

	var watched *WatchedPodCluster
	select {
	case watchedPC := <-watch:
		watched = &watchedPC
	case <-time.After(5 * time.Second):
		quit <- struct{}{}
		t.Fatal("nothing on the channel")
	}

	if watched == nil {
		t.Fatalf("Expected to get a watched PodCluster, but did not")
	}

	if watched.PodCluster.ID != pc.ID {
		t.Fatalf("Expected watched PodCluster to match %s Pod Cluster ID. Instead was %s", pc.ID, watched.PodCluster.ID)
	}
}
Example 9
func TestPodClusterFromID(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := kptest.NewSession()
	fakePCStore := pcstoretest.NewFake()

	pcControllerFromLabels := NewPodCluster(testAZ, testCN, testPodID, fakePCStore, selector, session)
	pc, err := pcControllerFromLabels.Create(fields.Annotations{})
	if err != nil {
		t.Fatal(err)
	}
	pcControllerFromLabels = nil

	pcControllerFromID := NewPodClusterFromID(pc.ID, session, fakePCStore)
	retrievedPC, err := pcControllerFromID.Get()
	if err != nil {
		t.Fatal(err)
	}
	if pc.ID != retrievedPC.ID {
		t.Errorf("Did not get correct PC back from datastore, expected %s, got %s.\n%v", pc.ID, retrievedPC.ID, retrievedPC)
	}

	errs := pcControllerFromID.Delete()
	if len(errs) > 0 {
		t.Fatalf("%v", errs)
	}

	notFoundPC, err := pcControllerFromID.Get()
	if err != pcstore.NoPodCluster {
		t.Errorf("Expected to get pcstore.NoPodCluster, but got %v", err)
	}

	if notFoundPC.ID != "" {
		t.Errorf("Expected to not find PC but found %v", notFoundPC)
	}
}
Example 10
func TestWatchAndSync(t *testing.T) {
	store := consulStoreWithFakeKV()
	quit := make(chan struct{})
	defer close(quit)
	syncSignal := make(chan struct{})
	defer close(syncSignal)

	example := examplePodCluster()
	pc1, err := store.Create(
		example.PodID,
		example.AvailabilityZone,
		"name1",
		example.PodSelector,
		example.Annotations,
		kptest.NewSession(),
	)
	if err != nil {
		t.Fatalf("Couldn't create test pod cluster: %s", err)
	}

	pc2, err := store.Create(
		example.PodID,
		example.AvailabilityZone,
		"name2",
		example.PodSelector,
		example.Annotations,
		kptest.NewSession(),
	)
	if err != nil {
		t.Fatalf("Couldn't create test pod cluster: %s", err)
	}

	// Include one of the 2 clusters as initial
	syncer := &RecordingSyncer{
		InitialClusters: []fields.ID{pc1.ID},
		SyncSignal:      syncSignal,
	}

	go func() {
		// t.Fatalf must not be called from a goroutine other than the one
		// running the test, so report the failure with t.Errorf instead.
		if err := store.WatchAndSync(syncer, quit); err != nil {
			t.Errorf("Couldn't start WatchAndSync(): %s", err)
		}
	}()

	expectedSyncCount := 2
	actualSyncCount := 0
	for actualSyncCount < expectedSyncCount {
		select {
		case <-time.After(1 * time.Second):
			t.Fatalf("Timed out waiting for sync to happen, there were %d syncs: %s", actualSyncCount, syncer.SyncClusterCalls)
		case <-syncSignal:
			actualSyncCount++
		}
	}

	for _, expectedID := range []fields.ID{pc1.ID, pc2.ID} {
		found := false
		for _, id := range syncer.SyncClusterCalls {
			if id == expectedID {
				found = true
				break
			}
		}

		if !found {
			t.Fatalf("Expected sync to be called for %s but it wasn't. Called for %s", expectedID, syncer.SyncClusterCalls)
		}
	}
}
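RecordingSyncer is a test double defined elsewhere. A minimal sketch that would satisfy this test, inferred from the three fields the test touches; the SyncCluster signature and the mutex are assumptions, and the real syncer interface likely has more methods:

type RecordingSyncer struct {
	InitialClusters []fields.ID
	SyncSignal      chan struct{}

	mu               sync.Mutex
	SyncClusterCalls []fields.ID
}

func (r *RecordingSyncer) SyncCluster(pc *fields.PodCluster) error {
	// Record which cluster was synced, then signal the test.
	r.mu.Lock()
	r.SyncClusterCalls = append(r.SyncClusterCalls, pc.ID)
	r.mu.Unlock()
	r.SyncSignal <- struct{}{}
	return nil
}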
Example 11
func TestGet(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	// Create a pod cluster
	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	pc, err = store.Get(pc.ID)
	if err != nil {
		t.Fatalf("Unable to get pod cluster: %s", err)
	}

	if pc.ID == "" {
		t.Errorf("pod cluster should have an id")
	}

	if pc.PodID == "" {
		t.Errorf("pod cluster should have a pod id")
	}

	if pc.PodID != podID {
		t.Errorf("pod id wasn't properly set. Wanted '%s' got '%s'", podID, pc.PodID)
	}

	if pc.AvailabilityZone != az {
		t.Errorf("availability zone wasn't properly set. Wanted '%s' got '%s'", az, pc.AvailabilityZone)
	}

	if pc.Name != clusterName {
		t.Errorf("cluster name wasn't properly set. Wanted '%s' got '%s'", clusterName, pc.Name)
	}

	testLabels := klabels.Set{
		fields.PodIDLabel:            podID.String(),
		fields.AvailabilityZoneLabel: az.String(),
		fields.ClusterNameLabel:      clusterName.String(),
	}

	if matches := pc.PodSelector.Matches(testLabels); !matches {
		t.Errorf("the pod cluster has a bad pod selector")
	}

	if pc.Annotations["foo"] != "bar" {
		t.Errorf("Annotations didn't match expected")
	}

	found, err := store.FindWhereLabeled(podID, az, clusterName)
	if err != nil {
		t.Errorf("Could not retrieve labeled pods: %v", err)
	}

	if len(found) != 1 {
		t.Errorf("Found incorrect number of labeled pods, expected 1: %v", len(found))
	}

	if found[0].ID != pc.ID {
		t.Errorf("Didn't find the right pod cluster: %v vs %v", found[0].ID, pc.ID)
	}
}
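FindWhereLabeled is not shown, but Example 1 demonstrates the applicator's GetMatches call it would need. A plausible implementation, with the method's exact shape being an assumption:

func (s *consulStore) FindWhereLabeled(
	podID types.PodID,
	az fields.AvailabilityZone,
	clusterName fields.ClusterName,
) ([]fields.PodCluster, error) {
	// Build the canonical three-label selector and fetch every pod
	// cluster whose labels match it.
	sel := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	matches, err := s.applicator.GetMatches(sel, labels.PC)
	if err != nil {
		return nil, err
	}

	clusters := make([]fields.PodCluster, 0, len(matches))
	for _, m := range matches {
		pc, err := s.Get(fields.ID(m.ID))
		if err != nil {
			return nil, err
		}
		clusters = append(clusters, pc)
	}
	return clusters, nil
}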
Example 12
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a third unschedulable node and verify it doesn't get scheduled by anything
	applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined")

	labeled, err = waitForPodLabel(applicator, false, "node3/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	// Now add 10 new nodes and verify that they are scheduled by the first daemon set
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1")
		Assert(t).IsNil(err, "expected no error labeling node")
	}
	for i := 0; i < 10; i++ {
		podPath := fmt.Sprintf("good_node%v/testPod", i)
		labeled, err = waitForPodLabel(applicator, true, podPath)
		Assert(t).IsNil(err, "Expected pod to have a dsID label")
		dsID := labeled.Labels.Get(DSIDLabel)
		Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	}

	//
	// Update a daemon set's node selector and expect a node to be unscheduled
	//
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	//
	// Now update the node selector to schedule node2 again and verify
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Disabling a daemon set should not unschedule any nodes
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true

		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify it is disabled
	condition := func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if !anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be disabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be disabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Enabling a daemon set should make the daemon set resume its regular activities
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")

	// Verify it is enabled
	condition = func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be enabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be enabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error enabling daemon set!")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")
}
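waitForPodLabel is another shared helper. One plausible shape, inferred from the call sites: poll until the pod's dsID label presence matches the expectation, then return the labels. The labels.Applicator parameter type is an assumption, and it leans on the waitForCondition sketch shown after Example 13:

func waitForPodLabel(applicator labels.Applicator, hasDSIDLabel bool, podPath string) (labels.Labeled, error) {
	var labeled labels.Labeled
	condition := func() error {
		var err error
		labeled, err = applicator.GetLabels(labels.POD, podPath)
		if err != nil {
			return err
		}
		if labeled.Labels.Has(DSIDLabel) != hasDSIDLabel {
			return util.Errorf("pod %s: dsID label presence was not yet %v", podPath, hasDSIDLabel)
		}
		return nil
	}
	return labeled, waitForCondition(condition)
}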
Example 13
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, then instantiate a farm and expect it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error added pod to intent tree")
	}

	// Assert that precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure no pods are left on any node
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
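waitForCondition retries a condition until it stops returning an error. A sketch with assumed deadline and poll interval:

func waitForCondition(condition func() error) error {
	// Poll until the condition passes or the (assumed) 10-second deadline
	// elapses, returning the condition's last error on timeout.
	timeout := time.After(10 * time.Second)
	for {
		err := condition()
		if err == nil {
			return nil
		}
		select {
		case <-timeout:
			return err
		case <-time.After(100 * time.Millisecond):
		}
	}
}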
Example 14
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force a contend,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}

	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")
}
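waitForDisabled can be phrased in terms of waitForCondition. The *dsstoretest.FakeDSStore parameter type is an assumed name for whatever dsstoretest.NewFake() returns, and dsf is unused in this sketch but kept to match the call sites:

func waitForDisabled(dsf *Farm, dsStore *dsstoretest.FakeDSStore, dsID ds_fields.ID, isDisabled bool) error {
	return waitForCondition(func() error {
		ds, _, err := dsStore.Get(dsID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set: %s", err)
		}
		if ds.Disabled != isDisabled {
			return util.Errorf("Expected daemon set %s to have Disabled=%v", dsID, isDisabled)
		}
		return nil
	})
}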
Example 15
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and then there should be a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}

	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")

	// Enable second ds and verify it is enabled
	_, err = dsStore.MutateDS(secondDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling second daemon set")

	// Enable the third ds and verify it is disabled because it contends with the second ds
	_, err = dsStore.MutateDS(thirdDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Test equivalent selectors, fifth ds should contend with fourth
	//
	anotherPodID := types.PodID("anotherPodID")

	anotherManifestBuilder := manifest.NewBuilder()
	anotherManifestBuilder.SetID(anotherPodID)
	anotherPodManifest := anotherManifestBuilder.GetManifest()

	equalSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})

	fourthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fourthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify the fourth daemon set is enabled
	err = waitForDisabled(dsf, dsStore, fourthDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling fourth daemon set")

	fifthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fifthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that the fifth daemon set contends and gets disabled
	err = waitForDisabled(dsf, dsStore, fifthDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling fifth daemon set")
}
Example 16
func TestUpdate(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := kptest.NewSession()
	pcstore := pcstoretest.NewFake()

	pcController := NewPodCluster(testAZ, testCN, testPodID, pcstore, selector, session)

	var annotations = map[string]string{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	}

	buf, err := json.Marshal(annotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}

	var testAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &testAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}

	pc, err := pcController.Create(fields.Annotations(testAnnotations))
	if err != nil {
		t.Fatalf("Unable to create pod cluster due to: %v", err)
	}

	newAnnotations := map[string]string{
		"pager_information": "555-111-2222",
		"priority":          "1001",
	}

	buf, err = json.Marshal(newAnnotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}

	var newTestAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &newTestAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}

	pc, err = pcController.Update(newTestAnnotations)
	if err != nil {
		t.Fatalf("Got error updating PC annotations: %v", err)
	}

	if pc.Annotations["pager_information"] != newAnnotations["pager_information"] {
		t.Errorf("Got unexpected pager_information. Expected %s, got %s", newAnnotations["pager_information"], pc.Annotations["pager_information"])
	}

	if pc.Annotations["priority"] != newAnnotations["priority"] {
		t.Errorf("Got unexpected priority. Expected %s, got %s", newAnnotations["priority"], pc.Annotations["priority"])
	}

	if pc.Annotations["load_balancer_info"] != nil {
		t.Errorf("Expected to erase old annotation field. Instead we have: %s", pc.Annotations["load_balancer_info"])
	}
}
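The last assertion shows that Update replaces the annotation map wholesale: load_balancer_info disappears even though only pager_information and priority were supplied. A caller that wants merge semantics would have to merge client-side first; an illustrative helper, not part of the store API:

func mergeAnnotations(old, updates map[string]string) map[string]string {
	// Copy the old annotations, then overlay the updates so new keys win.
	merged := make(map[string]string, len(old)+len(updates))
	for k, v := range old {
		merged[k] = v
	}
	for k, v := range updates {
		merged[k] = v
	}
	return merged
}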