Example 1
1
func TestLabelsOnCreate(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	matches, err := store.applicator.GetMatches(selector, labels.PC)
	if err != nil {
		t.Fatalf("Unable to check for label match on new pod cluster: %s", err)
	}

	if len(matches) != 1 {
		t.Errorf("Expected exactly one pod cluster to match the label selector, got %d", len(matches))
	}

	if fields.ID(matches[0].ID) != pc.ID {
		t.Errorf("The pod cluster selector didn't match the new pod cluster")
	}
}
Example 2
0
func TestCreate(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := kptest.NewSession()
	pcstore := pcstoretest.NewFake()

	pcController := NewPodCluster(testAZ, testCN, testPodID, pcstore, selector, session)

	annotations := map[string]string{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	}

	buf, err := json.Marshal(annotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}

	var testAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &testAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}

	pc, err := pcController.Create(fields.Annotations(testAnnotations))
	if err != nil {
		t.Errorf("got error during creation: %v", err)
	}
	if pc.ID == "" {
		t.Error("got empty pc ID")
	}

	if pc.PodID != testPodID {
		t.Errorf("Expected to get %s, got: %v", pc.PodID, testPodID)
	}

	if pc.Name != testCN {
		t.Errorf("Expected to get %s, got: %v", testCN, pc.Name)
	}

	if pc.AvailabilityZone != testAZ {
		t.Errorf("Expected to get %s, got: %v", testAZ, pc.AvailabilityZone)
	}

	if pc.PodSelector.String() != selector.String() {
		t.Errorf("Expected to get %s, got: %v", selector, pc.PodSelector)
	}

	if pc.Annotations["load_balancer_info"] != testAnnotations["load_balancer_info"] {
		t.Errorf("Expected to get %s, got: %v", testAnnotations, pc.Annotations)
	}

	if pc.Annotations["pager_information"] != testAnnotations["pager_information"] {
		t.Errorf("Expected to get %s, got: %v", testAnnotations, pc.Annotations)
	}
}
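The JSON marshal/unmarshal round-trip above is only one way to obtain a fields.Annotations value from a plain string map; the other examples in this listing build the value directly from a map literal. A tiny equivalent for illustration (the helper name exampleAnnotations is hypothetical):

// exampleAnnotations builds the same annotations without the JSON round-trip,
// matching the literal style used in the consul store tests in this listing.
func exampleAnnotations() fields.Annotations {
	return fields.Annotations(map[string]interface{}{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	})
}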
Example 3
0
func TestWatch(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	var watched WatchedPodClusters
	session := kptest.NewSession()
	pc, err := store.Create(podID, az, clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create first pod cluster: %s", err)
	}

	pc2, err := store.Create(podID, "us-east", clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create second pod cluster: %s", err)
	}

	quit := make(chan struct{})
	defer close(quit)
	watch := store.Watch(quit)

	select {
	case watchedPC := <-watch:
		watched = watchedPC
	case <-time.After(5 * time.Second):
		t.Fatal("nothing on the channel")
	}

	if len(watched.Clusters) != 2 {
		t.Fatalf("Expected to get two watched PodClusters, but did not: got %v", len(watched.Clusters))
	}

	expectedIDs := []fields.ID{pc.ID, pc2.ID}
	for _, id := range expectedIDs {
		found := false
		for _, pc := range watched.Clusters {
			if id == pc.ID {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", id)
		}
	}
}
Example 4
0
func TestList(t *testing.T) {
	store := consulStoreWithFakeKV()

	// Create first DaemonSet
	firstPodID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(firstPodID)

	firstManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	firstDS, err := store.Create(firstManifest, minHealth, clusterName, selector, firstPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	// Create second DaemonSet
	secondPodID := types.PodID("different_pod_id")

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(secondPodID)

	secondManifest := manifestBuilder.GetManifest()
	secondDS, err := store.Create(secondManifest, minHealth, clusterName, selector, secondPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	daemonSetList, err := store.List()
	if err != nil {
		t.Fatalf("Error getting list of daemon sets: %s", err)
	}

	Assert(t).AreEqual(len(daemonSetList), 2, "Unexpected number of daemon sets listed")

	for _, daemonSet := range daemonSetList {
		if daemonSet.ID == firstDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, firstPodID, "Listed daemon set pod ids were not equal")

		} else if daemonSet.ID == secondDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, secondPodID, "Listed daemon set pod ids were not equal")

		} else {
			t.Errorf("Unexpected daemon set listed: %v", daemonSet)
		}
	}
}
Example 5
0
func createPodCluster(store *consulStore, t *testing.T) fields.PodCluster {
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	session := kptest.NewSession()
	pc, err := store.Create(podID, az, clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	if pc.ID == "" {
		t.Errorf("pod cluster should have an id")
	}

	if pc.PodID == "" {
		t.Errorf("pod cluster should have a pod id")
	}

	if pc.PodID != podID {
		t.Errorf("pod id wasn't properly set. Wanted '%s' got '%s'", podID, pc.PodID)
	}

	if pc.AvailabilityZone != az {
		t.Errorf("availability zone wasn't properly set. Wanted '%s' got '%s'", az, pc.AvailabilityZone)
	}

	if pc.Name != clusterName {
		t.Errorf("cluster name wasn't properly set. Wanted '%s' got '%s'", clusterName, pc.Name)
	}

	testLabels := klabels.Set{
		fields.PodIDLabel:            podID.String(),
		fields.AvailabilityZoneLabel: az.String(),
		fields.ClusterNameLabel:      clusterName.String(),
	}

	if matches := pc.PodSelector.Matches(testLabels); !matches {
		t.Errorf("the pod cluster has a bad pod selector")
	}

	if pc.Annotations["foo"] != "bar" {
		t.Errorf("Annotations didn't match expected")
	}
	return pc
}
Example 6
0
func createDaemonSet(store *consulStore, t *testing.T) ds_fields.DaemonSet {
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)

	manifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	if ds.ID == "" {
		t.Error("daemon set should have an id")
	}

	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(ds.PodID, "", "Daemon set should have a pod id")

	Assert(t).AreEqual(ds.PodID, podID, "Daemon set pod id was not set correctly")
	Assert(t).AreEqual(ds.MinHealth, minHealth, "Daemon set minimum health was not set correctly")
	Assert(t).AreEqual(ds.Name, clusterName, "Daemon set cluster name was not set correctly")
	Assert(t).IsFalse(ds.Disabled, "Daemon set disabled field was not set correctly")

	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := ds.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := ds.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set manifest not set correctly")

	return ds
}
Example 7
0
func examplePodCluster() fields.PodCluster {
	podId := "slug"
	availabilityZone := "us-west"
	clusterName := "production"

	return fields.PodCluster{
		PodID:            types.PodID(podId),
		AvailabilityZone: fields.AvailabilityZone(availabilityZone),
		Name:             fields.ClusterName(clusterName),
		PodSelector:      klabels.Everything(),
		Annotations:      fields.Annotations{},
	}
}
Example 8
0
func TestDelete(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	// Create a pod cluster
	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	pc, err = store.Get(pc.ID)
	if err != nil {
		t.Fatalf("Unable to get pod cluster: %s", err)
	}

	err = store.Delete(pc.ID)
	if err != nil {
		t.Fatalf("Unexpected error deleting pod cluster: %s", err)
	}

	_, err = store.Get(pc.ID)
	if err == nil {
		t.Fatalf("Should have gotten an error fetching a deleted pod cluster")
	}

	if !IsNotExist(err) {
		t.Errorf("The error should have been a pocstore.IsNotExist but was '%s'", err)
	}

	labels, err := store.applicator.GetLabels(labels.PC, pc.ID.String())
	if err != nil {
		t.Fatalf("Got error when trying to confirm label deletion: %s", err)
	}

	if len(labels.Labels) != 0 {
		t.Errorf("Labels were not deleted along with the pod cluster")
	}
}
Example 9
0
func TestList(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything()

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	// Create a pod cluster
	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	// Create another one
	pc2, err := store.Create(podID+"2", az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	// Now test List() and make sure we get both back
	pcs, err := store.List()
	if err != nil {
		t.Fatalf("Unable to list pod clusters: %s", err)
	}

	if len(pcs) != 2 {
		t.Fatalf("Expected 2 results but there were %d", len(pcs))
	}

	for _, foundPC := range pcs {
		found := false
		for _, expectedPC := range []fields.ID{pc.ID, pc2.ID} {
			if foundPC.ID == expectedPC {
				found = true
			}
		}

		if !found {
			t.Errorf("Didn't find one of the pod clusters in the list")
		}
	}
}
Example 10
0
func TestWatchPodCluster(t *testing.T) {
	store := consulStoreWithFakeKV()
	pod := fields.ID("pod_id")
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{pod.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	session := kptest.NewSession()
	pc, err := store.Create(podID, az, clusterName, selector, annotations, session)
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	quit := make(chan struct{})
	watch := store.WatchPodCluster(pc.ID, quit)

	var watched *WatchedPodCluster
	select {
	case watchedPC := <-watch:
		watched = &watchedPC
	case <-time.After(5 * time.Second):
		quit <- struct{}{}
		t.Fatal("nothing on the channel")
	}

	if watched == nil {
		t.Fatalf("Expected to get a watched PodCluster, but did not")
	}

	if watched.PodCluster.ID != pc.ID {
		t.Fatalf("Expected watched PodCluster to match %s Pod Cluster ID. Instead was %s", pc.ID, watched.PodCluster.ID)
	}
}
Example 11
0
func TestPodClusterFromID(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := kptest.NewSession()
	fakePCStore := pcstoretest.NewFake()

	pcControllerFromLabels := NewPodCluster(testAZ, testCN, testPodID, fakePCStore, selector, session)
	pc, err := pcControllerFromLabels.Create(fields.Annotations{})
	if err != nil {
		t.Fatal(err)
	}
	pcControllerFromLabels = nil

	pcControllerFromID := NewPodClusterFromID(pc.ID, session, fakePCStore)
	retrievedPC, err := pcControllerFromID.Get()
	if err != nil {
		t.Fatal(err)
	}
	if pc.ID != retrievedPC.ID {
		t.Errorf("Did not get correct PC back from datastore, expected %s, got %s.\n%v", pc.ID, retrievedPC.ID, retrievedPC)
	}

	errs := pcControllerFromID.Delete()
	if len(errs) > 0 {
		t.Fatalf("%v", errs)
	}

	notFoundPC, err := pcControllerFromID.Get()
	if err != pcstore.NoPodCluster {
		t.Errorf("Expected to get pcstore.NoPodCluster, but got %v", err)
	}

	if notFoundPC.ID != "" {
		t.Errorf("Expected to not find PC but found %v", notFoundPC)
	}
}
Example 12
0
func TestCreate(t *testing.T) {
	store := consulStoreWithFakeKV()
	createDaemonSet(store, t)

	// Create a bad DaemonSet
	podID := types.PodID("")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID("")

	podManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad pod id")
	}

	podID = types.PodID("pod_id")
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad manifest pod id")
	}

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID("different_pod_id")

	podManifest = manifestBuilder.GetManifest()
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on pod id and manifest pod id mismatch")
	}
}
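The three failing Create calls above imply that the daemon set store validates the pod ID against the manifest before writing anything. A minimal sketch of those checks follows; the helper name validateDSCreate is hypothetical, and it assumes the manifest exposes its pod ID via an ID() method:

// validateDSCreate mirrors the validation the test expects from store.Create
// (a sketch, not the real implementation).
func validateDSCreate(podID types.PodID, podManifest manifest.Manifest) error {
	if podID == "" {
		return util.Errorf("pod ID must not be empty")
	}
	if podManifest.ID() == "" {
		return util.Errorf("manifest pod ID must not be empty")
	}
	if podManifest.ID() != podID {
		return util.Errorf("pod ID %q does not match manifest pod ID %q", podID, podManifest.ID())
	}
	return nil
}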
Example 13
0
func TestConcreteSyncerWithPrevious(t *testing.T) {
	store := consulStoreWithFakeKV()
	store.logger.Logger.Level = logrus.DebugLevel

	store.applicator.SetLabel(labels.POD, "1234-123-123-1234", "color", "red")
	store.applicator.SetLabel(labels.POD, "abcd-abc-abc-abcd", "color", "blue")

	syncer := &fakeSyncer{
		[]fields.ID{},
		make(chan fakeSync),
		make(chan fakeSync),
		false,
	}

	// Previous == current, simulates a concrete syncer starting up
	change := podClusterChange{
		previous: &fields.PodCluster{
			ID:               fields.ID("abc123"),
			PodID:            types.PodID("vvv"),
			AvailabilityZone: fields.AvailabilityZone("west"),
			Name:             "production",
			PodSelector:      klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
		},
		current: &fields.PodCluster{
			ID:               fields.ID("abc123"),
			PodID:            types.PodID("vvv"),
			AvailabilityZone: fields.AvailabilityZone("west"),
			Name:             "production",
			PodSelector:      klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
		},
	}

	changes := make(chan podClusterChange)
	go store.handlePCUpdates(syncer, changes, metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))

	select {
	case changes <- change:
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to write change to handlePCChange")
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to read from the syncer")
	case sync := <-syncer.synced:
		if sync.syncedCluster == nil {
			t.Fatal("unexpectedly didn't get a cluster on the sync channel")
		}
		if sync.syncedCluster.ID != change.current.ID {
			t.Fatalf("got unexpected synced cluster %v", sync.syncedCluster.ID)
		}
		if len(sync.syncedPods) != 1 {
			t.Fatalf("got unexpected number of synced pods with cluster: %v", len(sync.syncedPods))
		}
		if sync.syncedPods[0].ID != "1234-123-123-1234" {
			t.Fatalf("got unexpected pod ID from labeled pods sync: %v", sync.syncedPods[0].ID)
		}
	}

	// now we send a new update that changes the pod cluster's target pod from the red one to the blue one.
	// (from 1234-123-123-1234 to abcd-abc-abc-abcd )
	change = podClusterChange{
		previous: change.current,
		current: &fields.PodCluster{
			ID:               fields.ID("abc123"),
			PodID:            types.PodID("vvv"),
			AvailabilityZone: fields.AvailabilityZone("west"),
			Name:             "production",
			PodSelector:      klabels.Everything().Add("color", klabels.EqualsOperator, []string{"blue"}),
		},
	}

	select {
	case changes <- change:
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to write change to handlePCChange")
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to read from the syncer")
	case sync := <-syncer.synced:
		if sync.syncedCluster == nil {
			t.Fatal("unexpectedly didn't get a cluster on the sync channel")
		}
		if sync.syncedCluster.ID != change.current.ID {
			t.Fatalf("got unexpected synced cluster %v", sync.syncedCluster.ID)
		}
		if len(sync.syncedPods) != 1 {
			t.Fatalf("got unexpected number of synced pods with cluster: %v", len(sync.syncedPods))
		}
		if sync.syncedPods[0].ID != "abcd-abc-abc-abcd" {
			t.Fatalf("got unexpected pod ID from labeled pods sync: %v", sync.syncedPods[0].ID)
		}
	}

	// appear to have deleted the cluster
	change.previous = change.current
	change.current = nil

	select {
	case changes <- change:
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to write deletion change to handlePCChange")
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to read from the syncer")
	case sync := <-syncer.deleted:
		if sync.syncedCluster == nil {
			t.Fatal("unexpectedly didn't get a cluster on the sync channel")
		}
		if sync.syncedCluster.ID != change.previous.ID {
			t.Fatalf("got unexpected synced cluster %v", sync.syncedCluster.ID)
		}
	}

	close(changes)
}
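The fakeSyncer and fakeSync types used above are not shown in this listing. The sketch below reconstructs them from how the test builds and reads them; the field names, the syncer method names (GetInitialClusters, SyncCluster, DeleteCluster), and their signatures are assumptions inferred from usage, not the real definitions.

// fakeSync carries what the fake syncer observed for one cluster update.
type fakeSync struct {
	syncedCluster *fields.PodCluster
	syncedPods    []labels.Labeled
}

// fakeSyncer records syncs and deletions on channels so the test can assert on them.
type fakeSyncer struct {
	initialClusters []fields.ID
	synced          chan fakeSync
	deleted         chan fakeSync
	failSync        bool
}

// GetInitialClusters is assumed to be part of the syncer interface, given the
// first field of the struct literal in the test.
func (f *fakeSyncer) GetInitialClusters() ([]fields.ID, error) {
	return f.initialClusters, nil
}

// SyncCluster reports each synced cluster together with the pods its selector matched.
func (f *fakeSyncer) SyncCluster(pc *fields.PodCluster, pods []labels.Labeled) error {
	f.synced <- fakeSync{syncedCluster: pc, syncedPods: pods}
	return nil
}

// DeleteCluster reports deletions on a separate channel, as the final assertion expects.
func (f *fakeSyncer) DeleteCluster(pc fields.PodCluster) error {
	f.deleted <- fakeSync{syncedCluster: &pc}
	return nil
}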
Example 14
0
func TestUpdate(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := kptest.NewSession()
	pcstore := pcstoretest.NewFake()

	pcController := NewPodCluster(testAZ, testCN, testPodID, pcstore, selector, session)

	var annotations = map[string]string{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	}

	buf, err := json.Marshal(annotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}

	var testAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &testAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}

	pc, err := pcController.Create(fields.Annotations(testAnnotations))
	if err != nil {
		t.Fatalf("Unable to create pod cluster due to: %v", err)
	}

	newAnnotations := map[string]string{
		"pager_information": "555-111-2222",
		"priority":          "1001",
	}

	buf, err = json.Marshal(newAnnotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}

	var newTestAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &newTestAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}

	pc, err = pcController.Update(newTestAnnotations)
	if err != nil {
		t.Fatalf("Got error updating PC annotations: %v", err)
	}

	if pc.Annotations["pager_information"] != newAnnotations["pager_information"] {
		t.Errorf("Got unexpected pager_information. Expected %s, got %s", newAnnotations["pager_information"], pc.Annotations["pager_information"])
	}

	if pc.Annotations["priority"] != newAnnotations["priority"] {
		t.Errorf("Got unexpected priority. Expected %s, got %s", newAnnotations["priority"], pc.Annotations["priority"])
	}

	if pc.Annotations["load_balancer_info"] != nil {
		t.Errorf("Expected to erase old annotation field. Instead we have: %s", pc.Annotations["load_balancer_info"])
	}
}
Example 15
0
func TestMutate(t *testing.T) {
	store := consulStoreWithFakeKV()
	oldPc := createPodCluster(store, t)

	// After creating the pod cluster, we now update it using a mutator function
	newPodID := types.PodID("pod_id-diff")
	newAz := fields.AvailabilityZone("us-west-diff")
	newClusterName := fields.ClusterName("cluster_name_diff")

	newSelector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{newPodID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{newAz.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{newClusterName.String()})

	newAnnotations := fields.Annotations(map[string]interface{}{
		"bar": "foo",
	})

	intendedPC := fields.PodCluster{
		ID:               oldPc.ID,
		PodID:            newPodID,
		AvailabilityZone: newAz,
		Name:             newClusterName,
		PodSelector:      newSelector,
		Annotations:      newAnnotations,
	}

	mutator := func(pc fields.PodCluster) (fields.PodCluster, error) {
		pc.PodID = intendedPC.PodID
		pc.AvailabilityZone = intendedPC.AvailabilityZone
		pc.Name = intendedPC.Name
		pc.PodSelector = intendedPC.PodSelector
		pc.Annotations = intendedPC.Annotations
		return pc, nil
	}

	_, err := store.MutatePC(intendedPC.ID, mutator)
	if err != nil {
		t.Fatalf("Unable to update pod cluster: %s", err)
	}

	newPC, err := store.Get(intendedPC.ID)
	if err != nil {
		t.Fatalf("Unable to find pod cluster: %s", err)
	}

	if newPC.ID == "" {
		t.Errorf("pod cluster should have an id")
	}

	if newPC.PodID == "" {
		t.Errorf("pod cluster should have a pod id")
	}

	if newPC.ID != oldPc.ID {
		t.Errorf("id should not have been updated. Wanted '%s' got '%s'", oldPc.ID, newPC.ID)
	}

	if newPC.PodID != newPodID {
		t.Errorf("pod id wasn't properly updated. Wanted '%s' got '%s'", newPodID, newPC.PodID)
	}

	if newPC.AvailabilityZone != newAz {
		t.Errorf("availability zone wasn't properly updated. Wanted '%s' got '%s'", newAz, newPC.AvailabilityZone)
	}

	if newPC.Name != newClusterName {
		t.Errorf("cluster name wasn't properly updated. Wanted '%s' got '%s'", newClusterName, newPC.Name)
	}

	newTestLabels := klabels.Set{
		fields.PodIDLabel:            newPodID.String(),
		fields.AvailabilityZoneLabel: newAz.String(),
		fields.ClusterNameLabel:      newClusterName.String(),
	}

	if matches := newPC.PodSelector.Matches(newTestLabels); !matches {
		t.Errorf("the pod cluster has a bad pod selector")
	}

	if newPC.Annotations["bar"] != "foo" {
		t.Errorf("Annotations didn't match expected")
	} else if _, ok := newPC.Annotations["foo"]; ok {
		t.Errorf("Old annotation key 'foo' should have been removed by the mutation")
	}
}
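MutatePC itself is not shown, but the test implies a read-modify-write cycle in which the mutator's result replaces the stored cluster while the ID stays fixed. A rough sketch under that assumption (the write step s.put is hypothetical, and any compare-and-swap or locking the real store performs is omitted):

// mutatePCSketch illustrates the assumed semantics of MutatePC; not the real code.
func (s *consulStore) mutatePCSketch(
	id fields.ID,
	mutator func(fields.PodCluster) (fields.PodCluster, error),
) (fields.PodCluster, error) {
	pc, err := s.Get(id)
	if err != nil {
		return fields.PodCluster{}, err
	}

	mutated, err := mutator(pc)
	if err != nil {
		return fields.PodCluster{}, err
	}
	// The test asserts the ID is preserved across a mutation.
	mutated.ID = pc.ID

	if err := s.put(mutated); err != nil { // s.put: hypothetical persistence step
		return fields.PodCluster{}, err
	}
	return mutated, nil
}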
Example 16
0
func TestGet(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("pod_id")
	az := fields.AvailabilityZone("us-west")
	clusterName := fields.ClusterName("cluster_name")

	selector := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	annotations := fields.Annotations(map[string]interface{}{
		"foo": "bar",
	})

	// Create a pod cluster
	pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession())
	if err != nil {
		t.Fatalf("Unable to create pod cluster: %s", err)
	}

	pc, err = store.Get(pc.ID)
	if err != nil {
		t.Fatalf("Unable to get pod cluster: %s", err)
	}

	if pc.ID == "" {
		t.Errorf("pod cluster should have an id")
	}

	if pc.PodID == "" {
		t.Errorf("pod cluster should have a pod id")
	}

	if pc.PodID != podID {
		t.Errorf("pod id wasn't properly set. Wanted '%s' got '%s'", podID, pc.PodID)
	}

	if pc.AvailabilityZone != az {
		t.Errorf("availability zone wasn't properly set. Wanted '%s' got '%s'", az, pc.AvailabilityZone)
	}

	if pc.Name != clusterName {
		t.Errorf("cluster name wasn't properly set. Wanted '%s' got '%s'", clusterName, pc.Name)
	}

	testLabels := klabels.Set{
		fields.PodIDLabel:            podID.String(),
		fields.AvailabilityZoneLabel: az.String(),
		fields.ClusterNameLabel:      clusterName.String(),
	}

	if matches := pc.PodSelector.Matches(testLabels); !matches {
		t.Errorf("the pod cluster has a bad pod selector")
	}

	if pc.Annotations["foo"] != "bar" {
		t.Errorf("Annotations didn't match expected")
	}

	found, err := store.FindWhereLabeled(podID, az, clusterName)
	if err != nil {
		t.Errorf("Could not retrieve labeled pods: %v", err)
	}

	if len(found) != 1 {
		t.Errorf("Found incorrect number of labeled pods, expected 1: %v", len(found))
	}

	if found[0].ID != pc.ID {
		t.Errorf("Didn't find the right pod cluster: %v vs %v", found[0].ID, pc.ID)
	}
}
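FindWhereLabeled is also not shown. Given the selector-and-GetMatches pattern from Example 1, it presumably builds the standard three-label selector and resolves each match to a pod cluster. A sketch under that assumption (the body is a guess; only the signature is implied by the call above):

// FindWhereLabeled sketched as a thin wrapper over the label applicator;
// the real implementation may differ.
func (s *consulStore) FindWhereLabeled(
	podID types.PodID,
	az fields.AvailabilityZone,
	clusterName fields.ClusterName,
) ([]fields.PodCluster, error) {
	sel := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	matches, err := s.applicator.GetMatches(sel, labels.PC)
	if err != nil {
		return nil, err
	}

	ret := make([]fields.PodCluster, 0, len(matches))
	for _, match := range matches {
		pc, err := s.Get(fields.ID(match.ID))
		if err != nil {
			return nil, err
		}
		ret = append(ret, pc)
	}
	return ret, nil
}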
Example 17
0
func main() {
	cmd, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	kv := kp.NewConsulStore(client)
	logger := logging.NewLogger(logrus.Fields{})
	pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger)
	session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil)
	if err != nil {
		log.Fatalf("Could not create session: %s", err)
	}

	switch cmd {
	case cmdCreateText:
		az := fields.AvailabilityZone(*createAZ)
		cn := fields.ClusterName(*createName)
		podID := types.PodID(*createPodID)
		selector := selectorFrom(az, cn, podID)
		pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session)

		annotations := *createAnnotations
		var parsedAnnotations map[string]interface{}
		err := json.Unmarshal([]byte(annotations), &parsedAnnotations)
		if err != nil {
			log.Fatalf("could not parse json: %v", err)
		}
		_, err = pccontrol.Create(parsedAnnotations)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
	case cmdGetText:
		az := fields.AvailabilityZone(*getAZ)
		cn := fields.ClusterName(*getName)
		podID := types.PodID(*getPodID)
		pcID := fields.ID(*getID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		pc, err := pccontrol.Get()
		if err != nil {
			log.Fatalf("Caught error while fetching pod cluster: %v", err)
		}

		bytes, err := json.Marshal(pc)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal PC as JSON")
		}
		fmt.Printf("%s", bytes)
	case cmdDeleteText:
		az := fields.AvailabilityZone(*deleteAZ)
		cn := fields.ClusterName(*deleteName)
		podID := types.PodID(*deletePodID)
		pcID := fields.ID(*deleteID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		errors := pccontrol.Delete()
		if len(errors) >= 1 {
			for _, err := range errors {
				_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error())))
			}
			os.Exit(1)
		}
	case cmdUpdateText:
		az := fields.AvailabilityZone(*updateAZ)
		cn := fields.ClusterName(*updateName)
		podID := types.PodID(*updatePodID)
		pcID := fields.ID(*updateID)

		var pccontrol *control.PodCluster
		if pcID != "" {
			pccontrol = control.NewPodClusterFromID(pcID, session, pcstore)
		} else if az != "" && cn != "" && podID != "" {
			selector := selectorFrom(az, cn, podID)
			pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session)
		} else {
			log.Fatalf("Expected one of: pcID or (pod,az,name)")
		}

		var annotations fields.Annotations
		err := json.Unmarshal([]byte(*updateAnnotations), &annotations)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err)))
			os.Exit(1)
		}

		pc, err := pccontrol.Update(annotations)
		if err != nil {
			log.Fatalf("Error during PodCluster update: %v\n%v", err, pc)
			os.Exit(1)
		}
		bytes, err := json.Marshal(pc)
		if err != nil {
			log.Fatalf("Update succeeded, but error during displaying PC: %v\n%+v", err, pc)
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	case cmdListText:
		pcs, err := pcstore.List()
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err)))
			os.Exit(1)
		}

		bytes, err := json.Marshal(pcs)
		if err != nil {
			_, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err)))
			os.Exit(1)
		}
		fmt.Printf("%s", bytes)
	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
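The pcctl commands above rely on a selectorFrom helper that is not included in this listing. Given the selector construction repeated throughout the test examples, it most likely looks like the sketch below (assuming the Kubernetes-style label package is imported as klabels here, as in the tests):

// selectorFrom builds the standard pod-cluster selector from its three
// identifying labels; inferred from the pattern in the surrounding examples.
func selectorFrom(az fields.AvailabilityZone, cn fields.ClusterName, podID types.PodID) klabels.Selector {
	return klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{cn.String()})
}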
Example 18
0
func TestGet(t *testing.T) {
	store := consulStoreWithFakeKV()
	//
	// Create DaemonSet
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)

	manifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")

	//
	// Get DaemonSet and verify it is the same
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Error retrieving created daemon set: %s", err)
	}

	Assert(t).AreNotEqual(getDS.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(getDS.PodID, "", "Daemon set should have a pod id")

	Assert(t).AreEqual(ds.ID, getDS.ID, "Daemon set IDs should be equal")
	Assert(t).AreEqual(ds.PodID, getDS.PodID, "Daemon set should have equal pod ids")
	Assert(t).AreEqual(ds.MinHealth, getDS.MinHealth, "Daemon set should have equal minimum healths")
	Assert(t).AreEqual(ds.Name, getDS.Name, "Daemon set should have equal names")
	Assert(t).AreEqual(ds.Disabled, getDS.Disabled, "Daemon set should have same disabled fields")

	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set shas were not equal")

	// Invalid get operation
	_, _, err = store.Get("bad_id")
	if err == nil {
		t.Error("Expected get operation to fail when getting a daemon set which does not exist")
	}
}
Example 19
0
func TestWatchAll(t *testing.T) {
	store := consulStoreWithFakeKV()
	//
	// Create a new daemon set
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Create another new daemon set
	//
	someOtherPodID := types.PodID("some_other_pod_id")

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()

	someOtherDS, err := store.Create(someOtherManifest, minHealth, clusterName, selector, someOtherPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Watch for create and verify
	//
	quitCh := make(chan struct{})
	inCh := store.WatchAll(quitCh, 0)
	defer close(quitCh)

	var watched WatchedDaemonSetList
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}

	Assert(t).AreEqual(len(watched.DaemonSets), 2, "Unexpected number of daemon sets watched")

	for _, watchedDS := range watched.DaemonSets {
		if watchedDS.ID == ds.ID {
			Assert(t).AreEqual(watchedDS.PodID, ds.PodID, "Daemon sets should have equal pod ids")
		} else if watchedDS.ID == someOtherDS.ID {
			Assert(t).AreEqual(watchedDS.PodID, someOtherDS.PodID, "Daemon sets should have equal pod ids")
		} else {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", watchedDS.ID)
		}
	}

	//
	// Watch for delete and verify
	//
	err = store.Delete(someOtherDS.ID)
	if err != nil {
		t.Error("Unable to delete daemon set")
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}

	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")

	//
	// Watch for update and verify
	//
	mutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = !dsToMutate.Disabled
		return dsToMutate, nil
	}

	ds, err = store.MutateDS(ds.ID, mutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}

	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}

	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")
	Assert(t).AreEqual(ds.PodID, watched.DaemonSets[0].PodID, "Daemon sets should have equal pod ids")
}
Example 20
0
func TestMutate(t *testing.T) {
	store := consulStoreWithFakeKV()

	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Invalid mutates
	//
	errorMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		return dsToMutate, util.Errorf("This is an error")
	}
	_, err = store.MutateDS(ds.ID, errorMutator)
	if err == nil {
		t.Error("Expected error when mutator produces an error")
	}

	badIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.ID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set ID")
	}

	badPodIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.PodID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badPodIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set PodID to mismatch manifest")
	}
	//
	// A valid mutate followed by validation
	//
	someOtherDisabled := !ds.Disabled
	someOtherMinHealth := 42
	someOtherName := ds_fields.ClusterName("some_other_name")
	someOtherPodID := types.PodID("some_other_pod_id")

	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()

	someOtherAZLabel := pc_fields.AvailabilityZone("some_other_zone")
	someOtherSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{someOtherAZLabel.String()})

	goodMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = someOtherDisabled
		dsToMutate.Manifest = someOtherManifest
		dsToMutate.MinHealth = someOtherMinHealth
		dsToMutate.Name = someOtherName
		dsToMutate.NodeSelector = someOtherSelector
		dsToMutate.PodID = someOtherPodID
		return dsToMutate, nil
	}
	someOtherDS, err := store.MutateDS(ds.ID, goodMutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}

	Assert(t).AreEqual(someOtherDS.ID, ds.ID, "Daemon set IDs should be equal")
	Assert(t).AreEqual(someOtherDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(someOtherDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(someOtherDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(someOtherDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")

	someOtherLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := someOtherDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	someOtherSHA, err := someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err := someOtherDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal")

	//
	// Validate daemon set from a get function
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Unable to get daemon set: %s", err)
	}
	Assert(t).AreEqual(getDS.ID, ds.ID, "Daemon set IDs should be equal")
	Assert(t).AreEqual(getDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(getDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(getDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(getDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")

	someOtherLabels = klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	someOtherSHA, err = someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err = getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal")
}