func TestLabelsOnCreate(t *testing.T) { store := consulStoreWithFakeKV() podID := types.PodID("pod_id") az := fields.AvailabilityZone("us-west") clusterName := fields.ClusterName("cluster_name") selector := klabels.Everything(). Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}). Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}). Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()}) annotations := fields.Annotations(map[string]interface{}{ "foo": "bar", }) pc, err := store.Create(podID, az, clusterName, selector, annotations, kptest.NewSession()) if err != nil { t.Fatalf("Unable to create pod cluster: %s", err) } matches, err := store.applicator.GetMatches(selector, labels.PC) if err != nil { t.Fatalf("Unable to check for label match on new pod cluster: %s", err) } if len(matches) != 1 { t.Errorf("Expected one pod cluster to match label selector") } if fields.ID(matches[0].ID) != pc.ID { t.Errorf("The pod cluster selector didn't match the new pod cluster") } }
func TestLockForSync(t *testing.T) { id := fields.ID("abc123") store := consulStoreWithFakeKV() syncerType := ConcreteSyncerType("some_syncer") session := kptest.NewSession() unlocker, err := store.LockForSync(id, syncerType, session) if err != nil { t.Fatalf("Unexpected error locking pod cluster for sync: %s", err) } _, err = store.LockForSync(id, syncerType, session) if err == nil { t.Fatal("Expected an error locking the same cluster for the same syncer type, but there wasn't one") } else { if !consulutil.IsAlreadyLocked(err) { t.Errorf("Expected error to be an already locked error, was %s", err) } } err = unlocker.Unlock() if err != nil { t.Errorf("Error unlocking the sync lock: %s", err) } _, err = store.LockForSync(id, syncerType, session) if err != nil { t.Fatalf("Unexpected error re-locking pod cluster for sync: %s", err) } }
func TestWatchPodCluster(t *testing.T) { store := consulStoreWithFakeKV() pod := fields.ID("pod_id") podID := types.PodID("pod_id") az := fields.AvailabilityZone("us-west") clusterName := fields.ClusterName("cluster_name") selector := klabels.Everything(). Add(fields.PodIDLabel, klabels.EqualsOperator, []string{pod.String()}). Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}). Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()}) annotations := fields.Annotations(map[string]interface{}{ "foo": "bar", }) session := kptest.NewSession() pc, err := store.Create(podID, az, clusterName, selector, annotations, session) if err != nil { t.Fatalf("Unable to create pod cluster: %s", err) } quit := make(chan struct{}) watch := store.WatchPodCluster(pc.ID, quit) var watched *WatchedPodCluster select { case watchedPC := <-watch: watched = &watchedPC case <-time.After(5 * time.Second): quit <- struct{}{} t.Fatal("nothing on the channel") } if watched == nil { t.Fatalf("Expected to get a watched PodCluster, but did not") } if watched.PodCluster.ID != pc.ID { t.Fatalf("Expected watched PodCluster to match %s Pod Cluster ID. Instead was %s", pc.ID, watched.PodCluster.ID) } }
func (s *consulStore) FindWhereLabeled(podID types.PodID, availabilityZone fields.AvailabilityZone, clusterName fields.ClusterName) ([]fields.PodCluster, error) { sel := klabels.Everything(). Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}). Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{availabilityZone.String()}). Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()}) podClusters, err := s.applicator.GetMatches(sel, labels.PC) if err != nil { return nil, err } ret := make([]fields.PodCluster, len(podClusters)) for i, pc := range podClusters { ret[i], err = s.Get(fields.ID(pc.ID)) if err != nil { return nil, err } } return ret, nil }
func (p *FakePCStore) Create( podID types.PodID, availabilityZone fields.AvailabilityZone, clusterName fields.ClusterName, podSelector klabels.Selector, annotations fields.Annotations, _ pcstore.Session, ) (fields.PodCluster, error) { id := fields.ID(uuid.New()) pc := fields.PodCluster{ ID: id, PodID: podID, AvailabilityZone: availabilityZone, Name: clusterName, PodSelector: podSelector, Annotations: annotations, } p.podClusters[id] = pc if watcher, ok := p.watchers[id]; ok { watcher <- pcstore.WatchedPodCluster{PodCluster: &pc, Err: nil} } return pc, nil }
func TestInitialClusters(t *testing.T) { store := consulStoreWithFakeKV() syncer := &fakeSyncer{ []fields.ID{"abc-123"}, make(chan fakeSync), make(chan fakeSync), false, } clusters, err := store.getInitialClusters(syncer) if err != nil { t.Fatalf("Did not expect error to occur getting clusters: %v", err) } if len(clusters.Clusters) != 1 { t.Fatalf("Got unexpected number of clusters (%v)", len(clusters.Clusters)) } if clusters.Clusters[0].ID != fields.ID("abc-123") { t.Fatalf("Got unexpected initial cluster %v", clusters.Clusters[0].ID) } }
// Create makes a new pod cluster with the given identifying attributes,
// guarding against duplicates with a creation lock and a Consul
// check-and-set write.
//
// Returns the existing cluster along with PodClusterAlreadyExists if a
// pod cluster labeled with the same (podID, availabilityZone,
// clusterName) triple already exists.
func (s *consulStore) Create(
	podID types.PodID,
	availabilityZone fields.AvailabilityZone,
	clusterName fields.ClusterName,
	podSelector klabels.Selector,
	annotations fields.Annotations,
	session Session,
) (fields.PodCluster, error) {
	id := fields.ID(uuid.New())

	// Lock on the identifying triple so that concurrent Create calls
	// can't both pass the existence check below.
	unlocker, err := s.lockForCreation(podID, availabilityZone, clusterName, session)
	if err != nil {
		return fields.PodCluster{}, err
	}
	defer unlocker.Unlock()

	existing, err := s.FindWhereLabeled(podID, availabilityZone, clusterName)
	if err != nil {
		return fields.PodCluster{}, util.Errorf("Couldn't determine if pod cluster exists already: %v", err)
	}
	if len(existing) > 0 {
		return existing[0], PodClusterAlreadyExists
	}

	pc := fields.PodCluster{
		ID:               id,
		PodID:            podID,
		AvailabilityZone: availabilityZone,
		Name:             clusterName,
		PodSelector:      podSelector,
		Annotations:      annotations,
	}

	key, err := pcPath(id)
	if err != nil {
		return fields.PodCluster{}, err
	}

	jsonPC, err := json.Marshal(pc)
	if err != nil {
		// Probably the annotations don't marshal to JSON
		return fields.PodCluster{}, util.Errorf("Unable to marshal pod cluster as JSON: %s", err)
	}

	// the chance of the UUID already existing is vanishingly small, but
	// technically not impossible, so we should use the CAS index to guard
	// against duplicate UUIDs
	success, _, err := s.kv.CAS(&api.KVPair{
		Key:         key,
		Value:       jsonPC,
		ModifyIndex: 0,
	}, nil)
	if err != nil {
		return fields.PodCluster{}, consulutil.NewKVError("cas", key, err)
	}
	if !success {
		return fields.PodCluster{}, util.Errorf("Could not set pod cluster at path '%s'", key)
	}

	// Label the new cluster so selector-based lookups can find it. If
	// labeling fails, roll back the KV write so we don't leave behind a
	// cluster that can never be found by its selector.
	err = s.setLabelsForPC(pc)
	if err != nil {
		// TODO: what if this delete fails?
		deleteErr := s.Delete(pc.ID)
		if deleteErr != nil {
			err = util.Errorf("%s\n%s", err, deleteErr)
		}

		return fields.PodCluster{}, err
	}

	return pc, nil
}
// TestConcreteSyncerWithPrevious drives handlePCUpdates through a full
// pod cluster lifecycle as observed by a concrete syncer:
//  1. previous == current (simulating syncer startup),
//  2. a selector change retargeting the cluster from the red-labeled
//     pod to the blue-labeled pod,
//  3. deletion (current == nil).
// Each step writes a podClusterChange to the changes channel and then
// asserts what arrives on the fake syncer's synced/deleted channels.
// All channel operations are bounded by 5-second timeouts so a wedged
// handler fails the test instead of hanging it.
func TestConcreteSyncerWithPrevious(t *testing.T) {
	store := consulStoreWithFakeKV()
	store.logger.Logger.Level = logrus.DebugLevel

	// Two labeled pods; the cluster's selector matches exactly one of
	// them at a time. NOTE(review): SetLabel errors are ignored here.
	store.applicator.SetLabel(labels.POD, "1234-123-123-1234", "color", "red")
	store.applicator.SetLabel(labels.POD, "abcd-abc-abc-abcd", "color", "blue")

	syncer := &fakeSyncer{
		[]fields.ID{},
		make(chan fakeSync),
		make(chan fakeSync),
		false,
	}

	// Previous == current, simulates a concrete syncer starting up
	change := podClusterChange{
		previous: &fields.PodCluster{
			ID:               fields.ID("abc123"),
			PodID:            types.PodID("vvv"),
			AvailabilityZone: fields.AvailabilityZone("west"),
			Name:             "production",
			PodSelector:      klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
		},
		current: &fields.PodCluster{
			ID:               fields.ID("abc123"),
			PodID:            types.PodID("vvv"),
			AvailabilityZone: fields.AvailabilityZone("west"),
			Name:             "production",
			PodSelector:      klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
		},
	}

	changes := make(chan podClusterChange)
	go store.handlePCUpdates(syncer, changes, metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))

	select {
	case changes <- change:
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to write change to handlePCChange")
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to read from the syncer")
	case sync := <-syncer.synced:
		if sync.syncedCluster == nil {
			t.Fatal("unexpectedly didn't get a cluster on the sync channel")
		}
		if sync.syncedCluster.ID != change.current.ID {
			t.Fatalf("got unexpected synced cluster %v", sync.syncedCluster.ID)
		}
		// Only the red pod should match the selector at this point.
		if len(sync.syncedPods) != 1 {
			t.Fatalf("got unexpected number of synced pods with cluster: %v", len(sync.syncedPods))
		}
		if sync.syncedPods[0].ID != "1234-123-123-1234" {
			t.Fatalf("got unexpected pod ID from labeled pods sync: %v", sync.syncedPods[0].ID)
		}
	}

	// now we send a new update that changes the pod cluster's target pod from the red one to the blue one.
	// (from 1234-123-123-1234 to abcd-abc-abc-abcd )
	change = podClusterChange{
		previous: change.current,
		current: &fields.PodCluster{
			ID:               fields.ID("abc123"),
			PodID:            types.PodID("vvv"),
			AvailabilityZone: fields.AvailabilityZone("west"),
			Name:             "production",
			PodSelector:      klabels.Everything().Add("color", klabels.EqualsOperator, []string{"blue"}),
		},
	}

	select {
	case changes <- change:
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to write change to handlePCChange")
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to read from the syncer")
	case sync := <-syncer.synced:
		if sync.syncedCluster == nil {
			t.Fatal("unexpectedly didn't get a cluster on the sync channel")
		}
		if sync.syncedCluster.ID != change.current.ID {
			t.Fatalf("got unexpected synced cluster %v", sync.syncedCluster.ID)
		}
		// After the selector change, only the blue pod should match.
		if len(sync.syncedPods) != 1 {
			t.Fatalf("got unexpected number of synced pods with cluster: %v", len(sync.syncedPods))
		}
		if sync.syncedPods[0].ID != "abcd-abc-abc-abcd" {
			t.Fatalf("got unexpected pod ID from labeled pods sync: %v", sync.syncedPods[0].ID)
		}
	}

	// appear to have deleted the cluster
	change.previous = change.current
	change.current = nil

	select {
	case changes <- change:
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to write deletion change to handlePCChange")
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatal("Test timed out trying to read from the syncer")
	case sync := <-syncer.deleted:
		if sync.syncedCluster == nil {
			t.Fatal("unexpectedly didn't get a cluster on the sync channel")
		}
		if sync.syncedCluster.ID != change.previous.ID {
			t.Fatalf("got unexpected synced cluster %v", sync.syncedCluster.ID)
		}
	}

	close(changes)
}
func TestZipPodClusterResults(t *testing.T) { store := consulStore{} previous := WatchedPodClusters{ []*fields.PodCluster{ { ID: fields.ID("abc123"), PodID: types.PodID("vvv"), Name: "old name 1", }, { ID: fields.ID("def456"), PodID: types.PodID("xxx"), Name: "old name 2", }, }, nil, } current := WatchedPodClusters{ []*fields.PodCluster{ { ID: fields.ID("abc123"), PodID: types.PodID("vvv"), Name: "new name 1", }, { ID: fields.ID("987fed"), PodID: types.PodID("zzz"), Name: "new name 3", }, }, nil, } zipped := store.zipResults(current, previous) if len(zipped) != 3 { t.Errorf("Unexpected number of clusters in zipped results: %v", len(zipped)) } updated := zipped[fields.ID("abc123")] if updated.previous == nil || updated.current == nil { t.Fatalf("Either (%v) or (%v) is nil, but neither should be", updated.previous, updated.current) } if updated.current.Name != "new name 1" { t.Errorf("%v was not the right name for the current cluster", updated.current.Name) } if updated.previous.Name != "old name 1" { t.Errorf("%v was not the right name for the previous cluster", updated.previous.Name) } onlyOld := zipped[fields.ID("def456")] if onlyOld.current != nil { t.Error("cluster onlyOld should not have had a current cluster") } if onlyOld.previous == nil { t.Fatalf("the previous cluster should not have been nil") } if onlyOld.previous.Name != "old name 2" { t.Errorf("The old name %v was wrong for the cluster", onlyOld.previous.Name) } onlyNew := zipped[fields.ID("987fed")] if onlyNew.previous != nil { t.Error("cluster onlyNew should not have had a previous cluster") } if onlyNew.current == nil { t.Fatalf("the current cluster should not have been nil") } if onlyNew.current.Name != "new name 3" { t.Errorf("The old name %v was wrong for the cluster", onlyNew.current.Name) } }
func main() { cmd, consulOpts, labeler := flags.ParseWithConsulOptions() client := kp.NewConsulClient(consulOpts) kv := kp.NewConsulStore(client) logger := logging.NewLogger(logrus.Fields{}) pcstore := pcstore.NewConsul(client, labeler, labels.NewConsulApplicator(client, 0), &logger) session, _, err := kv.NewSession(fmt.Sprintf("pcctl-%s", currentUserName()), nil) if err != nil { log.Fatalf("Could not create session: %s", err) } switch cmd { case cmdCreateText: az := fields.AvailabilityZone(*createAZ) cn := fields.ClusterName(*createName) podID := types.PodID(*createPodID) selector := selectorFrom(az, cn, podID) pccontrol := control.NewPodCluster(az, cn, podID, pcstore, selector, session) annotations := *createAnnotations var parsedAnnotations map[string]interface{} err := json.Unmarshal([]byte(annotations), &parsedAnnotations) if err != nil { log.Fatalf("could not parse json: %v", err) } _, err = pccontrol.Create(parsedAnnotations) if err != nil { log.Fatalf("err: %v", err) } case cmdGetText: az := fields.AvailabilityZone(*getAZ) cn := fields.ClusterName(*getName) podID := types.PodID(*getPodID) pcID := fields.ID(*getID) var pccontrol *control.PodCluster if pcID != "" { pccontrol = control.NewPodClusterFromID(pcID, session, pcstore) } else if az != "" && cn != "" && podID != "" { selector := selectorFrom(az, cn, podID) pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session) } else { log.Fatalf("Expected one of: pcID or (pod,az,name)") } pc, err := pccontrol.Get() if err != nil { log.Fatalf("Caught error while fetching pod cluster: %v", err) } bytes, err := json.Marshal(pc) if err != nil { logger.WithError(err).Fatalln("Unable to marshal PC as JSON") } fmt.Printf("%s", bytes) case cmdDeleteText: az := fields.AvailabilityZone(*deleteAZ) cn := fields.ClusterName(*deleteName) podID := types.PodID(*deletePodID) pcID := fields.ID(*deleteID) var pccontrol *control.PodCluster if pcID != "" { pccontrol = control.NewPodClusterFromID(pcID, session, 
pcstore) } else if az != "" && cn != "" && podID != "" { selector := selectorFrom(az, cn, podID) pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session) } else { log.Fatalf("Expected one of: pcID or (pod,az,name)") } errors := pccontrol.Delete() if len(errors) >= 1 { for _, err := range errors { _, _ = os.Stderr.Write([]byte(fmt.Sprintf("Failed to delete one pod cluster matching arguments. Error:\n %s\n", err.Error()))) } os.Exit(1) } case cmdUpdateText: az := fields.AvailabilityZone(*updateAZ) cn := fields.ClusterName(*updateName) podID := types.PodID(*updatePodID) pcID := fields.ID(*updateID) var pccontrol *control.PodCluster if pcID != "" { pccontrol = control.NewPodClusterFromID(pcID, session, pcstore) } else if az != "" && cn != "" && podID != "" { selector := selectorFrom(az, cn, podID) pccontrol = control.NewPodCluster(az, cn, podID, pcstore, selector, session) } else { log.Fatalf("Expected one of: pcID or (pod,az,name)") } var annotations fields.Annotations err := json.Unmarshal([]byte(*updateAnnotations), &annotations) if err != nil { _, _ = os.Stderr.Write([]byte(fmt.Sprintf("Annotations are invalid JSON. Err follows:\n%v", err))) os.Exit(1) } pc, err := pccontrol.Update(annotations) if err != nil { log.Fatalf("Error during PodCluster update: %v\n%v", err, pc) os.Exit(1) } bytes, err := json.Marshal(pc) if err != nil { log.Fatalf("Update succeeded, but error during displaying PC: %v\n%+v", err, pc) os.Exit(1) } fmt.Printf("%s", bytes) case cmdListText: pcs, err := pcstore.List() if err != nil { _, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not list pcs. Err follows:\n%v", err))) os.Exit(1) } bytes, err := json.Marshal(pcs) if err != nil { _, _ = os.Stderr.Write([]byte(fmt.Sprintf("Could not marshal pc list. Err follows:\n%v", err))) os.Exit(1) } fmt.Printf("%s", bytes) default: log.Fatalf("Unrecognized command %v", cmd) } }