func TestCreateFailsIfCantAcquireLock(t *testing.T) {
	newRCID := rc_fields.ID("new_rc")
	oldRCID := rc_fields.ID("old_rc")
	rollstore := newRollStore(t, nil)

	update := fields.Update{
		NewRC: newRCID,
		OldRC: oldRCID,
	}

	// Grab an update creation lock on one of the RCs and make sure the
	// creation fails
	session, _, err := rollstore.store.NewSession("conflicting session", nil)
	if err != nil {
		t.Fatalf("Unable to create session for conflicting lock: %s", err)
	}
	defer session.Destroy()

	_, err = rollstore.rcstore.LockForUpdateCreation(update.OldRC, session)
	if err != nil {
		t.Fatalf("Unable to acquire conflicting lock on old rc: %s", err)
	}

	_, err = rollstore.CreateRollingUpdateFromExistingRCs(update, nil, nil)
	if err == nil {
		t.Fatal("Expected update creation to fail due to lock conflict")
	}

	ru, _ := rollstore.Get(fields.ID(newRCID))
	if ru.NewRC != "" || ru.OldRC != "" {
		t.Fatal("New ru shouldn't have been created but it was")
	}
}
func createHelloReplicationController(dir string) (fields.ID, error) {
	signedManifestPath, err := writeHelloManifest(dir, "hello.yaml", 43770)
	if err != nil {
		return "", err
	}

	cmd := exec.Command("p2-rctl", "--log-json", "create", "--manifest",
		signedManifestPath, "--node-selector", "test=yes")
	out := bytes.Buffer{}
	cmd.Stdout = &out
	cmd.Stderr = &out
	err = cmd.Run()
	if err != nil {
		return fields.ID(""), fmt.Errorf("Couldn't create replication controller for hello: %s %s", out.String(), err)
	}

	var rctlOut struct {
		ID string `json:"id"`
	}
	err = json.Unmarshal(out.Bytes(), &rctlOut)
	if err != nil {
		return fields.ID(""), fmt.Errorf("Couldn't read RC ID out of p2-rctl invocation result: %v", err)
	}

	output, err := exec.Command("p2-rctl", "set-replicas", rctlOut.ID, "1").CombinedOutput()
	if err != nil {
		fmt.Println(string(output))
		return "", err
	}
	return fields.ID(rctlOut.ID), nil
}
// Given a consul key path, returns the RC ID and the lock type. Returns an err
// if the key does not resemble an RC lock key
func (s *consulStore) lockTypeFromKey(key string) (fields.ID, LockType, error) {
	keyParts := strings.Split(key, "/")
	// Sanity check key structure e.g. /lock/replication_controllers/abcd-1234
	if len(keyParts) < 3 || len(keyParts) > 4 {
		return "", UnknownLockType, util.Errorf("Key '%s' does not resemble an RC lock", key)
	}

	if keyParts[0] != consulutil.LOCK_TREE {
		return "", UnknownLockType, util.Errorf("Key '%s' does not resemble an RC lock", key)
	}

	if keyParts[1] != rcTree {
		return "", UnknownLockType, util.Errorf("Key '%s' does not resemble an RC lock", key)
	}

	rcID := keyParts[2]
	if len(keyParts) == 3 {
		// There's no lock suffix, so this is an ownership lock
		return fields.ID(rcID), OwnershipLockType, nil
	}

	switch keyParts[3] {
	case mutationSuffix:
		return fields.ID(rcID), MutationLockType, nil
	case updateCreationSuffix:
		return fields.ID(rcID), UpdateCreationLockType, nil
	default:
		return fields.ID(rcID), UnknownLockType, nil
	}
}
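// What follows is a minimal usage sketch, not part of the package: classify a
// batch of keys observed under the consul lock tree using lockTypeFromKey.
// The function name describeLockKeys and the use of fmt.Printf are
// illustrative assumptions; LockType.String() is exercised by
// TestLockTypeFromKey elsewhere in this package.
func describeLockKeys(s *consulStore, keys []string) {
	for _, key := range keys {
		rcID, lockType, err := s.lockTypeFromKey(key)
		if err != nil {
			// The key doesn't resemble an RC lock; skip it
			continue
		}
		fmt.Printf("RC %s is locked: %s\n", rcID, lockType.String())
	}
}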
func TestWouldWorkOn(t *testing.T) {
	fakeLabels := labels.NewFakeApplicator()
	fakeLabels.SetLabel(labels.RC, "abc-123", "color", "red")
	fakeLabels.SetLabel(labels.RC, "def-456", "color", "blue")

	f := &Farm{
		labeler:    fakeLabels,
		rcSelector: klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
	}

	workOn, err := f.shouldWorkOn(rc_fields.ID("abc-123"))
	Assert(t).IsNil(err, "should not have erred on abc-123")
	Assert(t).IsTrue(workOn, "should have worked on abc-123, but didn't")

	dontWorkOn, err := f.shouldWorkOn(rc_fields.ID("def-456"))
	Assert(t).IsNil(err, "should not have erred on def-456")
	Assert(t).IsFalse(dontWorkOn, "should not have worked on def-456, but did")

	dontWorkOn, err = f.shouldWorkOn(rc_fields.ID("987-cba"))
	Assert(t).IsNil(err, "should not have erred on 987-cba")
	Assert(t).IsFalse(dontWorkOn, "should not have worked on 987-cba, but did")

	f.rcSelector = klabels.Everything()

	workOn, err = f.shouldWorkOn(rc_fields.ID("def-456"))
	Assert(t).IsNil(err, "should not have erred on def-456")
	Assert(t).IsTrue(workOn, "should have worked on def-456, but didn't")
}
func (r RCtl) RollingUpdate(oldID, newID string, want, need int, deletes bool) {
	if want < need {
		r.logger.WithFields(logrus.Fields{
			"want": want,
			"need": need,
		}).Fatalln("Cannot run update with desired replicas less than minimum replicas")
	}
	sessions := make(chan string)
	quit := make(chan struct{})
	go kp.ConsulSessionManager(api.SessionEntry{
		LockDelay: 1 * time.Nanosecond,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, r.baseClient, sessions, quit, r.logger)
	session := <-sessions
	if session == "" {
		r.logger.NoFields().Fatalln("Could not acquire session")
	}
	lock := r.kps.NewUnmanagedLock(session, "")

	result := make(chan bool, 1)
	go func() {
		result <- roll.NewUpdate(roll_fields.Update{
			OldRC:           rc_fields.ID(oldID),
			NewRC:           rc_fields.ID(newID),
			DesiredReplicas: want,
			MinimumReplicas: need,
			DeletePods:      deletes,
		}, r.kps, r.rcs, r.hcheck, r.labeler, r.sched, r.logger, lock).Run(quit)
		close(result)
	}()

	signals := make(chan os.Signal, 2)
	signal.Notify(signals, syscall.SIGTERM, os.Interrupt)

LOOP:
	for {
		select {
		case <-signals:
			// try to clean up locks on ^C
			close(quit)
			// do not exit right away - the session and result channels will be
			// closed after the quit is requested, ensuring that the locks held
			// by the farm were released.
			r.logger.NoFields().Errorln("Got signal, exiting")
		case <-sessions:
			r.logger.NoFields().Fatalln("Lost session")
		case res := <-result:
			// done, either due to ^C (already printed message above) or
			// clean finish
			if res {
				r.logger.NoFields().Infoln("Done")
			}
			break LOOP
		}
	}
}
func (r rctlParams) ScheduleUpdate(oldID, newID string, want, need int) {
	_, err := r.rls.CreateRollingUpdateFromExistingRCs(roll_fields.Update{
		OldRC:           rc_fields.ID(oldID),
		NewRC:           rc_fields.ID(newID),
		DesiredReplicas: want,
		MinimumReplicas: need,
	}, nil, nil)
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not create rolling update")
	} else {
		r.logger.WithField("id", newID).Infoln("Created new rolling update")
	}
}
func TestCreateRollingUpdateFromExistingRCs(t *testing.T) {
	rollstore := newRollStore(t, nil)

	newRCID := rc_fields.ID("new_rc")
	oldRCID := rc_fields.ID("old_rc")

	update := fields.Update{
		NewRC: newRCID,
		OldRC: oldRCID,
	}

	newRCLabels := klabels.Set(map[string]string{
		"some_key": "some_val",
	})

	u, err := rollstore.CreateRollingUpdateFromExistingRCs(update, newRCLabels, newRCLabels)
	if err != nil {
		t.Fatalf("Unexpected error creating update: %s", err)
	}

	storedUpdate, err := rollstore.Get(update.ID())
	if err != nil {
		t.Fatalf("Unable to retrieve value put in roll store: %s", err)
	}

	if storedUpdate.NewRC != newRCID {
		t.Errorf("Stored update didn't have expected new rc value: wanted '%s' but got '%s'", newRCID, storedUpdate.NewRC)
	}

	if storedUpdate.OldRC != oldRCID {
		t.Errorf("Stored update didn't have expected old rc value: wanted '%s' but got '%s'", oldRCID, storedUpdate.OldRC)
	}

	rcLabels, err := rollstore.labeler.GetLabels(labels.RC, newRCID.String())
	if err != nil {
		t.Fatalf("Unable to fetch labels for newly created new RC: %s", err)
	}

	if rcLabels.Labels["some_key"] != "some_val" {
		t.Errorf("Expected labels to be set on new RC")
	}

	ruLabels, err := rollstore.labeler.GetLabels(labels.RU, u.ID().String())
	if err != nil {
		t.Fatalf("Unable to fetch labels for newly created new RU: %s", err)
	}

	if ruLabels.Labels["some_key"] != "some_val" {
		t.Errorf("Expected labels to be set on new RU")
	}
}
func (r RCtl) ScheduleUpdate(oldID, newID string, want, need int, deletes bool) {
	err := r.rls.Put(roll_fields.Update{
		OldRC:           rc_fields.ID(oldID),
		NewRC:           rc_fields.ID(newID),
		DesiredReplicas: want,
		MinimumReplicas: need,
		DeletePods:      deletes,
	})
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not create rolling update")
	} else {
		r.logger.WithField("id", newID).Infoln("Created new rolling update")
	}
}
func (r rctlParams) Delete(id string, force bool) {
	err := r.rcs.Delete(rc_fields.ID(id), force)
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not delete replication controller in Consul")
	}
	r.logger.WithField("id", id).Infoln("Deleted replication controller")
}
func (r rctlParams) Disable(id string) {
	err := r.rcs.Disable(rc_fields.ID(id))
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not disable replication controller in Consul")
	}
	r.logger.WithField("id", id).Infoln("Disabled replication controller")
}
func (r RCtl) Enable(id string) {
	err := r.rcs.Enable(rc_fields.ID(id))
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not enable replication controller in Consul")
	}
	r.logger.WithField("id", id).Infoln("Enabled replication controller")
}
// these parts of Create may require a retry
func (s *consulStore) innerCreate(manifest pods.Manifest, nodeSelector klabels.Selector, podLabels klabels.Set) (fields.RC, error) {
	id := fields.ID(uuid.New())
	rcp := kp.RCPath(id.String())
	rc := fields.RC{
		ID:              id,
		Manifest:        manifest,
		NodeSelector:    nodeSelector,
		PodLabels:       podLabels,
		ReplicasDesired: 0,
		Disabled:        false,
	}

	jsonRC, err := json.Marshal(rc)
	if err != nil {
		return fields.RC{}, err
	}
	success, _, err := s.kv.CAS(&api.KVPair{
		Key:   rcp,
		Value: jsonRC,
		// the chance of the UUID already existing is vanishingly small, but
		// technically not impossible, so we should use the CAS index to guard
		// against duplicate UUIDs
		ModifyIndex: 0,
	}, nil)
	if err != nil {
		return fields.RC{}, consulutil.NewKVError("cas", rcp, err)
	}
	if !success {
		return fields.RC{}, CASError(rcp)
	}
	return rc, nil
}
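// A hedged sketch of the retry alluded to in the comment above ("may require
// a retry"); this is not the package's actual Create implementation. It
// assumes CASError (returned above on a failed CAS) is the only retryable
// failure; each attempt generates a fresh UUID inside innerCreate.
func (s *consulStore) createWithRetry(
	manifest pods.Manifest,
	nodeSelector klabels.Selector,
	podLabels klabels.Set,
	attempts int,
) (fields.RC, error) {
	var err error
	for i := 0; i < attempts; i++ {
		var rc fields.RC
		rc, err = s.innerCreate(manifest, nodeSelector, podLabels)
		if err == nil {
			return rc, nil
		}
		if _, retryable := err.(CASError); !retryable {
			// Anything other than a UUID collision is a real failure
			return fields.RC{}, err
		}
	}
	return fields.RC{}, err
}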
func TestLockTypeFromKey(t *testing.T) {
	store := consulStore{}
	expectedRCID := fields.ID("abcd-1234")

	mutationLockPath, err := store.mutationLockPath(expectedRCID)
	if err != nil {
		t.Fatalf("Unable to compute lock path for rc")
	}

	updateCreationLockPath, err := store.updateCreationLockPath(expectedRCID)
	if err != nil {
		t.Fatalf("Unable to compute lock path for rc")
	}

	ownershipLockPath, err := store.ownershipLockPath(expectedRCID)
	if err != nil {
		t.Fatalf("Unable to compute lock path for rc")
	}

	type lockTypeExpectation struct {
		Key          string
		ExpectedType LockType
		ExpectError  bool
	}

	expectations := []lockTypeExpectation{
		{mutationLockPath, MutationLockType, false},
		{updateCreationLockPath, UpdateCreationLockType, false},
		{ownershipLockPath, OwnershipLockType, false},
		{"bogus_key", UnknownLockType, true},
		{"/lock/bogus_key", UnknownLockType, true},
		{"/lock/replication_controllers/bogus/key/blah", UnknownLockType, true},
	}

	for _, expectation := range expectations {
		rcId, lockType, err := store.lockTypeFromKey(expectation.Key)
		if lockType != expectation.ExpectedType {
			t.Errorf("Expected lock type for %s to be %s, was %s", expectation.Key, expectation.ExpectedType.String(), lockType.String())
		}

		if expectation.ExpectError {
			if err == nil {
				t.Errorf("Expected an error to be returned for key %s, but there wasn't", expectation.Key)
			}
		} else {
			if err != nil {
				t.Errorf("Unexpected error for key %s: %s", expectation.Key, err)
			}

			if rcId != expectedRCID {
				t.Errorf("Expected returned rcID to be '%s', was '%s'", expectedRCID, rcId)
			}
		}
	}
}
func TestCreateRollingUpdateFromOneExistingRCWithIDFailsIfCantAcquireLock(t *testing.T) {
	oldRCID := rc_fields.ID("old_rc")

	rollstore := newRollStore(t, nil)

	// Grab an update creation lock on the old RC and make sure the
	// creation fails
	session, _, err := rollstore.store.NewSession("conflicting session", nil)
	if err != nil {
		t.Fatalf("Unable to create session for conflicting lock: %s", err)
	}
	defer session.Destroy()

	_, err = rollstore.rcstore.LockForUpdateCreation(oldRCID, session)
	if err != nil {
		t.Fatalf("Unable to acquire conflicting lock on old rc: %s", err)
	}

	newUpdate, err := rollstore.CreateRollingUpdateFromOneExistingRCWithID(
		oldRCID,
		1,
		0,
		false,
		0,
		testManifest(),
		testNodeSelector(),
		nil,
		nil,
		nil,
	)
	if err == nil {
		t.Fatalf("Should have erred creating conflicting update")
	}

	update, err := rollstore.Get(fields.ID(newUpdate.NewRC))
	if err != nil {
		t.Fatalf("Should not have erred checking for update creation: %s", err)
	}

	if update.NewRC != "" {
		t.Fatalf("Update was created but shouldn't have been")
	}

	rcs, err := rollstore.rcstore.List()
	if err != nil {
		t.Fatalf("Shouldn't have failed to list RCs: %s", err)
	}

	if len(rcs) != 0 {
		t.Fatalf("There shouldn't be any new RCs after a failed update: expected 0 but there were %d", len(rcs))
	}
}
func (rm *P2RM) checkForManagingReplicationController() (bool, fields.ID, error) {
	podLabels, err := rm.Labeler.GetLabels(labels.POD, rm.LabelID)
	if err != nil {
		return false, "", fmt.Errorf("unable to check node for labels: %v", err)
	}

	if podLabels.Labels.Has(rc.RCIDLabel) {
		return true, fields.ID(podLabels.Labels.Get(rc.RCIDLabel)), nil
	}

	return false, "", nil
}
// Test that if a conflicting update exists, a new one will not be admitted
func TestCreateExistingRCsMutualExclusion(t *testing.T) {
	newRCID := rc_fields.ID("new_rc")
	oldRCID := rc_fields.ID("old_rc")

	conflictingEntry := fields.Update{
		OldRC: newRCID,
		NewRC: rc_fields.ID("some_other_rc"),
	}

	rollstore := newRollStore(t, []fields.Update{conflictingEntry})

	update := fields.Update{
		NewRC: newRCID,
		OldRC: oldRCID,
	}

	_, err := rollstore.CreateRollingUpdateFromExistingRCs(update, nil, nil)
	if err == nil {
		t.Fatal("Expected update creation to fail due to conflict")
	}

	if conflictingErr, ok := err.(*ConflictingRUError); !ok {
		t.Error("Returned error didn't have ConflictingRUError type")
	} else {
		if conflictingErr.ConflictingID != conflictingEntry.ID() {
			t.Errorf("Expected error to have conflicting ID of '%s', was '%s'", conflictingEntry.ID(), conflictingErr.ConflictingID)
		}

		if conflictingErr.ConflictingRCID != conflictingEntry.OldRC {
			t.Errorf("Expected error to have conflicting rc ID of '%s', was '%s'", conflictingEntry.OldRC, conflictingErr.ConflictingRCID)
		}
	}

	ru, _ := rollstore.Get(fields.ID(update.NewRC))
	if ru.NewRC != "" || ru.OldRC != "" {
		t.Fatal("New ru shouldn't have been created but it was")
	}
}
func (r rctlParams) SetReplicas(id string, replicas int) {
	if replicas < 0 {
		r.logger.NoFields().Fatalln("Cannot set negative replica count")
	}

	err := r.rcs.SetDesiredReplicas(rc_fields.ID(id), replicas)
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not set desired replica count in Consul")
	}
	r.logger.WithFields(logrus.Fields{
		"id":       id,
		"replicas": replicas,
	}).Infoln("Set desired replica count of replication controller")
}
func TestLockRCs(t *testing.T) {
	fakeStore := kptest.NewFakePodStore(nil, nil)
	session, _, err := fakeStore.NewSession("fake rc lock session", nil)
	Assert(t).IsNil(err, "Should not have erred getting fake session")

	update := NewUpdate(fields.Update{
		NewRC: rc_fields.ID("new_rc"),
		OldRC: rc_fields.ID("old_rc"),
	},
		nil,
		rcstore.NewFake(),
		nil,
		nil,
		nil,
		logging.DefaultLogger,
		session,
		nil,
	).(*update)
	err = update.lockRCs(make(<-chan struct{}))
	Assert(t).IsNil(err, "should not have erred locking RCs")
	Assert(t).IsNotNil(update.newRCUnlocker, "should have consulutil.Unlocker for unlocking new rc")
	Assert(t).IsNotNil(update.oldRCUnlocker, "should have consulutil.Unlocker for unlocking old rc")
}
func createHelloReplicationController(dir string) (fields.ID, error) {
	hello := fmt.Sprintf("file://%s", util.From(runtime.Caller(0)).ExpandPath("../hoisted-hello_def456.tar.gz"))
	builder := manifest.NewBuilder()
	builder.SetID("hello")
	builder.SetStatusPort(43770)
	stanzas := map[launch.LaunchableID]launch.LaunchableStanza{
		"hello": {
			LaunchableId:   "hello",
			LaunchableType: "hoist",
			Location:       hello,
		},
	}
	builder.SetLaunchables(stanzas)
	manifest := builder.GetManifest()
	manifestPath := path.Join(dir, "hello.yaml")

	f, err := os.OpenFile(manifestPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return fields.ID(""), err
	}
	defer f.Close()
	err = manifest.Write(f)
	if err != nil {
		return fields.ID(""), err
	}
	f.Close()

	manifestPath, err = signManifest(manifestPath, dir)
	if err != nil {
		return fields.ID(""), err
	}

	cmd := exec.Command("p2-rctl", "--log-json", "create", "--manifest",
		manifestPath, "--node-selector", "test=yes")
	out := bytes.Buffer{}
	cmd.Stdout = &out
	cmd.Stderr = &out
	err = cmd.Run()
	if err != nil {
		return fields.ID(""), fmt.Errorf("Couldn't create replication controller for hello: %s %s", out.String(), err)
	}

	var rctlOut struct {
		ID string `json:"id"`
	}
	err = json.Unmarshal(out.Bytes(), &rctlOut)
	if err != nil {
		return fields.ID(""), fmt.Errorf("Couldn't read RC ID out of p2-rctl invocation result: %v", err)
	}

	return fields.ID(rctlOut.ID), exec.Command("p2-rctl", "set-replicas", rctlOut.ID, "1").Run()
}
func rollsWithIDs(t *testing.T, id string, num int) api.KVPairs {
	var pairs api.KVPairs
	for i := 0; i < num; i++ {
		ru := fields.Update{
			NewRC: rc_fields.ID(id),
		}

		jsonRU, err := json.Marshal(ru)
		if err != nil {
			t.Fatalf("Unable to marshal test RU as json: %s", err)
		}

		pairs = append(pairs, &api.KVPair{
			Value: jsonRU,
		})
	}

	return pairs
}
func (r rctlParams) Get(id string, manifest bool) {
	getRC, err := r.rcs.Get(rc_fields.ID(id))
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not get replication controller in Consul")
	}

	if manifest {
		out, err := getRC.Manifest.Marshal()
		if err != nil {
			r.logger.WithError(err).Fatalln("Could not marshal replication controller manifest")
		}
		fmt.Printf("%s", out)
	} else {
		out, err := json.MarshalIndent(getRC, "", " ")
		if err != nil {
			r.logger.WithError(err).Fatalln("Could not marshal replication controller to JSON")
		}
		fmt.Printf("%s\n", out)
	}
}
func rcsWithIDs(t *testing.T, id string, num int) api.KVPairs {
	var pairs api.KVPairs
	builder := manifest.NewBuilder()
	builder.SetID("slug")
	manifest := builder.GetManifest()
	for i := 0; i < num; i++ {
		rc := fields.RC{
			ID:       fields.ID(id),
			Manifest: manifest,
		}

		jsonRC, err := json.Marshal(rc)
		if err != nil {
			t.Fatalf("Unable to marshal test RC as json: %s", err)
		}

		pairs = append(pairs, &api.KVPair{
			Value: jsonRC,
		})
	}

	return pairs
}
func (s *fakeStore) Create(manifest pods.Manifest, nodeSelector labels.Selector, podLabels labels.Set) (fields.RC, error) {
	// A real replication controller will use a UUID.
	// We'll just use a monotonically increasing counter for expedience.
	s.creates += 1
	id := fields.ID(strconv.Itoa(s.creates))
	entry := fakeEntry{
		RC: fields.RC{
			ID:              id,
			Manifest:        manifest,
			NodeSelector:    nodeSelector,
			PodLabels:       podLabels,
			ReplicasDesired: 0,
			Disabled:        false,
		},
		watchers:      make(map[int]chan struct{}),
		lastWatcherId: 0,
	}
	s.rcs[id] = &entry
	return entry.RC, nil
}
package rcstore

import (
	"fmt"
	"testing"

	"github.com/square/p2/pkg/kp/consulutil"
	"github.com/square/p2/pkg/kp/kptest"
	"github.com/square/p2/pkg/rc/fields"
)

const testRCId = fields.ID("abcd-1234")

func TestLockForOwnership(t *testing.T) {
	kpStore := kptest.NewFakePodStore(nil, nil)
	session, _, err := kpStore.NewSession("test rc ownership session", nil)
	if err != nil {
		t.Fatalf("Unable to create fake session in fake store: %s", err)
	}

	rcstore := consulStore{}
	unlocker, err := rcstore.LockForOwnership(testRCId, session)
	if err != nil {
		t.Fatalf("Unable to lock rc for ownership: %s", err)
	}

	expectedKey := fmt.Sprintf("%s/%s/%s", consulutil.LOCK_TREE, rcTree, testRCId)
	if unlocker.Key() != expectedKey {
		t.Errorf("Key did not match expected: wanted '%s' but got '%s'", expectedKey, unlocker.Key())
	}
}
"github.com/square/p2/pkg/kp" "github.com/square/p2/pkg/kp/consulutil" "github.com/square/p2/pkg/kp/kptest" "github.com/square/p2/pkg/kp/rcstore" "github.com/square/p2/pkg/labels" "github.com/square/p2/pkg/manifest" rc_fields "github.com/square/p2/pkg/rc/fields" "github.com/square/p2/pkg/roll/fields" "github.com/hashicorp/consul/api" klabels "k8s.io/kubernetes/pkg/labels" ) const ( testRCId = rc_fields.ID("abcd-1234") testRCId2 = rc_fields.ID("def-456") ) func TestNewConsul(t *testing.T) { store := NewConsul(kp.NewConsulClient(kp.Options{}), nil) rollstore := store.(consulStore) if rollstore.kv == nil { t.Fatal("kv should not be nil for constructed rollstore") } if rollstore.rcstore == nil { t.Fatal("rcstore should not be nil for constructed rollstore") } if rollstore.labeler == nil {
func TestCreateRollingUpdateFromOneExistingRCWithID(t *testing.T) {
	oldRCID := rc_fields.ID("old_rc")

	rollstore := newRollStore(t, nil)

	newRCLabels := klabels.Set(map[string]string{
		"some_key": "some_val",
	})

	newUpdate, err := rollstore.CreateRollingUpdateFromOneExistingRCWithID(
		oldRCID,
		1,
		0,
		false,
		0,
		testManifest(),
		testNodeSelector(),
		nil,
		newRCLabels,
		newRCLabels,
	)
	if err != nil {
		t.Fatalf("Unable to create rolling update: %s", err)
	}

	storedUpdate, err := rollstore.Get(fields.ID(newUpdate.NewRC))
	if err != nil {
		t.Fatalf("Unable to retrieve value put in roll store: %s", err)
	}

	if storedUpdate.NewRC != newUpdate.NewRC {
		t.Errorf("Stored update didn't have expected new rc value: wanted '%s' but got '%s'", newUpdate.NewRC, storedUpdate.NewRC)
	}

	if storedUpdate.OldRC != oldRCID {
		t.Errorf("Stored update didn't have expected old rc value: wanted '%s' but got '%s'", oldRCID, storedUpdate.OldRC)
	}

	_, err = rollstore.rcstore.Get(newUpdate.NewRC)
	if err != nil {
		t.Fatalf("Shouldn't have failed to fetch new RC: %s", err)
	}

	rcLabels, err := rollstore.labeler.GetLabels(labels.RC, storedUpdate.NewRC.String())
	if err != nil {
		t.Fatalf("Unable to fetch labels for newly created new RC: %s", err)
	}

	if rcLabels.Labels["some_key"] != "some_val" {
		t.Errorf("Expected labels to be set on new RC")
	}

	ruLabels, err := rollstore.labeler.GetLabels(labels.RU, newUpdate.ID().String())
	if err != nil {
		t.Fatalf("Unable to fetch labels for newly created new RU: %s", err)
	}

	if ruLabels.Labels["some_key"] != "some_val" {
		t.Errorf("Expected labels to be set on new RU")
	}
}
func (r rctlParams) RollingUpdate(oldID, newID string, want, need int, pagerdutyServiceKey string) {
	if want < need {
		r.logger.WithFields(logrus.Fields{
			"want": want,
			"need": need,
		}).Fatalln("Cannot run update with desired replicas less than minimum replicas")
	}
	sessions := make(chan string)
	quit := make(chan struct{})
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, r.baseClient, sessions, quit, r.logger)

	sessionID := <-sessions
	if sessionID == "" {
		r.logger.NoFields().Fatalln("Could not acquire session")
	}
	session := r.kps.NewUnmanagedSession(sessionID, "")

	alerter := alerting.NewNop()
	if pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(pagerdutyServiceKey, r.httpClient)
		if err != nil {
			r.logger.WithError(err).Fatalln("Could not initialize pagerduty alerter")
		}
	}

	result := make(chan bool, 1)
	go func() {
		result <- roll.NewUpdate(roll_fields.Update{
			OldRC:           rc_fields.ID(oldID),
			NewRC:           rc_fields.ID(newID),
			DesiredReplicas: want,
			MinimumReplicas: need,
		}, r.kps, r.rcs, r.hcheck, r.labeler, r.sched, r.logger, session, alerter).Run(quit)
		close(result)
	}()

	signals := make(chan os.Signal, 2)
	signal.Notify(signals, syscall.SIGTERM, os.Interrupt)

LOOP:
	for {
		select {
		case <-signals:
			// try to clean up locks on ^C
			close(quit)
			// do not exit right away - the session and result channels will be
			// closed after the quit is requested, ensuring that the locks held
			// by the farm were released.
			r.logger.NoFields().Errorln("Got signal, exiting")
		case <-sessions:
			r.logger.NoFields().Fatalln("Lost session")
		case res := <-result:
			// done, either due to ^C (already printed message above) or
			// clean finish
			if res {
				r.logger.NoFields().Infoln("Done")
			}
			break LOOP
		}
	}
}
// Creates a rolling update that may or may not already have an existing old
// RC. If one matches the oldRCSelector, it will be used as the old RC in the
// new update. If one does not exist, a "dummy" old RC will be created that is
// identical to the specifications for the new RC.
// Returns an error if the old RC exists but is part of another RU, or if
// the label selector returns more than one match.
func (s consulStore) CreateRollingUpdateFromOneMaybeExistingWithLabelSelector(
	oldRCSelector klabels.Selector,
	desiredReplicas int,
	minimumReplicas int,
	leaveOld bool,
	rollDelay time.Duration,
	newRCManifest manifest.Manifest,
	newRCNodeSelector klabels.Selector,
	newRCPodLabels klabels.Set,
	newRCLabels klabels.Set,
	rollLabels klabels.Set,
) (u roll_fields.Update, err error) {
	// This function may or may not create old and new RCs and subsequently
	// fail, so we defer a function that does any cleanup (if applicable)
	var cleanupOldRC func()
	var cleanupNewRC func()

	defer func() {
		if err != nil {
			if cleanupOldRC != nil {
				cleanupOldRC()
			}

			if cleanupNewRC != nil {
				cleanupNewRC()
			}
		}
	}()

	session, renewalErrCh, err := s.newRUCreationSession()
	if err != nil {
		return roll_fields.Update{}, err
	}
	defer session.Destroy()

	// Check if any RCs match the oldRCSelector
	matches, err := s.labeler.GetMatches(oldRCSelector, labels.RC, false)
	if err != nil {
		return roll_fields.Update{}, err
	}

	var oldRCID rc_fields.ID
	if len(matches) > 1 {
		return roll_fields.Update{}, AmbiguousRCSelector
	} else if len(matches) == 1 {
		oldRCID = rc_fields.ID(matches[0].ID)
	} else {
		if leaveOld {
			return roll_fields.Update{}, util.Errorf(
				"Can't create an update with LeaveOld set if there is no old RC (sel=%s)",
				oldRCSelector.String(),
			)
		}

		// Create the old RC using the same info as the new RC, it'll be
		// removed when the update completes anyway
		rc, err := s.rcstore.Create(newRCManifest, newRCNodeSelector, newRCPodLabels)
		if err != nil {
			return roll_fields.Update{}, err
		}

		oldRCID = rc.ID
		cleanupOldRC = func() {
			err = s.rcstore.Delete(oldRCID, false)
			if err != nil {
				s.logger.WithError(err).Errorf("Unable to cleanup newly-created old RC %s after update creation failure:", oldRCID)
			}

			// Any labels we wrote will be deleted by rcstore.Delete()
		}

		// Copy the new RC labels to the old RC as well
		err = s.labeler.SetLabels(labels.RC, oldRCID.String(), newRCLabels)
		if err != nil {
			return roll_fields.Update{}, err
		}
	}

	// Lock the old RC to guarantee that no new updates can use it
	err = s.lockRCs(rc_fields.IDs{oldRCID}, session)
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Check for updates that exist that operate on the old RC
	err = s.checkForConflictingUpdates(rc_fields.IDs{oldRCID})
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Create the new RC
	var newRCID rc_fields.ID
	select {
	case err = <-renewalErrCh:
		return roll_fields.Update{}, err
	default:
		rc, err := s.rcstore.Create(newRCManifest, newRCNodeSelector, newRCPodLabels)
		if err != nil {
			return roll_fields.Update{}, err
		}

		newRCID = rc.ID
	}

	cleanupNewRC = func() {
		err = s.rcstore.Delete(newRCID, false)
		if err != nil {
			s.logger.WithError(err).Errorf("Unable to cleanup newly-created new RC %s after update creation failure:", newRCID)
		}

		// Any labels we wrote will be deleted by rcstore.Delete()
	}

	// lock newly-created new rc so it's less likely to race on it
	// with another parallel update creation
	err = s.lockRCs(rc_fields.IDs{newRCID}, session)
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Check once again for conflicting updates in case a racing update
	// creation grabbed the new RC we just created
	err = s.checkForConflictingUpdates(rc_fields.IDs{newRCID})
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Now that we know there are no RUs in progress, and we have the
	// update creation locks, we can safely apply labels.
	err = s.labeler.SetLabels(labels.RC, newRCID.String(), newRCLabels)
	if err != nil {
		return roll_fields.Update{}, err
	}

	u = roll_fields.Update{
		OldRC:           oldRCID,
		NewRC:           newRCID,
		DesiredReplicas: desiredReplicas,
		MinimumReplicas: minimumReplicas,
		LeaveOld:        leaveOld,
		RollDelay:       rollDelay,
	}
	return s.attemptRUCreation(u, rollLabels, renewalErrCh)
}
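// A hypothetical invocation sketch. The helper name exampleCreateRU, the
// selector values, and the replica counts are illustrative assumptions, not
// part of the package; argument order follows the signature above.
func exampleCreateRU(s consulStore, m manifest.Manifest) (roll_fields.Update, error) {
	oldSel := klabels.Everything().Add("app", klabels.EqualsOperator, []string{"hello"})
	return s.CreateRollingUpdateFromOneMaybeExistingWithLabelSelector(
		oldSel, // reuse an existing RC labeled app=hello, if exactly one matches
		3,      // desiredReplicas
		2,      // minimumReplicas
		false,  // leaveOld: delete the old RC once the roll completes
		0,      // rollDelay
		m,      // manifest for the new RC
		klabels.Everything().Add("test", klabels.EqualsOperator, []string{"yes"}), // node selector for the new RC
		klabels.Set{"app": "hello"}, // newRCPodLabels
		klabels.Set{"app": "hello"}, // newRCLabels
		klabels.Set{"app": "hello"}, // rollLabels
	)
}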