// When there is no session, an update + close should never be written.
func TestHealthSessionDestroy(t *testing.T) {
	// Standard Consul test fixture
	f := NewConsulTestFixture(t)
	defer f.Close()

	// Launch an updater with manual control over health checks and session management
	checks := make(chan WatchResult)
	sessions := make(chan string)
	go processHealthUpdater(f.Client, checks, sessions, logging.TestLogger())
	waiter := f.NewKeyWaiter(hKey)

	// Create health result & create session => write
	session := f.CreateSession()
	sessions <- session
	checks <- h1
	waiter.WaitForChange()
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || !r.ValueEquiv(h1) {
		t.Fatalf("unexpected health, got value %#v error %#v", r, err)
	}

	// Tell the health updater that the session ended => no action
	sessions <- ""
	// NOTE(review): sleeps give the updater goroutine time to (not) act;
	// there is no synchronization point to wait on for a non-event.
	time.Sleep(50 * time.Millisecond)

	// Give the updater a new check => no action
	checks <- h2
	time.Sleep(50 * time.Millisecond)

	// No more health checks => exit. With no live session, closing the
	// checks channel must not trigger the usual delete-on-close write.
	close(checks)
	time.Sleep(100 * time.Millisecond)
	// The stored value must still be the one written under the old session.
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || !r.ValueEquiv(h1) {
		t.Fatalf("unexpected health, got value %#v error %#v", r, err)
	}
}
// TestExit checks that the session manager function exits once the session reader quits // reading and signals to the manager to exit. func TestExit(t *testing.T) { t.Parallel() f := NewFixture(t) defer f.Stop() sessions := make(chan string) done := make(chan struct{}) exited := make(chan struct{}) go func() { SessionManager( api.SessionEntry{TTL: "10s"}, f.Client, sessions, done, logging.TestLogger(), ) close(exited) }() // Wait for a session to appear for s := ""; s == ""; s = <-sessions { } // Closing the "done" channel signals to the session manager that the reader has quit // reading the stream. It should exit soon thereafter. close(done) select { case <-exited: case <-time.After(20 * time.Second): t.Error("timed out waiting for SessionManager to exit") } }
// Basic test of the full health manager: create a service, update its health, then // destroy it. func TestHealthBasic(t *testing.T) { f := NewConsulTestFixture(t) defer f.Close() waiter := f.NewKeyWaiter(hKey) manager := f.Store.NewHealthManager("node", logging.TestLogger()) defer manager.Close() updater := manager.NewUpdater("svc", "svc") // Creating an updater with no health statuses shouldn't write anything time.Sleep(100 * time.Millisecond) if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty { t.Fatalf("health expected to be empty, got value %#v error %#v", r, err) } // Write one health check result if err := updater.PutHealth(h1); err != nil { t.Error("error writing new health value: ", err) } // Check health result in Consul waiter.WaitForChange() if r, err := f.Store.GetHealth("svc", "node"); err != nil || !r.ValueEquiv(h1) { t.Fatalf("unexpected health, got value %#v error %#v", r, err) } // Destroy the service, health check should disappear updater.Close() waiter.WaitForChange() if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty { t.Fatalf("health expected to be empty, got value %#v error %#v", r, err) } }
// Test that as long as there is no session, there should be no updates. func TestHealthSessionRequired(t *testing.T) { // Standard Consul test fixture f := NewConsulTestFixture(t) defer f.Close() // Launch an updater with manual control over health checks and session management checks := make(chan WatchResult) sessions := make(chan string) go processHealthUpdater(f.Client, checks, sessions, logging.TestLogger()) // There should be no health check initially if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty { t.Fatalf("health expected to be empty, got value %#v error %#v", r, err) } // Adding health checks pre-session shouldn't affect Consul checks <- h1 time.Sleep(50 * time.Millisecond) checks <- h1 time.Sleep(50 * time.Millisecond) checks <- h2 time.Sleep(100 * time.Millisecond) if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty { t.Fatalf("health expected to be empty, got value %#v error %#v", r, err) } }
// A basic test of the ConsulSessionManager: create a session, then quit. func TestSessionBasics(t *testing.T) { f := NewConsulTestFixture(t) defer f.Close() sessions := make(chan string) done := make(chan struct{}) go ConsulSessionManager( api.SessionEntry{ Behavior: api.SessionBehaviorDelete, TTL: "10s", }, f.Client, sessions, done, logging.TestLogger(), ) ok := false for s := range sessions { if s != "" && done != nil { ok = true close(done) done = nil } } if !ok { t.Error("valid session never appeared") } }
// Test that new service statuses are written to Consul and equivalent statues write nothing.
func TestHealthUpdate(t *testing.T) {
	f := NewConsulTestFixture(t)
	defer f.Close()
	manager := f.Store.NewHealthManager("node", logging.TestLogger())
	defer manager.Close()
	updater := manager.NewUpdater("svc", "svc")
	defer updater.Close()
	waiter := f.NewKeyWaiter(hKey)

	// Count how many changes to the health value there are
	counterChan := make(chan chan int, 1)
	counterWaiter := f.NewKeyWaiter(hKey)
	go func() {
		count := 0
		for {
			counterWaiter.WaitForChange()
			count += 1
			// After each observed change, check whether the test has asked
			// for the tally; if so, report the count and stop counting.
			select {
			case c := <-counterChan:
				c <- count
				return
			default:
			}
		}
	}()

	// Inline comments track the expected number of Consul writes so far:
	// a repeated equivalent status must not produce a new write.
	if err := updater.PutHealth(h1); err != nil { // 1
		t.Error("error writing health: ", err)
	}
	waiter.WaitForChange()
	if err := updater.PutHealth(h1); err != nil { // still 1
		t.Error("error writing health: ", err)
	}
	time.Sleep(30 * time.Millisecond)
	if err := updater.PutHealth(h1); err != nil { // still 1
		t.Error("error writing health: ", err)
	}
	if err := updater.PutHealth(h2); err != nil { // 2
		t.Error("error writing health: ", err)
	}
	waiter.WaitForChange()
	if err := updater.PutHealth(h2); err != nil { // still 2
		t.Error("error writing health: ", err)
	}
	c := make(chan int)
	counterChan <- c
	if err := updater.PutHealth(h1); err != nil { // 3
		t.Error("error writing health: ", err)
	}
	count := <-c
	t.Logf("Consul received %d updates", count)
	// Counter is asynchronous, so it's possible for it to miss an update.
	if !(2 <= count && count <= 3) {
		t.Fail()
	}
}
// Test that if the session restarts, health checks should be restored.
func TestHealthSessionRestart(t *testing.T) {
	// Standard Consul test fixture
	f := NewConsulTestFixture(t)
	defer f.Close()

	// Launch an updater with manual control over health checks and session management
	checks := make(chan WatchResult)
	sessions := make(chan string)
	go processHealthUpdater(f.Client, checks, sessions, logging.TestLogger())
	waiter := f.NewKeyWaiter(hKey)

	// Add check & add session => write
	s1 := f.CreateSession()
	sessions <- s1
	checks <- h1
	waiter.WaitForChange()
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || !r.ValueEquiv(h1) {
		t.Fatalf("unexpected health, got value %#v error %#v", r, err)
	}

	// Changing the health status should send an update
	checks <- h2
	waiter.WaitForChange()
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || !r.ValueEquiv(h2) {
		t.Fatalf("unexpected health, got value %#v error %#v", r, err)
	}

	// Destroying the session should automatically clear the update
	f.DestroySession(s1)
	sessions <- ""
	waiter.WaitForChange()
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty {
		t.Fatalf("health expected to be empty, got value %#v error %#v", r, err)
	}

	// No change when updating health mid-session
	// (no session is live here, so h3 must not be written yet)
	checks <- h3
	time.Sleep(50 * time.Millisecond)
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty {
		t.Fatalf("health expected to be empty, got value %#v error %#v", r, err)
	}

	// When the new session is refreshed, the key should reappear
	// with the last health value (h3) submitted while sessionless.
	s2 := f.CreateSession()
	sessions <- s2
	waiter.WaitForChange()
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || !r.ValueEquiv(h3) {
		t.Fatalf("unexpected health, got value %#v error %#v", r, err)
	}

	// Shut down the health checker, deleting the health check
	close(checks)
	waiter.WaitForChange()
	if r, err := f.Store.GetHealth("svc", "node"); err != nil || r != hEmpty {
		t.Fatalf("health expected to be empty, got value %#v error %#v", r, err)
	}
}
// Test that the KeyringPolicy can load a keyring func TestKeyring(t *testing.T) { // Setup three entities, two of which are saved to a keyring file h := testHarness{} msg := []byte("Hello World!") ents := h.loadEntities() sigs := h.signMessage(msg, ents) keyfile := h.tempFile() defer rm(t, keyfile) h.saveKeys(ents[:2], keyfile) if h.Err != nil { t.Error(h.Err) return } policy, err := LoadKeyringPolicy( keyfile, map[types.PodID][]string{ "restricted": {fmt.Sprintf("%X", ents[1].PrimaryKey.Fingerprint)}, }, ) if err != nil { t.Error("creating keyring policy:", err) return } logger := logging.TestLogger() // Key in keyring signs the message err = policy.AuthorizeApp(TestSigned{"foo", "foo", msg, sigs[0]}, logger) if err != nil { t.Error("error authorizing pod manifest:", err) } // Key not in keyring signs the message err = policy.AuthorizeApp(TestSigned{"foo", "foo", msg, sigs[2]}, logger) if err == nil { t.Error("accepted unauthorized signature") } // Verify preparer authorization policy err = policy.AuthorizeApp(TestSigned{"restricted", "restricted", msg, sigs[1]}, logger) if err != nil { t.Error("error authorizing pod manifest:", err) } err = policy.AuthorizeApp(TestSigned{"restricted", "restricted", msg, sigs[0]}, logger) if err == nil { t.Error("accepted unauthorized signature") } }
// Test that FileKeyringPolicy can reload a keyring file when it // changes. func TestKeyAddition(t *testing.T) { h := testHarness{} msg := []byte("Testing 1, 2, 3") // two entities ents := h.loadEntities()[:2] // that both sign the message sigs := h.signMessage(msg, ents) // and a temporary file to hold the keyring keyfile := h.tempFile() defer rm(t, keyfile) h.saveKeys(ents[:1], keyfile) h.backdate(keyfile, time.Minute) if h.Err != nil { t.Error(h.Err) return } policy, err := NewFileKeyringPolicy(keyfile, nil) if err != nil { t.Errorf("%s: error loading keyring: %s", keyfile, err) return } logger := logging.TestLogger() // Keyring contains only ents[0] err = policy.AuthorizeApp(TestSigned{"test", "test", msg, sigs[0]}, logger) if err != nil { t.Error("expected authorized, got error:", err) } err = policy.AuthorizeApp(TestSigned{"test", "test", msg, sigs[1]}, logger) if err == nil { t.Error("expected failure, got authorization") } // Update the keyring file with both keys. The mtime is updated // because we backdated the previous version. h.saveKeys(ents, keyfile) err = policy.AuthorizeApp(TestSigned{"test", "test", msg, sigs[0]}, logger) if err != nil { t.Error("expected authorized, got error:", err) } err = policy.AuthorizeApp(TestSigned{"test", "test", msg, sigs[1]}, logger) if err != nil { t.Error("expected authorized, got error:", err) } }
func TestDirectoriesDoNotBreakEverything(t *testing.T) { tempDir, err := ioutil.TempDir("", "hook") Assert(t).IsNil(err, "the error should have been nil") defer os.RemoveAll(tempDir) podDir, err := ioutil.TempDir("", "pod") defer os.RemoveAll(podDir) Assert(t).IsNil(err, "the error should have been nil") Assert(t).IsNil(os.Mkdir(path.Join(tempDir, "mydir"), 0755), "Should not have erred") pod := pods.NewPod(podId, podDir) logger := logging.TestLogger() hooks := Hooks(os.TempDir(), &logger) err = hooks.runHooks(tempDir, AFTER_INSTALL, pod, testManifest(), logging.DefaultLogger) Assert(t).IsNil(err, "Got an error when running a directory inside the hooks directory") }
func TestUpdateStatus(t *testing.T) { logger := logging.TestLogger() healthManager := &MockHealthManager{} reality := []kp.ManifestResult{newManifestResult("foo"), newManifestResult("bar")} pods1 := updatePods(healthManager, nil, []PodWatch{}, reality, "", &logger) Assert(t).AreEqual(2, len(pods1), "new pods were not added") Assert(t).AreEqual(2, healthManager.UpdaterCreated, "new pods did not create an updaters") // Change the status port, expect one pod to change healthManager.Reset() builder := reality[0].Manifest.GetBuilder() builder.SetStatusPort(2) reality[0].Manifest = builder.GetManifest() pods2 := updatePods(healthManager, nil, pods1, reality, "", &logger) Assert(t).AreEqual(2, len(pods2), "updatePods() changed the number of pods") Assert(t).AreEqual(1, healthManager.UpdaterCreated, "one pod should have been refreshed") }
func TestUpdatePath(t *testing.T) { logger := logging.TestLogger() healthManager := &MockHealthManager{} reality := []kp.ManifestResult{newManifestResult("foo"), newManifestResult("bar")} pods1 := updatePods(healthManager, nil, nil, []PodWatch{}, reality, "bobnode", &logger) Assert(t).AreEqual(2, len(pods1), "new pods were not added") Assert(t).AreEqual(2, healthManager.UpdaterCreated, "new pods did not create an updaters") // Change the status port, expect one pod to change healthManager.Reset() builder := reality[0].Manifest.GetBuilder() builder.SetStatusPath("/_foobar") reality[0].Manifest = builder.GetManifest() pods2 := updatePods(healthManager, nil, nil, pods1, reality, "bobnode", &logger) Assert(t).AreEqual(2, len(pods2), "updatePods() changed the number of pods") Assert(t).AreEqual(1, healthManager.UpdaterCreated, "one pod should have been refreshed") Assert(t).AreEqual("https://bobnode:1/_status", pods2[0].statusChecker.URI, "pod should be checking correct path") Assert(t).AreEqual("https://bobnode:1/_foobar", pods2[1].statusChecker.URI, "pod should be checking correct path") }
func TestHookRunWithTimeout(t *testing.T) { timeout := 1 * time.Millisecond sleep := "1" // 1 second sleep to be executed by the script // build an executable file to feed to Hook contents := []byte("#!/bin/bash\nsleep " + sleep) tmpFile, err := tempFileWithContents("test-hook-run-with-timeout.", contents) if err != nil { t.Error(err.Error()) } defer os.Remove(tmpFile) hook := Hook{tmpFile, "timeout-test-hook", timeout, []string{}, logging.TestLogger()} toErr := hook.RunWithTimeout() if _, ok := toErr.(HookTimeoutError); !ok { // we either had no error or a different error t.Errorf("timeout did not throw a HookTimeoutError: timeout: %#v / sleep: %#v / err: %#v", timeout, sleep, toErr) } }
func TestDirectoriesDoNotBreakEverything(t *testing.T) { tempDir, err := ioutil.TempDir("", "hook") Assert(t).IsNil(err, "the error should have been nil") defer os.RemoveAll(tempDir) podDir, err := ioutil.TempDir("", "pod") defer os.RemoveAll(podDir) Assert(t).IsNil(err, "the error should have been nil") Assert(t).IsNil(os.Mkdir(path.Join(tempDir, "mydir"), 0755), "Should not have erred") // So PodFromPodHome doesn't bail out, write a minimal current_manifest.yaml ioutil.WriteFile(path.Join(podDir, "current_manifest.yaml"), []byte("id: my_hook"), 0755) pod, err := pods.PodFromPodHome("testNode", podDir) Assert(t).IsNil(err, "the error should have been nil") logger := logging.TestLogger() hooks := Hooks(os.TempDir(), &logger) err = hooks.runHooks(tempDir, AFTER_INSTALL, pod, testManifest(), logging.DefaultLogger) Assert(t).IsNil(err, "Got an error when running a directory inside the hooks directory") }
func updateWithHealth(t *testing.T, desiredOld, desiredNew int, oldNodes, newNodes map[types.NodeName]bool, checks map[types.NodeName]health.Result, ) (update, manifest.Manifest, manifest.Manifest) { podID := "mypod" oldManifest := podWithIDAndPort(podID, 9001) newManifest := podWithIDAndPort(podID, 9002) podMap := map[kptest.FakePodStoreKey]manifest.Manifest{} assignManifestsToNodes(types.PodID(podID), oldNodes, podMap, oldManifest, newManifest) assignManifestsToNodes(types.PodID(podID), newNodes, podMap, newManifest, oldManifest) kps := kptest.NewFakePodStore(podMap, nil) rcs := rcstore.NewFake() applicator := labels.NewFakeApplicator() oldRC, err := createRC(rcs, applicator, oldManifest, desiredOld, oldNodes) Assert(t).IsNil(err, "expected no error setting up old RC") newRC, err := createRC(rcs, applicator, newManifest, desiredNew, newNodes) Assert(t).IsNil(err, "expected no error setting up new RC") return update{ kps: kps, rcs: rcs, hcheck: checkertest.NewSingleService(podID, checks), labeler: applicator, logger: logging.TestLogger(), Update: fields.Update{ OldRC: oldRC.ID, NewRC: newRC.ID, }, }, oldManifest, newManifest }
// Check that the authorization policy of hooks follows that of the
// preparer, not their own name
func TestDpolHooks(t *testing.T) {
	h := testHarness{}
	msg := []byte("Once, I was a real Turtle.")
	ents := h.loadEntities()
	sigs := h.signMessage(msg, ents)
	keyfile := h.tempFile()
	defer rm(t, keyfile)
	h.saveKeys(ents, keyfile)
	polfile := h.tempFile()
	defer rm(t, polfile)
	// sigs[0] belongs to the "users" group, sigs[1] to "admins" (per the policy below).
	userSig := sigs[0]
	adminSig := sigs[1]
	h.saveYaml(
		RawDeployPol{
			Groups: map[DpGroup][]DpUserEmail{
				"users":  {"test1@testing"},
				"admins": {"test2@testing"},
			},
			Apps: map[string][]DpGroup{
				// Hooks should be treated like the preparer
				"preparer": {"admins"},
				// ...even if there is an app with the same name as a hook
				"myhook":  {"users"},
				"someapp": {"users"},
			},
		},
		polfile,
	)
	// testHarness accumulates the first error from the setup calls above.
	if h.Err != nil {
		t.Error(h.Err)
		return
	}
	policy, err := NewTestUserPolicy(keyfile, polfile)
	if err != nil {
		t.Error("error creating user policy: ", err)
		return
	}
	logger := logging.TestLogger()

	// P2 admins should be able to deploy the preparer and its hooks.
	err = policy.AuthorizeApp(TestSigned{"preparer", "preparer", msg, adminSig}, logger)
	if err != nil {
		t.Error("expected authorized, got error: ", err)
	}
	err = policy.AuthorizeApp(TestSigned{"preparer", "root", msg, adminSig}, logger)
	if err != nil {
		t.Error("expected authorized, got error: ", err)
	}
	err = policy.AuthorizeHook(TestSigned{"myhook", "myhook", msg, adminSig}, logger)
	if err != nil {
		t.Error("expected authorized, got error: ", err)
	}

	// Users should not be able to modify the preparer or run hooks
	err = policy.AuthorizeApp(TestSigned{"preparer", "preparer", msg, userSig}, logger)
	if err == nil {
		t.Error("expected failure, got authorization")
	}
	err = policy.AuthorizeApp(TestSigned{"preparer", "root", msg, userSig}, logger)
	if err == nil {
		t.Error("expected failure, got authorization")
	}
	err = policy.AuthorizeApp(TestSigned{"preparer", "someapp", msg, userSig}, logger)
	if err == nil {
		t.Error("expected failure, got authorization")
	}
	err = policy.AuthorizeHook(TestSigned{"myhook", "myhook", msg, userSig}, logger)
	if err == nil {
		t.Error("expected failure, got authorization")
	}
	err = policy.AuthorizeHook(TestSigned{"myhook", "preparer", msg, userSig}, logger)
	if err == nil {
		t.Error("expected failure, got authorization")
	}
}
// Test that deploy policy changes will be picked up.
func TestDpolChanges(t *testing.T) {
	h := testHarness{}
	msg := []byte("Oh dear! Oh dear! I shall be too late!")
	ents := h.loadEntities()
	sigs := h.signMessage(msg, ents)
	keyfile := h.tempFile()
	defer rm(t, keyfile)
	h.saveKeys(ents, keyfile)
	polfile := h.tempFile()
	defer rm(t, polfile)

	// Initial policy
	h.saveYaml(
		RawDeployPol{
			Groups: map[DpGroup][]DpUserEmail{
				"a": {"test1@testing"},
				"b": {"test1@testing", "test2@testing"},
				"c": {"test3@testing"},
			},
			Apps: map[string][]DpGroup{
				"foo": {"a"},
				"bar": {"b"},
				"baz": {"a", "c"},
			},
		},
		polfile,
	)
	// Backdate so the rewrite below is guaranteed to advance the file's mtime.
	h.backdate(polfile, time.Minute)
	// testHarness accumulates the first error from the setup calls above.
	if h.Err != nil {
		t.Error(h.Err)
		return
	}
	policy, err := NewTestUserPolicy(keyfile, polfile)
	if err != nil {
		t.Error("error creating user policy: ", err)
		return
	}
	logger := logging.TestLogger()

	// Test every combination of app/user and check it against the expectation matrix:
	// expected[app][i] is whether sigs[i] should be authorized to deploy app.
	tester := func(expected map[types.PodID][3]bool) {
		for app, results := range expected {
			for signer := range results {
				err = policy.AuthorizeApp(TestSigned{app, string(app), msg, sigs[signer]}, logger)
				if err != nil && results[signer] {
					t.Errorf(
						"app %s signer %d: expected authorized, got error: %s",
						app, signer, err,
					)
				} else if err == nil && !results[signer] {
					t.Errorf(
						"app %s signer %d: expected failure, got authorization",
						app, signer,
					)
				}
			}
		}
	}
	tester(map[types.PodID][3]bool{
		"foo": {true, false, false},
		"bar": {true, true, false},
		"baz": {true, false, true},
		"ohi": {false, false, false},
	})

	// Change the policy
	h.saveYaml(
		RawDeployPol{
			Groups: map[DpGroup][]DpUserEmail{
				"a": {"test1@testing", "test2@testing"},
				"b": {"test1@testing"},
				"c": {"test3@testing"},
			},
			Apps: map[string][]DpGroup{
				"foo": {"a"},
				"bar": {"b"},
				"baz": {"c"},
				"ohi": {"a", "b", "c"},
			},
		},
		polfile,
	)
	// The same matrix re-evaluated against the rewritten policy file.
	tester(map[types.PodID][3]bool{
		"foo": {true, true, false},
		"bar": {true, false, false},
		"baz": {false, false, true},
		"ohi": {true, true, true},
	})
}