// consulStoreWithFakeKV builds a consulStore whose KV backend is an
// in-memory fake, so tests never touch a real consul agent.
func consulStoreWithFakeKV() *consulStore {
	fakeKV := consulutil.NewFakeClient().KV()
	return &consulStore{
		kv:      fakeKV,
		logger:  logging.DefaultLogger,
		retries: 0,
	}
}
// consulStoreWithFakeKV builds a consulStore backed by an in-memory fake
// consul KV and a fake label applicator, for use in tests.
func consulStoreWithFakeKV() *consulStore {
	fakeClient := consulutil.NewFakeClient()
	return &consulStore{
		kv:         fakeClient.KV(),
		applicator: labels.NewFakeApplicator(),
		logger:     logging.DefaultLogger,
	}
}
// Instantiates a preparer with a podStore and podStatusStore backed by a fake // consul KV implementation. The podStore field is necessary for acquiring the // "intent" manifest for uuid pods, while the podStatusStore is necessary for // getting the reality manifest func testResultSetPreparer() *Preparer { fakeStatusStore := statusstoretest.NewFake() return &Preparer{ podStatusStore: podstatus.NewConsul(fakeStatusStore, kp.PreparerPodStatusNamespace), podStore: podstore.NewConsul(consulutil.NewFakeClient().KV()), } }
func setupServerWithFakePodStore() (podstore.Store, store) { fakePodStore := podstore.NewConsul(consulutil.NewFakeClient().KV_) server := store{ scheduler: fakePodStore, } return fakePodStore, server }
func TestPublishLatestRCsWithLockInfoWithLocks(t *testing.T) { client := consulutil.NewFakeClient() fakeKV := client.KV() rcstore := NewConsul(client, 1) inCh := make(chan []fields.RC) defer close(inCh) quitCh := make(chan struct{}) defer close(quitCh) lockResultCh, errCh := rcstore.publishLatestRCsWithLockInfo(inCh, quitCh) go func() { for err := range errCh { t.Fatalf("Unexpected error on errCh: %s", err) } }() // Create a test case with 2 RCs with no locks lockedCase := LockInfoTestCase{ InputRCs: []fields.RC{fields.RC{ID: "abc"}, fields.RC{ID: "123"}}, ExpectedOutput: []RCLockResult{ { RC: fields.RC{ID: "abc"}, LockedForMutation: true, }, { RC: fields.RC{ID: "123"}, LockedForOwnership: true, }, }, } ownershipLockPath, err := rcstore.ownershipLockPath("123") if err != nil { t.Fatalf("Unable to compute ownership lock path: %s", err) } _, _, err = fakeKV.Acquire(&api.KVPair{ Key: ownershipLockPath, }, nil) if err != nil { t.Fatalf("Unable to lock for ownership: %s", err) } mutationLockPath, err := rcstore.mutationLockPath("abc") if err != nil { t.Fatalf("Unable to compute mutation lock path: %s", err) } _, _, err = fakeKV.Acquire(&api.KVPair{ Key: mutationLockPath, }, nil) if err != nil { t.Fatalf("Unable to lock for mutation: %s", err) } verifyLockInfoTestCase(t, lockedCase, inCh, lockResultCh) // Add an update creation lock to the second one lockedCase2 := LockInfoTestCase{ InputRCs: []fields.RC{fields.RC{ID: "abc"}, fields.RC{ID: "123"}}, ExpectedOutput: []RCLockResult{ { RC: fields.RC{ID: "abc"}, LockedForMutation: true, }, { RC: fields.RC{ID: "123"}, LockedForOwnership: true, LockedForUpdateCreation: true, }, }, } updateCreationLockPath, err := rcstore.updateCreationLockPath("123") if err != nil { t.Fatalf("Unable to compute update creation lock path: %s", err) } _, _, err = fakeKV.Acquire(&api.KVPair{ Key: updateCreationLockPath, }, nil) if err != nil { t.Fatalf("Unable to lock for updateCreation: %s", err) } verifyLockInfoTestCase(t, lockedCase2, inCh, 
lockResultCh) }
func TestPublishLatestRCsWithLockInfoNoLocks(t *testing.T) { client := consulutil.NewFakeClient() rcstore := NewConsul(client, 1) inCh := make(chan []fields.RC) defer close(inCh) quitCh := make(chan struct{}) defer close(quitCh) lockResultCh, errCh := rcstore.publishLatestRCsWithLockInfo(inCh, quitCh) go func() { for err := range errCh { t.Fatalf("Unexpected error on errCh: %s", err) } }() // Create a test case with 2 RCs with no locks unlockedCase := LockInfoTestCase{ InputRCs: []fields.RC{fields.RC{ID: "abc"}, fields.RC{ID: "123"}}, ExpectedOutput: []RCLockResult{ { RC: fields.RC{ID: "abc"}, }, { RC: fields.RC{ID: "123"}, }, }, } verifyLockInfoTestCase(t, unlockedCase, inCh, lockResultCh) // Write the same input once more without reading the channel and make // sure that doesn't cause timeouts select { case inCh <- unlockedCase.InputRCs: case <-time.After(1 * time.Second): t.Fatalf("Timed out writing to input channel") } // create a new case unlockedCase2 := LockInfoTestCase{ InputRCs: []fields.RC{ fields.RC{ID: "abc"}, fields.RC{ID: "123"}, fields.RC{ID: "456"}, }, ExpectedOutput: []RCLockResult{ { RC: fields.RC{ID: "abc"}, }, { RC: fields.RC{ID: "123"}, }, { RC: fields.RC{ID: "456"}, }, }, } verifyLockInfoTestCase(t, unlockedCase2, inCh, lockResultCh) }
// storeWithFakeKV returns a consulStore wired to an in-memory fake consul
// KV client for testing.
func storeWithFakeKV() *consulStore {
	fakeClient := consulutil.NewFakeClient()
	return &consulStore{
		kv: fakeClient.KV(),
	}
}
func TestAllPods(t *testing.T) { fakeConsulClient := consulutil.NewFakeClient() store := NewConsulStore(fakeConsulClient) consulStore, ok := store.(*consulStore) if !ok { t.Fatal("Type assertion to consulStore struct type failed") } // Add a new uuid pod (i.e. we expect an index rather than a manifest to be written to /intent) uuidKey, err := consulStore.podStore.Schedule(testManifest("first_pod"), "node1") if err != nil { t.Fatal(err) } // now write a legacy manifest to /intent _, err = store.SetPod(INTENT_TREE, "node2", testManifest("second_pod")) if err != nil { t.Fatal(err) } // Now retrieve all the pods and make sure the manifest results are sane allPods, _, err := store.AllPods(INTENT_TREE) if err != nil { t.Fatalf("Unexpected error calling all pods: %s", err) } if len(allPods) != 2 { t.Errorf("Expected 2 pods to be returned, but there were '%d'", len(allPods)) } // Make sure the legacy pod was found legacyFound := false for _, result := range allPods { if result.PodLocation.Node == "node2" { legacyFound = true if result.Manifest.ID() != "second_pod" { t.Errorf("Legacy pod manifest should have had id '%s' but was '%s'", "second_pod", result.Manifest.ID()) } if result.PodUniqueKey != nil { t.Error("Legacy pod should not have a uuid") } } } if !legacyFound { t.Error("Didn't find the legacy (non uuid) pod") } uuidPodFound := false for _, result := range allPods { if result.PodLocation.Node == "node1" { uuidPodFound = true if result.Manifest.ID() != "first_pod" { t.Errorf("UUID pod manifest should have had id '%s' but was '%s'", "first_pod", result.Manifest.ID()) } if result.PodUniqueKey == nil { t.Error("UUID pod should have a uuid") } if result.PodUniqueKey.ID != uuidKey.ID { t.Errorf("Expected legacy pod to have PodUniqueKeyID '%s', was '%s'", "node2/second_pod", result.PodUniqueKey.ID) } } } if !uuidPodFound { t.Error("Didn't find uuid pod") } }
func TestAllPods(t *testing.T) { fakeConsulClient := consulutil.NewFakeClient() store := NewConsulStore(fakeConsulClient) // Add a new uuid pod (i.e. we expect an index rather than a manifest to be written to /intent) uuidKey, err := store.podStore.Schedule(testManifest("first_pod"), "node1") if err != nil { t.Fatal(err) } // Add a status entry for the pod err = store.podStatusStore.Set(uuidKey, podstatus.PodStatus{Manifest: "id: first_pod"}) if err != nil { t.Fatal(err) } // Write the /reality index for the pod err = store.podStore.WriteRealityIndex(uuidKey, "node1") if err != nil { t.Fatal(err) } // now write a legacy manifest to /intent and /reality _, err = store.SetPod(INTENT_TREE, "node2", testManifest("second_pod")) if err != nil { t.Fatal(err) } _, err = store.SetPod(REALITY_TREE, "node2", testManifest("second_pod")) if err != nil { t.Fatal(err) } // Now retrieve all the /intent pods and make sure the manifest results are sane allIntentPods, _, err := store.AllPods(INTENT_TREE) if err != nil { t.Fatalf("Unexpected error calling all pods: %s", err) } if len(allIntentPods) != 2 { t.Errorf("Expected 2 intent pods to be returned, but there were %d", len(allIntentPods)) } // Make sure the legacy pod was found legacyFound := false for _, result := range allIntentPods { if result.PodLocation.Node == "node2" { legacyFound = true if result.Manifest.ID() != "second_pod" { t.Errorf("Legacy pod manifest should have had id '%s' but was '%s'", "second_pod", result.Manifest.ID()) } if result.PodUniqueKey != "" { t.Error("Legacy pod should not have a uuid") } } } if !legacyFound { t.Error("Didn't find the legacy (non uuid) pod") } uuidPodFound := false for _, result := range allIntentPods { if result.PodLocation.Node == "node1" { uuidPodFound = true if result.Manifest.ID() != "first_pod" { t.Errorf("UUID pod manifest should have had id '%s' but was '%s'", "first_pod", result.Manifest.ID()) } if result.PodUniqueKey == "" { t.Error("UUID pod should have a uuid") } if 
result.PodUniqueKey != uuidKey { t.Errorf("Expected legacy pod to have PodUniqueKey '%s', was '%s'", "node2/second_pod", result.PodUniqueKey) } } } if !uuidPodFound { t.Error("Didn't find uuid pod") } // Now retrieve all the pods and make sure the manifest results are sane allRealityPods, _, err := store.AllPods(REALITY_TREE) if err != nil { t.Fatalf("Unexpected error calling all pods: %s", err) } if len(allRealityPods) != 2 { t.Errorf("Expected 2 reality pods to be returned, but there were %d", len(allRealityPods)) } // Make sure the legacy pod was found legacyFound = false for _, result := range allRealityPods { if result.PodLocation.Node == "node2" { legacyFound = true if result.Manifest.ID() != "second_pod" { t.Errorf("Legacy pod manifest should have had id '%s' but was '%s'", "second_pod", result.Manifest.ID()) } if result.PodUniqueKey != "" { t.Error("Legacy pod should not have a uuid") } } } if !legacyFound { t.Error("Didn't find the legacy (non uuid) pod") } uuidPodFound = false for _, result := range allRealityPods { if result.PodLocation.Node == "node1" { uuidPodFound = true if result.Manifest.ID() != "first_pod" { t.Errorf("UUID pod manifest should have had id '%s' but was '%s'", "first_pod", result.Manifest.ID()) } if result.PodUniqueKey == "" { t.Error("UUID pod should have a uuid") } if result.PodUniqueKey != uuidKey { t.Errorf("Expected legacy pod to have PodUniqueKey '%s', was '%s'", "node2/second_pod", result.PodUniqueKey) } } } if !uuidPodFound { t.Error("Didn't find uuid pod") } }