func TestBackend(t *testing.T) {
	etcd, err := clustertest.StartEtcd()
	if err != nil {
		t.Fatal(err)
	}
	if etcd == nil {
		t.Fatal("etcd not available in PATH, can't test backend")
	}
	defer etcd.Shutdown()

	dsn := *dsn
	if env := os.Getenv("DSN"); env != "" {
		// for running in CI container
		dsn = env
	}

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		t.Fatalf("sql.Open: %s", err)
	}

	// Drop all tables.
	for _, item := range schema {
		if _, err := db.Exec("DROP TABLE IF EXISTS " + item.Name + " CASCADE"); err != nil {
			t.Fatalf("failed to drop table %s: %s", item.Name, err)
		}
	}
	if _, err := db.Exec("DROP TABLE IF EXISTS gorp_migrations"); err != nil {
		t.Fatal(err)
	}

	// Recreate all tables.
	src := migrate.FileMigrationSource{Dir: "migrations"}
	if _, err := migrate.Exec(db, "postgres", src, migrate.Up); err != nil {
		t.Fatal(err)
	}

	// Start up backend.
	c := etcd.Join("/test", "testcase", "era")
	desc := &cluster.PeerDesc{
		ID:      "testcase",
		Era:     "era",
		Version: "testver",
	}
	b, err := NewBackend(scope.New(), dsn, c, desc)
	if err != nil {
		t.Fatal(err)
	}
	defer b.Close()

	// Run test suite.
	backend.IntegrationTest(t, func() proto.Backend { return nonClosingBackend{b} })
}
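
// nonClosingBackend is referenced by TestBackend above but not shown in this
// excerpt. A minimal sketch of a plausible definition follows: the integration
// suite closes each backend it is handed, while this test reuses one shared
// backend across the whole suite, so the wrapper swallows Close to keep it
// alive. This assumes proto.Backend declares a Close method; adjust to the
// real interface if it differs.
type nonClosingBackend struct {
	proto.Backend
}

func (nonClosingBackend) Close() {}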
func TestEtcdCluster(t *testing.T) {
	s, err := clustertest.StartEtcd()
	if err != nil {
		t.Fatal(err)
	}
	if s == nil {
		t.Skip("etcd not in PATH, skipping tests")
	}
	defer s.Shutdown()

	Convey("Observe peer departure", t, func() {
		a := s.Join("/departure", "a", "0")
		// no defer a.Part() because we'll do that explicitly
		b := s.Join("/departure", "b", "0")
		defer b.Part()

		So(<-a.Watch(), ShouldResemble, &cluster.PeerJoinedEvent{cluster.PeerDesc{ID: "b", Era: "0"}})
		a.Part()
		So(<-b.Watch(), ShouldResemble, &cluster.PeerLostEvent{cluster.PeerDesc{ID: "a"}})
	})

	Convey("Observe initial peers upon joining", t, func() {
		a := s.Join("/initial", "a", "0")
		defer a.Part()
		So(a.Peers(), ShouldResemble, []cluster.PeerDesc{
			{ID: "a", Era: "0"},
		})

		b := s.Join("/initial", "b", "0")
		defer b.Part()
		So(b.Peers(), ShouldResemble, []cluster.PeerDesc{
			{ID: "a", Era: "0"},
			{ID: "b", Era: "0"},
		})
	})

	Convey("Updates are seen", t, func() {
		a := s.Join("/updates", "a", "0")
		defer a.Part()
		b := s.Join("/updates", "b", "0")
		defer b.Part()

		b.Update(&cluster.PeerDesc{ID: "b", Era: "1"})
		b.Update(&cluster.PeerDesc{ID: "b", Era: "2"})
		So(<-a.Watch(), ShouldResemble, &cluster.PeerJoinedEvent{cluster.PeerDesc{ID: "b", Era: "0"}})
		So(<-a.Watch(), ShouldResemble, &cluster.PeerAliveEvent{cluster.PeerDesc{ID: "b", Era: "1"}})
		So(<-a.Watch(), ShouldResemble, &cluster.PeerAliveEvent{cluster.PeerDesc{ID: "b", Era: "2"}})
	})

	Convey("Secrets are created if necessary", t, func() {
		kms := security.LocalKMS()
		a := s.Join("/secrets1", "a", "0")
		defer a.Part()

		secret, err := a.GetSecret(kms, "test1", 16)
		So(err, ShouldBeNil)
		So(len(secret), ShouldEqual, 16)

		secretCopy, err := a.GetSecret(kms, "test1", 16)
		So(err, ShouldBeNil)
		So(string(secretCopy), ShouldEqual, string(secret))
	})

	Convey("Race to create secret is conceded gracefully", t, func() {
		kms := &syncKMS{
			KMS: security.LocalKMS(),
			c:   make(chan struct{}),
		}
		a := s.Join("/secrets1", "a", "0")
		defer a.Part()

		sc := make(chan []byte)
		errc := make(chan error)
		go func() {
			sec, err := a.GetSecret(kms, "test2", 16)
			errc <- err
			sc <- sec
		}()

		// Synchronize with secret generation.
		<-kms.c

		// Set the secret before releasing the goroutine.
		secret, err := a.GetSecret(kms.KMS, "test2", 16)
		So(err, ShouldBeNil)

		// Release the goroutine and verify it gets the secret that was set.
		kms.c <- struct{}{}
		So(<-errc, ShouldBeNil)
		So(string(<-sc), ShouldEqual, string(secret))
	})
}
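
// syncKMS is referenced by the race test above but not shown in this excerpt.
// A plausible sketch follows: it embeds a real KMS and rendezvouses on an
// unbuffered channel around secret generation, letting the test pause the
// goroutine mid-GetSecret while another caller wins the race. This assumes
// GetSecret obtains its random bytes via the KMS's GenerateNonce method; if
// GetSecret uses a different KMS call, that is the one to intercept instead.
type syncKMS struct {
	security.KMS
	c chan struct{}
}

func (s *syncKMS) GenerateNonce(bytes int) ([]byte, error) {
	s.c <- struct{}{} // signal the test that generation has begun
	<-s.c             // block until the test releases us
	return s.KMS.GenerateNonce(bytes)
}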