func TestPostgreSQLBackend(t *testing.T) { connURL := os.Getenv("PGURL") if connURL == "" { t.SkipNow() } table := os.Getenv("PGTABLE") if table == "" { table = "vault_kv_store" } // Run vault tests logger := logformat.NewVaultLogger(log.LevelTrace) b, err := NewBackend("postgresql", logger, map[string]string{ "connection_url": connURL, "table": table, }) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } defer func() { pg := b.(*PostgreSQLBackend) _, err := pg.client.Exec("TRUNCATE TABLE " + pg.table) if err != nil { t.Fatalf("Failed to drop table: %v", err) } }() testBackend(t, b) testBackend_ListPrefix(t, b) }
// mockRollback returns a mock rollback manager func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) { backend := new(NoopBackend) mounts := new(MountTable) router := NewRouter() mounts.Entries = []*MountEntry{ &MountEntry{ Path: "foo", }, } meUUID, err := uuid.GenerateUUID() if err != nil { t.Fatal(err) } if err := router.Mount(backend, "foo", &MountEntry{UUID: meUUID}, nil); err != nil { t.Fatalf("err: %s", err) } mountsFunc := func() []*MountEntry { return mounts.Entries } logger := logformat.NewVaultLogger(log.LevelTrace) rb := NewRollbackManager(logger, mountsFunc, router) rb.period = 10 * time.Millisecond return rb, backend }
func TestInmem(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) inm := NewInmem(logger) testBackend(t, inm) testBackend_ListPrefix(t, inm) }
func TestAzureBackend(t *testing.T) { if os.Getenv("AZURE_ACCOUNT_NAME") == "" || os.Getenv("AZURE_ACCOUNT_KEY") == "" { t.SkipNow() } accountName := os.Getenv("AZURE_ACCOUNT_NAME") accountKey := os.Getenv("AZURE_ACCOUNT_KEY") ts := time.Now().UnixNano() container := fmt.Sprintf("vault-test-%d", ts) cleanupClient, _ := storage.NewBasicClient(accountName, accountKey) logger := logformat.NewVaultLogger(log.LevelTrace) backend, err := NewBackend("azure", logger, map[string]string{ "container": container, "accountName": accountName, "accountKey": accountKey, }) defer func() { cleanupClient.GetBlobService().DeleteContainerIfExists(container) }() if err != nil { t.Fatalf("err: %s", err) } testBackend(t, backend) testBackend_ListPrefix(t, backend) }
// TestCoreWithTokenStore returns an in-memory core that has a token store
// mounted, so that logical token functions can be used
func TestCoreWithTokenStore(t *testing.T) (*Core, *TokenStore, []byte, string) {
	c, key, root := TestCoreUnsealed(t)

	// Mount entry describing the token credential backend.
	me := &MountEntry{
		Table:       credentialTableType,
		Path:        "token/",
		Type:        "token",
		Description: "token based credentials",
	}
	meUUID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}
	me.UUID = meUUID

	// Barrier view scoped under this mount's UUID.
	view := NewBarrierView(c.barrier, credentialBarrierPrefix+me.UUID+"/")
	// Error deliberately ignored: the built-in "token" factory is expected
	// to succeed here. NOTE(review): confirm it cannot fail in practice.
	tokenstore, _ := c.newCredentialBackend("token", c.mountEntrySysView(me), view, nil)
	ts := tokenstore.(*TokenStore)

	// Route auth/token/ through a fresh router to the token store.
	router := NewRouter()
	router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: ""}, ts.view)

	// Wire an expiration manager backed by the system barrier's expiration
	// sub-view so token leases can be managed by callers.
	subview := c.systemBarrierView.SubView(expirationSubPath)
	logger := logformat.NewVaultLogger(log.LevelTrace)

	exp := NewExpirationManager(router, subview, ts, logger)
	ts.SetExpirationManager(exp)

	return c, ts, key, root
}
// TestAuditBroker_LogRequest verifies that LogRequest fans a request out to
// every registered audit backend, succeeds while at least one backend can
// log, and fails only when all backends fail.
func TestAuditBroker_LogRequest(t *testing.T) {
	l := logformat.NewVaultLogger(log.LevelTrace)
	b := NewAuditBroker(l)

	// Two no-op audit backends so fan-out can be observed.
	a1 := &NoopAudit{}
	a2 := &NoopAudit{}
	b.Register("foo", a1, nil)
	b.Register("bar", a2, nil)

	auth := &logical.Auth{
		ClientToken: "foo",
		Policies:    []string{"dev", "ops"},
		Metadata: map[string]string{
			"user":   "******",
			"source": "github",
		},
	}
	req := &logical.Request{
		Operation: logical.ReadOperation,
		Path:      "sys/mounts",
	}

	// Create an identifier for the request to verify against
	var err error
	req.ID, err = uuid.GenerateUUID()
	if err != nil {
		t.Fatalf("failed to generate identifier for the request: path%s err: %v", req.Path, err)
	}

	reqErrs := errors.New("errs")
	err = b.LogRequest(auth, req, reqErrs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both backends must have received the identical auth, request, and
	// outer error values.
	for _, a := range []*NoopAudit{a1, a2} {
		if !reflect.DeepEqual(a.ReqAuth[0], auth) {
			t.Fatalf("Bad: %#v", a.ReqAuth[0])
		}
		if !reflect.DeepEqual(a.Req[0], req) {
			t.Fatalf("Bad: %#v", a.Req[0])
		}
		if !reflect.DeepEqual(a.ReqErrs[0], reqErrs) {
			t.Fatalf("Bad: %#v", a.ReqErrs[0])
		}
	}

	// Should still work with one failing backend
	a1.ReqErr = fmt.Errorf("failed")
	if err := b.LogRequest(auth, req, nil); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should FAIL work with both failing backends
	a2.ReqErr = fmt.Errorf("failed")
	if err := b.LogRequest(auth, req, nil); !errwrap.Contains(err, "no audit backend succeeded in logging the request") {
		t.Fatalf("err: %v", err)
	}
}
func TestCache(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) inm := NewInmem(logger) cache := NewCache(inm, 0, logger) testBackend(t, cache) testBackend_ListPrefix(t, cache) }
func TestBackendConfig() *BackendConfig { bc := &BackendConfig{ Logger: logformat.NewVaultLogger(log.LevelTrace), System: TestSystemView(), } bc.Logger.SetLevel(log.LevelTrace) return bc }
// TestConsulHABackend exercises the Consul backend's HA and redirect-detect
// behavior against a live Consul agent. If CONSUL_HTTP_ADDR is unset, a
// docker test container is started instead (and cleaned up afterwards).
func TestConsulHABackend(t *testing.T) {
	var token string
	addr := os.Getenv("CONSUL_HTTP_ADDR")
	if addr == "" {
		cid, connURL := prepareTestContainer(t)
		if cid != "" {
			defer cleanupTestContainer(t, cid)
		}
		addr = connURL
		token = dockertest.ConsulACLMasterToken
	}

	conf := api.DefaultConfig()
	conf.Address = addr
	conf.Token = token
	client, err := api.NewClient(conf)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Unique KV prefix per run; the whole subtree is removed on cleanup.
	randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
	defer func() {
		client.KV().DeleteTree(randPath, nil)
	}()

	logger := logformat.NewVaultLogger(log.LevelTrace)

	// NOTE(review): max_parallel of "-1" presumably exercises the
	// non-positive parallelism configuration path — confirm against the
	// backend's config parsing.
	b, err := NewBackend("consul", logger, map[string]string{
		"address":      conf.Address,
		"path":         randPath,
		"max_parallel": "-1",
		"token":        conf.Token,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	ha, ok := b.(HABackend)
	if !ok {
		t.Fatalf("consul does not implement HABackend")
	}
	testHABackend(t, ha, ha)

	detect, ok := b.(RedirectDetect)
	if !ok {
		t.Fatalf("consul does not implement RedirectDetect")
	}
	host, err := detect.DetectHostAddr()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if host == "" {
		t.Fatalf("bad addr: %v", host)
	}
}
func TestEtcdBackend(t *testing.T) { addr := os.Getenv("ETCD_ADDR") if addr == "" { t.SkipNow() } cfg := client.Config{ Endpoints: []string{addr}, Transport: client.DefaultTransport, } c, err := client.New(cfg) if err != nil { t.Fatalf("err: %s", err) } ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) syncErr := c.Sync(ctx) cancel() if syncErr != nil { t.Fatalf("err: %v", EtcdSyncClusterError) } kAPI := client.NewKeysAPI(c) randPath := fmt.Sprintf("/vault-%d", time.Now().Unix()) defer func() { delOpts := &client.DeleteOptions{ Recursive: true, } if _, err := kAPI.Delete(context.Background(), randPath, delOpts); err != nil { t.Fatalf("err: %v", err) } }() // Generate new etcd backend. The etcd address is read from ETCD_ADDR. No // need to provide it explicitly. logger := logformat.NewVaultLogger(log.LevelTrace) b, err := NewBackend("etcd", logger, map[string]string{ "path": randPath, }) if err != nil { t.Fatalf("err: %s", err) } testBackend(t, b) testBackend_ListPrefix(t, b) ha, ok := b.(HABackend) if !ok { t.Fatalf("etcd does not implement HABackend") } testHABackend(t, ha, ha) }
// TestNewCore_badRedirectAddr verifies that NewCore rejects a redirect
// address lacking a URL scheme.
func TestNewCore_badRedirectAddr(t *testing.T) {
	// NOTE(review): plain "=" assigns a package-level logger rather than
	// declaring a local one — confirm this is intentional (vs ":=").
	logger = logformat.NewVaultLogger(log.LevelTrace)

	conf := &CoreConfig{
		RedirectAddr: "127.0.0.1:8200",
		Physical:     physical.NewInmem(logger),
		DisableMlock: true,
	}
	_, err := NewCore(conf)
	if err == nil {
		t.Fatal("should error")
	}
}
// TestDynamoDBHABackend exercises the DynamoDB backend's HA behavior against
// live AWS (or a local endpoint via AWS_DYNAMODB_ENDPOINT). Skipped unless
// AWS credentials are present in the environment. A uniquely named table is
// used and deleted on cleanup.
func TestDynamoDBHABackend(t *testing.T) {
	if os.Getenv("AWS_ACCESS_KEY_ID") == "" || os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
		t.SkipNow()
	}
	creds, err := credentials.NewEnvCredentials().Get()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// If the variable is empty or doesn't exist, the default
	// AWS endpoints will be used
	endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")

	region := os.Getenv("AWS_DEFAULT_REGION")
	if region == "" {
		region = "us-east-1"
	}

	// Direct DynamoDB connection, used only for the deferred table delete.
	conn := dynamodb.New(session.New(&aws.Config{
		Credentials: credentials.NewEnvCredentials(),
		Endpoint:    aws.String(endpoint),
		Region:      aws.String(region),
	}))

	// Randomized table name so concurrent runs do not collide.
	var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
	table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)

	defer func() {
		conn.DeleteTable(&dynamodb.DeleteTableInput{
			TableName: aws.String(table),
		})
	}()

	logger := logformat.NewVaultLogger(log.LevelTrace)

	b, err := NewBackend("dynamodb", logger, map[string]string{
		"access_key":    creds.AccessKeyID,
		"secret_key":    creds.SecretAccessKey,
		"session_token": creds.SessionToken,
		"table":         table,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	ha, ok := b.(HABackend)
	if !ok {
		t.Fatalf("dynamodb does not implement HABackend")
	}
	testHABackend(t, ha, ha)
}
func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend { logger := logformat.NewVaultLogger(log.LevelTrace) be, err := newConsulBackend(*conf, logger) if err != nil { t.Fatalf("Expected Consul to initialize: %v", err) } c, ok := be.(*ConsulBackend) if !ok { t.Fatalf("Expected ConsulBackend") } return c }
func TestMySQLBackend(t *testing.T) { address := os.Getenv("MYSQL_ADDR") if address == "" { t.SkipNow() } database := os.Getenv("MYSQL_DB") if database == "" { database = "test" } table := os.Getenv("MYSQL_TABLE") if table == "" { table = "test" } username := os.Getenv("MYSQL_USERNAME") password := os.Getenv("MYSQL_PASSWORD") // Run vault tests logger := logformat.NewVaultLogger(log.LevelTrace) b, err := NewBackend("mysql", logger, map[string]string{ "address": address, "database": database, "table": table, "username": username, "password": password, }) if err != nil { t.Fatalf("Failed to create new backend: %v", err) } defer func() { mysql := b.(*MySQLBackend) _, err := mysql.client.Exec("DROP TABLE " + mysql.dbTable) if err != nil { t.Fatalf("Failed to drop table: %v", err) } }() testBackend(t, b) testBackend_ListPrefix(t, b) }
func testNewBackend(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) _, err := NewBackend("foobar", logger, nil) if err == nil { t.Fatalf("expected error") } b, err := NewBackend("inmem", logger, nil) if err != nil { t.Fatalf("err: %v", err) } if b == nil { t.Fatalf("expected backend") } }
func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) { logger := logformat.NewVaultLogger(log.LevelTrace) inm := physical.NewInmem(logger) conf := &CoreConfig{ Physical: inm, DisableMlock: true, LogicalBackends: map[string]logical.Factory{ "generic": LeasedPassthroughBackendFactory, }, Seal: seal, } c, err := NewCore(conf) if err != nil { t.Fatalf("err: %v", err) } return c, conf }
// TestZookeeperBackend exercises the Zookeeper physical backend against a
// live ensemble. Skipped unless ZOOKEEPER_ADDR is set. A unique root znode
// is created per run; cleanup deletes the znodes the generic suites create,
// deepest-first, since znodes with children cannot be deleted.
func TestZookeeperBackend(t *testing.T) {
	addr := os.Getenv("ZOOKEEPER_ADDR")
	if addr == "" {
		t.SkipNow()
	}

	client, _, err := zk.Connect([]string{addr}, time.Second)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
	acl := zk.WorldACL(zk.PermAll)
	_, err = client.Create(randPath, []byte("hi"), int32(0), acl)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	defer func() {
		// Delete children before parents; order matters.
		client.Delete(randPath+"/foo/nested1/nested2/nested3", -1)
		client.Delete(randPath+"/foo/nested1/nested2", -1)
		client.Delete(randPath+"/foo/nested1", -1)
		client.Delete(randPath+"/foo/bar/baz", -1)
		client.Delete(randPath+"/foo/bar", -1)
		client.Delete(randPath+"/foo", -1)
		client.Delete(randPath, -1)
		client.Close()
	}()

	logger := logformat.NewVaultLogger(log.LevelTrace)

	// NOTE(review): the address is doubled, presumably to exercise
	// multi-server address parsing — confirm intent.
	b, err := NewBackend("zookeeper", logger, map[string]string{
		"address": addr + "," + addr,
		"path":    randPath,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testBackend(t, b)
	testBackend_ListPrefix(t, b)
}
// TestLoadConfigDir verifies that LoadConfigDir merges every HCL file in the
// config-dir fixture into the expected combined Config.
func TestLoadConfigDir(t *testing.T) {
	logger := logformat.NewVaultLogger(log.LevelTrace)

	config, err := LoadConfigDir("./test-fixtures/config-dir", logger)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Expected result of merging the fixture directory's files.
	expected := &Config{
		DisableCache: true,
		DisableMlock: true,

		Listeners: []*Listener{
			&Listener{
				Type: "tcp",
				Config: map[string]string{
					"address": "127.0.0.1:443",
				},
			},
		},

		Backend: &Backend{
			Type: "consul",
			Config: map[string]string{
				"foo": "bar",
			},
			DisableClustering: true,
		},

		Telemetry: &Telemetry{
			StatsiteAddr:    "qux",
			StatsdAddr:      "baz",
			DisableHostname: true,
		},

		MaxLeaseTTL:     10 * time.Hour,
		DefaultLeaseTTL: 10 * time.Hour,
		ClusterName:     "testcluster",
	}
	if !reflect.DeepEqual(config, expected) {
		t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
	}
}
func TestFileBackend(t *testing.T) { dir, err := ioutil.TempDir("", "vault") if err != nil { t.Fatalf("err: %s", err) } defer os.RemoveAll(dir) logger := logformat.NewVaultLogger(log.LevelTrace) b, err := NewBackend("file", logger, map[string]string{ "path": dir, }) if err != nil { t.Fatalf("err: %s", err) } testBackend(t, b) testBackend_ListPrefix(t, b) }
func TestParseConfig_badTopLevel(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) _, err := ParseConfig(strings.TrimSpace(` backend {} bad = "one" nope = "yes" `), logger) if err == nil { t.Fatal("expected error") } if !strings.Contains(err.Error(), "invalid key 'bad' on line 2") { t.Errorf("bad error: %q", err) } if !strings.Contains(err.Error(), "invalid key 'nope' on line 3") { t.Errorf("bad error: %q", err) } }
// TestClusterHAFetching initializes and unseals an HA core, waits for it to
// become active, and verifies that the cluster name and ID are populated.
func TestClusterHAFetching(t *testing.T) {
	logger := logformat.NewVaultLogger(log.LevelTrace)

	redirect := "http://127.0.0.1:8200"

	c, err := NewCore(&CoreConfig{
		Physical:     physical.NewInmemHA(logger),
		HAPhysical:   physical.NewInmemHA(logger),
		RedirectAddr: redirect,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	key, _ := TestCoreInit(t, c)
	if _, err := TestCoreUnseal(c, TestKeyCopy(key)); err != nil {
		t.Fatalf("unseal err: %s", err)
	}

	// Verify unsealed
	sealed, err := c.Sealed()
	if err != nil {
		t.Fatalf("err checking seal status: %s", err)
	}
	if sealed {
		t.Fatal("should not be sealed")
	}

	// Wait for core to become active
	TestWaitActive(t, c)

	cluster, err := c.Cluster()
	if err != nil {
		t.Fatal(err)
	}
	// Test whether expected values are found
	if cluster == nil || cluster.Name == "" || cluster.ID == "" {
		t.Fatalf("cluster information missing: cluster:%#v", cluster)
	}
}
// TestConsulBackend runs the generic physical backend test suites against a
// live Consul agent. If CONSUL_HTTP_ADDR is unset, a docker test container
// is started instead (and cleaned up afterwards).
func TestConsulBackend(t *testing.T) {
	var token string
	addr := os.Getenv("CONSUL_HTTP_ADDR")
	if addr == "" {
		cid, connURL := prepareTestContainer(t)
		if cid != "" {
			defer cleanupTestContainer(t, cid)
		}
		addr = connURL
		token = dockertest.ConsulACLMasterToken
	}

	conf := api.DefaultConfig()
	conf.Address = addr
	conf.Token = token
	client, err := api.NewClient(conf)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Unique KV prefix per run; the whole subtree is removed on cleanup.
	randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
	defer func() {
		client.KV().DeleteTree(randPath, nil)
	}()

	logger := logformat.NewVaultLogger(log.LevelTrace)

	b, err := NewBackend("consul", logger, map[string]string{
		"address":      conf.Address,
		"path":         randPath,
		"max_parallel": "256",
		"token":        conf.Token,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testBackend(t, b)
	testBackend_ListPrefix(t, b)
}
func TestCache_Purge(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) inm := NewInmem(logger) cache := NewCache(inm, 0, logger) ent := &Entry{ Key: "foo", Value: []byte("bar"), } err := cache.Put(ent) if err != nil { t.Fatalf("err: %v", err) } // Delete from under inm.Delete("foo") // Read should work out, err := cache.Get("foo") if err != nil { t.Fatalf("err: %v", err) } if out == nil { t.Fatalf("should have key") } // Clear the cache cache.Purge() // Read should fail out, err = cache.Get("foo") if err != nil { t.Fatalf("err: %v", err) } if out != nil { t.Fatalf("should not have key") } }
func TestParseConfig_badTelemetry(t *testing.T) { logger := logformat.NewVaultLogger(log.LevelTrace) _, err := ParseConfig(strings.TrimSpace(` telemetry { statsd_address = "1.2.3.3" bad = "one" nope = "yes" } `), logger) if err == nil { t.Fatal("expected error") } if !strings.Contains(err.Error(), "telemetry: invalid key 'bad' on line 3") { t.Errorf("bad error: %q", err) } if !strings.Contains(err.Error(), "telemetry: invalid key 'nope' on line 4") { t.Errorf("bad error: %q", err) } }
func TestConsul_ServiceTags(t *testing.T) { consulConfig := map[string]string{ "path": "seaTech/", "service": "astronomy", "service_tags": "deadbeef, cafeefac, deadc0de, feedface", "redirect_addr": "http://127.0.0.2:8200", "check_timeout": "6s", "address": "127.0.0.2", "scheme": "https", "token": "deadbeef-cafeefac-deadc0de-feedface", "max_parallel": "4", "disable_registration": "false", } logger := logformat.NewVaultLogger(log.LevelTrace) be, err := newConsulBackend(consulConfig, logger) if err != nil { t.Fatal(err) } c, ok := be.(*ConsulBackend) if !ok { t.Fatalf("failed to create physical Consul backend") } expected := []string{"deadbeef", "cafeefac", "deadc0de", "feedface"} actual := c.fetchServiceTags(false) if !strutil.EquivalentSlices(actual, append(expected, "standby")) { t.Fatalf("bad: expected:%s actual:%s", append(expected, "standby"), actual) } actual = c.fetchServiceTags(true) if !strutil.EquivalentSlices(actual, append(expected, "active")) { t.Fatalf("bad: expected:%s actual:%s", append(expected, "active"), actual) } }
// TestSwiftBackend exercises the OpenStack Swift physical backend against a
// live cluster. Skipped unless OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL are
// set. A uniquely named container is created for the run; cleanup deletes
// every object in it and then removes the container itself.
func TestSwiftBackend(t *testing.T) {
	if os.Getenv("OS_USERNAME") == "" || os.Getenv("OS_PASSWORD") == "" || os.Getenv("OS_AUTH_URL") == "" {
		t.SkipNow()
	}
	username := os.Getenv("OS_USERNAME")
	password := os.Getenv("OS_PASSWORD")
	authUrl := os.Getenv("OS_AUTH_URL")
	tenant := os.Getenv("OS_TENANT_NAME")

	ts := time.Now().UnixNano()
	container := fmt.Sprintf("vault-test-%d", ts)

	// Separate connection used purely for container setup/teardown.
	cleaner := swift.Connection{
		UserName:  username,
		ApiKey:    password,
		AuthUrl:   authUrl,
		Tenant:    tenant,
		Transport: cleanhttp.DefaultPooledTransport(),
	}

	err := cleaner.Authenticate()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	err = cleaner.ContainerCreate(container, nil)
	if nil != err {
		t.Fatalf("Unable to create test container '%s': %v", container, err)
	}
	defer func() {
		// Objects must be removed before the container can be deleted.
		newObjects, err := cleaner.ObjectNamesAll(container, nil)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		for _, o := range newObjects {
			err := cleaner.ObjectDelete(container, o)
			if err != nil {
				t.Fatalf("err: %s", err)
			}
		}
		err = cleaner.ContainerDelete(container)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
	}()

	logger := logformat.NewVaultLogger(log.LevelTrace)

	b, err := NewBackend("swift", logger, map[string]string{
		"username":  username,
		"password":  password,
		"container": container,
		"auth_url":  authUrl,
		"tenant":    tenant,
	})
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testBackend(t, b)
	testBackend_ListPrefix(t, b)
}
// TestCore_Standby_Seal verifies that a standby core refuses Seal requests:
// Seal must return an error (and the standby must stay up) whether it is
// given the root token or an invalid one.
func TestCore_Standby_Seal(t *testing.T) {
	// Create the first core and initialize it
	// NOTE(review): plain "=" assigns a package-level logger rather than
	// declaring a local one — confirm intended (vs ":=").
	logger = logformat.NewVaultLogger(log.LevelTrace)

	inm := physical.NewInmem(logger)
	inmha := physical.NewInmemHA(logger)
	redirectOriginal := "http://127.0.0.1:8200"
	core, err := NewCore(&CoreConfig{
		Physical:     inm,
		HAPhysical:   inmha,
		RedirectAddr: redirectOriginal,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	key, root := TestCoreInit(t, core)
	if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
		t.Fatalf("unseal err: %s", err)
	}

	// Verify unsealed
	sealed, err := core.Sealed()
	if err != nil {
		t.Fatalf("err checking seal status: %s", err)
	}
	if sealed {
		t.Fatal("should not be sealed")
	}

	// Wait for core to become active
	TestWaitActive(t, core)

	// Check the leader is local
	isLeader, advertise, err := core.Leader()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !isLeader {
		t.Fatalf("should be leader")
	}
	if advertise != redirectOriginal {
		t.Fatalf("Bad advertise: %v", advertise)
	}

	// Create the second core and initialize it
	redirectOriginal2 := "http://127.0.0.1:8500"
	core2, err := NewCore(&CoreConfig{
		Physical:     inm,
		HAPhysical:   inmha,
		RedirectAddr: redirectOriginal2,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
		t.Fatalf("unseal err: %s", err)
	}

	// Verify unsealed
	sealed, err = core2.Sealed()
	if err != nil {
		t.Fatalf("err checking seal status: %s", err)
	}
	if sealed {
		t.Fatal("should not be sealed")
	}

	// Core2 should be in standby
	standby, err := core2.Standby()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !standby {
		t.Fatalf("should be standby")
	}

	// Check the leader is not local
	isLeader, advertise, err = core2.Leader()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if isLeader {
		t.Fatalf("should not be leader")
	}
	if advertise != redirectOriginal {
		t.Fatalf("Bad advertise: %v", advertise)
	}

	// Seal the standby core with the correct token. Shouldn't go down
	err = core2.Seal(root)
	if err == nil {
		t.Fatal("should not be sealed")
	}

	keyUUID, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}
	// Seal the standby core with an invalid token. Shouldn't go down
	err = core2.Seal(keyUUID)
	if err == nil {
		t.Fatal("should not be sealed")
	}
}
// TestCore_Standby_Rotate rotates the encryption key on the active core,
// seals it so the standby takes over, and verifies the new active core
// reports key term 2.
func TestCore_Standby_Rotate(t *testing.T) {
	// Create the first core and initialize it
	// NOTE(review): plain "=" assigns a package-level logger rather than
	// declaring a local one — confirm intended (vs ":=").
	logger = logformat.NewVaultLogger(log.LevelTrace)

	inm := physical.NewInmem(logger)
	inmha := physical.NewInmemHA(logger)
	redirectOriginal := "http://127.0.0.1:8200"
	core, err := NewCore(&CoreConfig{
		Physical:     inm,
		HAPhysical:   inmha,
		RedirectAddr: redirectOriginal,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	key, root := TestCoreInit(t, core)
	if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
		t.Fatalf("unseal err: %s", err)
	}

	// Wait for core to become active
	TestWaitActive(t, core)

	// Create a second core, attached to same in-memory store
	redirectOriginal2 := "http://127.0.0.1:8500"
	core2, err := NewCore(&CoreConfig{
		Physical:     inm,
		HAPhysical:   inmha,
		RedirectAddr: redirectOriginal2,
		DisableMlock: true,
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := TestCoreUnseal(core2, TestKeyCopy(key)); err != nil {
		t.Fatalf("unseal err: %s", err)
	}

	// Rotate the encryption key
	req := &logical.Request{
		Operation:   logical.UpdateOperation,
		Path:        "sys/rotate",
		ClientToken: root,
	}
	_, err = core.HandleRequest(req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Seal the first core, should step down
	err = core.Seal(root)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for core2 to become active
	TestWaitActive(t, core2)

	// Read the key status
	req = &logical.Request{
		Operation:   logical.ReadOperation,
		Path:        "sys/key-status",
		ClientToken: root,
	}
	resp, err := core2.HandleRequest(req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify the response: the rotation above advanced the term to 2.
	if resp.Data["term"] != 2 {
		t.Fatalf("bad: %#v", resp)
	}
}
func TestCore_Standby_SeparateHA(t *testing.T) { logger = logformat.NewVaultLogger(log.LevelTrace) testCore_Standby_Common(t, physical.NewInmemHA(logger), physical.NewInmemHA(logger)) }
func TestCore_Standby(t *testing.T) { logger = logformat.NewVaultLogger(log.LevelTrace) inmha := physical.NewInmemHA(logger) testCore_Standby_Common(t, inmha, inmha) }