func TestNodeCertificateRenewalsDoNotRequireToken(t *testing.T) {
	tc := testutils.NewTestCA(t)
	defer tc.Stop()

	csr, _, err := ca.GenerateNewCSR()
	assert.NoError(t, err)

	role := api.NodeRoleManager
	issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: role}
	issueResponse, err := tc.NodeCAClients[2].IssueNodeCertificate(context.Background(), issueRequest)
	assert.NoError(t, err)
	assert.NotNil(t, issueResponse.NodeID)
	assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership)

	statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID}
	statusResponse, err := tc.NodeCAClients[2].NodeCertificateStatus(context.Background(), statusRequest)
	assert.NoError(t, err)
	assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State)
	assert.NotNil(t, statusResponse.Certificate.Certificate)
	assert.Equal(t, role, statusResponse.Certificate.Role)

	role = api.NodeRoleWorker
	issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role}
	issueResponse, err = tc.NodeCAClients[1].IssueNodeCertificate(context.Background(), issueRequest)
	require.NoError(t, err)
	assert.NotNil(t, issueResponse.NodeID)
	assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership)

	statusRequest = &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID}
	statusResponse, err = tc.NodeCAClients[2].NodeCertificateStatus(context.Background(), statusRequest)
	require.NoError(t, err)
	assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State)
	assert.NotNil(t, statusResponse.Certificate.Certificate)
	assert.Equal(t, role, statusResponse.Certificate.Role)
}
func TestShardConnStreamingRetry(t *testing.T) {
	// ERR_RETRY
	s := createSandbox("TestShardConnStreamingRetry")
	sbc := &sandboxConn{mustFailRetry: 1}
	s.MapTestConn("0", sbc)
	sdc := NewShardConn(context.Background(), new(sandboxTopo), "aa", "TestShardConnStreamingRetry", "0", topodatapb.TabletType_REPLICA, 10*time.Millisecond, 3, connTimeoutTotal, connTimeoutPerConn, connLife, connectTimings)
	_, err := sdc.StreamExecute(context.Background(), "query", nil, 0)
	if err != nil {
		t.Errorf("want nil, got %v", err)
	}
	if s.DialCounter != 2 {
		t.Errorf("want 2, got %v", s.DialCounter)
	}
	if execCount := sbc.ExecCount.Get(); execCount != 2 {
		t.Errorf("want 2, got %v", execCount)
	}

	// ERR_FATAL
	s.Reset()
	sbc = &sandboxConn{mustFailFatal: 1}
	s.MapTestConn("0", sbc)
	sdc = NewShardConn(context.Background(), new(sandboxTopo), "aa", "TestShardConnStreamingRetry", "0", topodatapb.TabletType_REPLICA, 10*time.Millisecond, 3, connTimeoutTotal, connTimeoutPerConn, connLife, connectTimings)
	_, err = sdc.StreamExecute(context.Background(), "query", nil, 0)
	want := "shard, host: TestShardConnStreamingRetry.0.replica, host:\"0\" port_map:<key:\"vt\" value:1 > , fatal: err"
	if err == nil || err.Error() != want {
		t.Errorf("want %v, got %v", want, err)
	}
	if s.DialCounter != 1 {
		t.Errorf("want 1, got %v", s.DialCounter)
	}
	if execCount := sbc.ExecCount.Get(); execCount != 1 {
		t.Errorf("want 1, got %v", execCount)
	}
}
// TestCacheWithErrors tests that a cached value keeps being served
// even after the underlying topo server starts failing.
func TestCacheWithErrors(t *testing.T) {
	ft := &fakeTopo{keyspace: "test_ks"}
	rsts := NewResilientSrvTopoServer(ft, "TestCacheWithErrors")

	// ask for the known keyspace, that populates the cache
	_, err := rsts.GetSrvKeyspace(context.Background(), "", "test_ks")
	if err != nil {
		t.Fatalf("GetSrvKeyspace got unexpected error: %v", err)
	}

	// now make the topo server fail, and ask again: we should get the
	// cached value without even hitting the underlying server
	ft.keyspace = "another_test_ks"
	_, err = rsts.GetSrvKeyspace(context.Background(), "", "test_ks")
	if err != nil {
		t.Fatalf("GetSrvKeyspace got unexpected error: %v", err)
	}

	// now reduce the TTL to nothing, so the cache entry is stale and the
	// underlying server is queried (and fails); the stale value should
	// still be returned
	rsts.cacheTTL = 0
	_, err = rsts.GetSrvKeyspace(context.Background(), "", "test_ks")
	if err != nil {
		t.Fatalf("GetSrvKeyspace got unexpected error: %v", err)
	}
}
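// The resilient-server behavior exercised above can be sketched as a tiny TTL
// cache that falls back to the stale entry when a refresh fails. This is an
// illustrative sketch, not vitess's actual ResilientSrvTopoServer: all names
// here (staleCache, fetchFunc, entry) are made up for the example.
type fetchFunc func(key string) (string, error)

type entry struct {
	value   string
	expires time.Time
}

type staleCache struct {
	ttl     time.Duration
	fetch   fetchFunc
	entries map[string]entry
}

// get returns the cached value while it is fresh; on expiry it re-fetches,
// and if the fetch fails it serves the stale value rather than the error.
func (c *staleCache) get(key string) (string, error) {
	e, ok := c.entries[key]
	if ok && time.Now().Before(e.expires) {
		return e.value, nil // fresh: don't touch the backing server
	}
	v, err := c.fetch(key)
	if err != nil {
		if ok {
			return e.value, nil // stale fallback, mirroring the test above
		}
		return "", err // nothing cached: the error is surfaced (cf. TestCachedErrors below)
	}
	c.entries[key] = entry{value: v, expires: time.Now().Add(c.ttl)}
	return v, nil
}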
func TestQueryExecutorPlanSetMaxDmlRows(t *testing.T) {
	setUpQueryExecutorTest()
	want := &mproto.QueryResult{}
	vtMaxDmlRows := int64(256)
	setQuery := fmt.Sprintf("set vt_max_dml_rows = %d", vtMaxDmlRows)
	qre, sqlQuery := newTestQueryExecutor(
		setQuery, context.Background(), enableRowCache|enableStrict)
	defer sqlQuery.disallowQueries()
	checkPlanID(t, planbuilder.PLAN_SET, qre.plan.PlanId)
	got := qre.Execute()
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("query executor Execute() = %v, want: %v", got, want)
	}
	if qre.qe.maxDMLRows.Get() != vtMaxDmlRows {
		t.Fatalf("set query failed, expected to have vt_max_dml_rows: %d, but got: %d", vtMaxDmlRows, qre.qe.maxDMLRows.Get())
	}

	// setting vt_max_dml_rows to 0 should fail
	setQuery = "set vt_max_dml_rows = 0"
	qre, sqlQuery = newTestQueryExecutor(
		setQuery, context.Background(), enableRowCache|enableStrict)
	defer sqlQuery.disallowQueries()
	checkPlanID(t, planbuilder.PLAN_SET, qre.plan.PlanId)
	defer handleAndVerifyTabletError(t, "vt_max_dml_rows out of range, should always larger than 0", ErrFail)
	qre.Execute()
}
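// handleAndVerifyTabletError, deferred above before a call that is expected
// to panic with a tablet error, relies on Go's deferred-recover mechanism. A
// minimal sketch of that pattern follows; the helper name and the checks are
// assumptions for illustration, not vitess's actual implementation.
func verifyPanicsWithError(t *testing.T, desc string) {
	x := recover()
	if x == nil {
		t.Errorf("%s: expected a panic, got none", desc)
		return
	}
	err, ok := x.(error)
	if !ok {
		t.Errorf("%s: panic value %v is not an error", desc, x)
		return
	}
	t.Logf("%s: got expected error: %v", desc, err)
}

// Usage mirrors the tests above: defer verifyPanicsWithError(t, "set query
// with out-of-range value") immediately before invoking qre.Execute(), so the
// helper recovers and inspects the panic when Execute unwinds.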
func postFreeze(
	c cluster.Cluster, freeze bool, timeout time.Duration,
) (serverpb.ClusterFreezeResponse, error) {
	httpClient := cluster.HTTPClient
	httpClient.Timeout = timeout

	var resp serverpb.ClusterFreezeResponse
	log.Infof(context.Background(), "requesting: freeze=%t, timeout=%s", freeze, timeout)
	cb := func(v proto.Message) {
		oldNum := resp.RangesAffected
		resp = *v.(*serverpb.ClusterFreezeResponse)
		if oldNum > resp.RangesAffected {
			resp.RangesAffected = oldNum
		}
		if (resp != serverpb.ClusterFreezeResponse{}) {
			log.Infof(context.Background(), "%+v", &resp)
		}
	}
	err := httputil.StreamJSON(
		httpClient,
		c.URL(0)+"/_admin/v1/cluster/freeze",
		&serverpb.ClusterFreezeRequest{Freeze: freeze},
		&serverpb.ClusterFreezeResponse{},
		cb,
	)
	return resp, err
}
func (test *TestIterationRepository) TestListIterationBySpace() {
	t := test.T()
	resource.Require(t, resource.Database)

	repo := iteration.NewIterationRepository(test.DB)

	spaceID := uuid.NewV4()
	for i := 0; i < 3; i++ {
		start := time.Now()
		end := start.Add(time.Hour * (24 * 8 * 3))
		name := "Sprint #2" + strconv.Itoa(i)

		// named itr so the struct doesn't shadow the loop variable i
		itr := iteration.Iteration{
			Name:    name,
			SpaceID: spaceID,
			StartAt: &start,
			EndAt:   &end,
		}
		repo.Create(context.Background(), &itr)
	}
	// an iteration in a different space must not show up in the list
	repo.Create(context.Background(), &iteration.Iteration{
		Name:    "Other Sprint #2",
		SpaceID: uuid.NewV4(),
	})

	its, err := repo.List(context.Background(), spaceID)
	assert.Nil(t, err)
	assert.Len(t, its, 3)
}
func TestCreateAndOpen(t *testing.T) {
	// Verify that we can successfully create a fresh index pack.
	path := filepath.Join(tempDir, "TestIndexPack")
	pack, err := Create(context.Background(), path, UnitType((*cpb.CompilationUnit)(nil)))
	if err != nil {
		t.Fatalf("Unable to create index pack %q: %v", path, err)
	}
	t.Logf("Created index pack: %#v", pack)
	testArchive = pack

	// Creating over an existing directory doesn't work.
	if alt, err := Create(context.Background(), path); err == nil {
		t.Errorf("Create should fail for existing path %q, but returned %v", path, alt)
	}

	// Opening a non-existent pack doesn't work.
	if alt, err := Open(context.Background(), path+"NoneSuch"); err == nil {
		t.Errorf("Open should fail for %q, but returned %v", path+"NoneSuch", alt)
	}

	// Opening the index pack we created for the test should work.
	if _, err := Open(context.Background(), path); err != nil {
		t.Errorf("Error opening existing path %q: %v", path, err)
	}
}
func commandSucceeds(t *testing.T, client vtworkerclient.Client) {
	logs, errFunc, err := client.ExecuteVtworkerCommand(context.Background(), []string{"Ping", "pong"})
	if err != nil {
		t.Fatalf("Cannot execute remote command: %v", err)
	}

	count := 0
	for e := range logs {
		expected := "Ping command was called with message: 'pong'.\n"
		if logutil.EventString(e) != expected {
			t.Errorf("Got unexpected log line '%v' expected '%v'", e.String(), expected)
		}
		count++
	}
	if count != 1 {
		t.Errorf("Didn't get expected log line only, got %v lines", count)
	}
	if err := errFunc(); err != nil {
		t.Fatalf("Remote error: %v", err)
	}

	logs, errFunc, err = client.ExecuteVtworkerCommand(context.Background(), []string{"Reset"})
	if err != nil {
		t.Fatalf("Cannot execute remote command: %v", err)
	}
	// drain the log channel so errFunc is safe to call
	for range logs {
	}
	if err := errFunc(); err != nil {
		t.Fatalf("Cannot execute remote command: %v", err)
	}
}
func (test *TestCommentRepository) TestDeleteComment() {
	t := test.T()
	resource.Require(t, resource.Database)
	defer cleaner.DeleteCreatedEntities(test.DB)()

	repo := comment.NewCommentRepository(test.DB)

	parentID := "AA"
	c := &comment.Comment{
		ParentID:  parentID,
		Body:      "Test AA",
		CreatedBy: uuid.NewV4(),
		ID:        uuid.NewV4(),
	}
	repo.Create(context.Background(), c)
	if c.ID == uuid.Nil {
		t.Errorf("Comment was not created, ID nil")
	}

	c.Body = "Test AB"
	err := repo.Delete(context.Background(), c.ID)
	if err != nil {
		t.Error("Failed to Delete", err.Error())
	}
}
func testGossipPeeringsInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart the first node.
		log.Infof(context.Background(), "restarting node 0")
		if err := c.Restart(0); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(context.Background(), "restarting node %d", pickedNode)
		if err := c.Restart(pickedNode); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))
	}
}
func TestGetters(t *testing.T) {
	assert.False(t, IsAuthenticatedFromContext(context.Background()))

	_, err := PoliciesFromContext(context.Background())
	assert.NotNil(t, err)

	_, err = SubjectFromContext(context.Background())
	assert.NotNil(t, err)

	_, err = TokenFromContext(context.Background())
	assert.NotNil(t, err)

	ctx := context.Background()
	claims := hjwt.ClaimsCarrier{"sub": "peter"}
	token := &jwt.Token{Valid: true}
	policies := []policy.Policy{}
	ctx = NewContextFromAuthValues(ctx, claims, token, policies)
	assert.True(t, IsAuthenticatedFromContext(ctx))

	policiesContext, err := PoliciesFromContext(ctx)
	assert.Nil(t, err)
	assert.Equal(t, policies, policiesContext)

	subjectContext, err := SubjectFromContext(ctx)
	assert.Nil(t, err)
	assert.Equal(t, claims.GetSubject(), subjectContext)

	tokenContext, err := TokenFromContext(ctx)
	assert.Nil(t, err)
	assert.Equal(t, token, tokenContext)
}
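// The accessors tested above follow Go's standard context-value pattern: an
// unexported key type prevents collisions, a constructor stores the values,
// and typed getters return an error when nothing was stored. A minimal sketch
// with made-up names (authCtxKey, authValues, subjectFromContext) follows;
// hydra's real keys and types may differ.
type authCtxKey int

const authKey authCtxKey = 0

type authValues struct {
	subject string
}

func newContextWithSubject(ctx context.Context, subject string) context.Context {
	return context.WithValue(ctx, authKey, &authValues{subject: subject})
}

func subjectFromContext(ctx context.Context) (string, error) {
	v, ok := ctx.Value(authKey).(*authValues)
	if !ok {
		return "", errors.New("no authentication values in context")
	}
	return v.subject, nil
}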
func TestShardConnBegin(t *testing.T) {
	testShardConnGeneric(t, "TestShardConnBegin", func() error {
		sdc := NewShardConn(context.Background(), new(sandboxTopo), "aa", "TestShardConnBegin", "0", "", 1*time.Millisecond, 3, connTimeoutTotal, connTimeoutPerConn, 24*time.Hour, connectTimings)
		_, err := sdc.Begin(context.Background())
		return err
	})
}
func TestScatterConnRollback(t *testing.T) {
	s := createSandbox("TestScatterConnRollback")
	sbc0 := &sandboxConn{}
	s.MapTestConn("0", sbc0)
	sbc1 := &sandboxConn{}
	s.MapTestConn("1", sbc1)
	stc := NewScatterConn(nil, topo.Server{}, new(sandboxTopo), "", "aa", retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife, "")

	// Sequence the executes to ensure commit order
	session := NewSafeSession(&proto.Session{InTransaction: true})
	stc.Execute(context.Background(), "query1", nil, "TestScatterConnRollback", []string{"0"}, pb.TabletType_REPLICA, session, false)
	stc.Execute(context.Background(), "query1", nil, "TestScatterConnRollback", []string{"0", "1"}, pb.TabletType_REPLICA, session, false)

	err := stc.Rollback(context.Background(), session)
	if err != nil {
		t.Errorf("want nil, got %v", err)
	}
	wantSession := proto.Session{}
	if !reflect.DeepEqual(wantSession, *session.Session) {
		t.Errorf("want\n%#v, got\n%#v", wantSession, *session.Session)
	}
	if rollbackCount := sbc0.RollbackCount.Get(); rollbackCount != 1 {
		t.Errorf("want 1, got %d", rollbackCount)
	}
	if rollbackCount := sbc1.RollbackCount.Get(); rollbackCount != 1 {
		t.Errorf("want 1, got %d", rollbackCount)
	}
}
func TestMultiExecs(t *testing.T) {
	s := createSandbox("TestMultiExecs")
	sbc0 := &sandboxConn{}
	s.MapTestConn("0", sbc0)
	sbc1 := &sandboxConn{}
	s.MapTestConn("1", sbc1)
	stc := NewScatterConn(nil, topo.Server{}, new(sandboxTopo), "", "aa", retryDelay, retryCount, connTimeoutTotal, connTimeoutPerConn, connLife, "")
	shardVars := map[string]map[string]interface{}{
		"0": {
			"bv0": 0,
		},
		"1": {
			"bv1": 1,
		},
	}
	_, _ = stc.ExecuteMulti(context.Background(), "query", "TestMultiExecs", shardVars, pb.TabletType_REPLICA, nil, false)
	if !reflect.DeepEqual(sbc0.Queries[0].BindVariables, shardVars["0"]) {
		t.Errorf("got %+v, want %+v", sbc0.Queries[0].BindVariables, shardVars["0"])
	}
	if !reflect.DeepEqual(sbc1.Queries[0].BindVariables, shardVars["1"]) {
		t.Errorf("got %+v, want %+v", sbc1.Queries[0].BindVariables, shardVars["1"])
	}
	sbc0.Queries = nil
	sbc1.Queries = nil
	_ = stc.StreamExecuteMulti(context.Background(), "query", "TestMultiExecs", shardVars, pb.TabletType_REPLICA, func(*mproto.QueryResult) error { return nil })
	if !reflect.DeepEqual(sbc0.Queries[0].BindVariables, shardVars["0"]) {
		t.Errorf("got %+v, want %+v", sbc0.Queries[0].BindVariables, shardVars["0"])
	}
	if !reflect.DeepEqual(sbc1.Queries[0].BindVariables, shardVars["1"]) {
		t.Errorf("got %+v, want %+v", sbc1.Queries[0].BindVariables, shardVars["1"])
	}
}
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng := emk(b, fmt.Sprintf("cput_%d", valueSize))
	defer eng.Close()

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}
	b.StopTimer()
}
func TestIamQueryGrantableRoles(t *testing.T) {
	var expectedResponse *adminpb.QueryGrantableRolesResponse = &adminpb.QueryGrantableRolesResponse{}

	mockIam.err = nil
	mockIam.reqs = nil

	mockIam.resps = append(mockIam.resps[:0], expectedResponse)

	var fullResourceName string = "fullResourceName1300993644"
	var request = &adminpb.QueryGrantableRolesRequest{
		FullResourceName: fullResourceName,
	}

	c, err := NewIamClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.QueryGrantableRoles(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}
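// The mockIam fixture used by these generated IAM tests follows the usual
// GAPIC mock pattern: a server stub that records every incoming request and
// replays queued responses (or a queued error). A sketch of that shape with
// assumed field names matching their use above (reqs, err, resps); the real
// generated mock differs in detail.
type mockIamService struct {
	reqs  []proto.Message // requests recorded in call order
	err   error           // if set, returned by every RPC
	resps []proto.Message // responses replayed in call order
}

// handleRPC shows the dispatch each generated RPC handler performs: record
// the request, fail if an error is queued, otherwise pop the next response.
func (m *mockIamService) handleRPC(req proto.Message) (proto.Message, error) {
	m.reqs = append(m.reqs, req)
	if m.err != nil {
		return nil, m.err
	}
	resp := m.resps[0]
	m.resps = m.resps[1:]
	return resp, nil
}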
func TestAllInvalid(t *testing.T) {
	dp := dptest.DP()
	dp.Metric = ""
	f := Forwarder{}
	assert.NoError(t, f.AddDatapoints(context.Background(), []*datapoint.Datapoint{dp}))
	assert.NoError(t, f.AddEvents(context.Background(), []*event.Event{}))
}
func TestIamListServiceAccountKeys(t *testing.T) {
	var expectedResponse *adminpb.ListServiceAccountKeysResponse = &adminpb.ListServiceAccountKeysResponse{}

	mockIam.err = nil
	mockIam.reqs = nil

	mockIam.resps = append(mockIam.resps[:0], expectedResponse)

	var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
	var request = &adminpb.ListServiceAccountKeysRequest{
		Name: formattedName,
	}

	c, err := NewIamClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListServiceAccountKeys(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}
func (test *TestIterationRepository) TestCreateChildIteration() {
	t := test.T()
	resource.Require(t, resource.Database)

	repo := iteration.NewIterationRepository(test.DB)

	start := time.Now()
	end := start.Add(time.Hour * (24 * 8 * 3))
	name := "Sprint #24"
	name2 := "Sprint #24.1"

	i := iteration.Iteration{
		Name:    name,
		SpaceID: uuid.NewV4(),
		StartAt: &start,
		EndAt:   &end,
	}
	repo.Create(context.Background(), &i)

	i2 := iteration.Iteration{
		Name:     name2,
		SpaceID:  uuid.NewV4(),
		StartAt:  &start,
		EndAt:    &end,
		ParentID: i.ID,
	}
	repo.Create(context.Background(), &i2)

	i2L, err := repo.Load(context.Background(), i2.ID)
	require.Nil(t, err)
	assert.NotEqual(t, uuid.Nil, i2.ParentID)
	assert.Equal(t, i2.ParentID, i2L.ParentID)
}
func TestIamDeleteServiceAccountKey(t *testing.T) {
	var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{}

	mockIam.err = nil
	mockIam.reqs = nil

	mockIam.resps = append(mockIam.resps[:0], expectedResponse)

	var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]")
	var request = &adminpb.DeleteServiceAccountKeyRequest{
		Name: formattedName,
	}

	c, err := NewIamClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	err = c.DeleteServiceAccountKey(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
}
func TestRangeStatsInit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	tc := testContext{}
	stopper := stop.NewStopper()
	defer stopper.Stop()
	tc.Start(t, stopper)

	ms := enginepb.MVCCStats{
		LiveBytes:       1,
		KeyBytes:        2,
		ValBytes:        3,
		IntentBytes:     4,
		LiveCount:       5,
		KeyCount:        6,
		ValCount:        7,
		IntentCount:     8,
		IntentAge:       9,
		GCBytesAge:      10,
		LastUpdateNanos: 11,
	}
	if err := engine.MVCCSetRangeStats(context.Background(), tc.engine, 1, &ms); err != nil {
		t.Fatal(err)
	}
	loadMS, err := engine.MVCCGetRangeStats(context.Background(), tc.engine, tc.repl.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(ms, loadMS) {
		t.Errorf("mvcc stats mismatch %+v != %+v", ms, loadMS)
	}
}
func TestIamTestIamPermissions(t *testing.T) {
	var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{}

	mockIam.err = nil
	mockIam.reqs = nil

	mockIam.resps = append(mockIam.resps[:0], expectedResponse)

	var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
	var permissions []string = nil
	var request = &iampb.TestIamPermissionsRequest{
		Resource:    formattedResource,
		Permissions: permissions,
	}

	c, err := NewIamClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.TestIamPermissions(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}
func TestQueryExecutorPlanPassDmlStrictModeAutoCommit(t *testing.T) {
	db := setUpQueryExecutorTest()
	testUtils := &testUtils{}
	query := "update test_table set pk = foo()"
	expected := &mproto.QueryResult{
		Fields: []mproto.Field{},
		Rows:   [][]sqltypes.Value{},
	}
	db.AddQuery(query, expected)

	// non-strict mode: the pass-through DML is allowed
	qre, sqlQuery := newTestQueryExecutor(query, context.Background(), noFlags)
	checkPlanID(t, planbuilder.PLAN_PASS_DML, qre.plan.PlanId)
	testUtils.checkEqual(t, expected, qre.Execute())
	sqlQuery.disallowQueries()

	// strict mode: the same DML must now fail
	qre, sqlQuery = newTestQueryExecutor(
		"update test_table set pk = foo()",
		context.Background(),
		enableRowCache|enableStrict)
	defer sqlQuery.disallowQueries()
	checkPlanID(t, planbuilder.PLAN_PASS_DML, qre.plan.PlanId)
	defer handleAndVerifyTabletError(
		t,
		"update should fail because strict mode is enabled",
		ErrFail)
	qre.Execute()
}
func TestLanguageServiceAnalyzeSentiment(t *testing.T) {
	var language string = "language-1613589672"
	var expectedResponse = &languagepb.AnalyzeSentimentResponse{
		Language: language,
	}

	mockLanguage.err = nil
	mockLanguage.reqs = nil

	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)

	var document *languagepb.Document = &languagepb.Document{}
	var request = &languagepb.AnalyzeSentimentRequest{
		Document: document,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.AnalyzeSentiment(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}
func actionUserPasswd(c *cli.Context) {
	api, user := mustUserAPIAndName(c)
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	currentUser, err := api.GetUser(ctx, user)
	cancel()
	if currentUser == nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	pass, err := speakeasy.Ask("New password: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error reading password:", err)
		os.Exit(1)
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	_, err = api.ChangePassword(ctx, user, pass)
	cancel()
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}

	fmt.Printf("Password updated\n")
}
func TestLanguageServiceAnnotateTextError(t *testing.T) {
	errCode := codes.Internal
	mockLanguage.err = grpc.Errorf(errCode, "test error")

	var document *languagepb.Document = &languagepb.Document{}
	var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
	var encodingType languagepb.EncodingType = 0
	var request = &languagepb.AnnotateTextRequest{
		Document:     document,
		Features:     features,
		EncodingType: encodingType,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.AnnotateText(context.Background(), request)

	if c := grpc.Code(err); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestShardConnBegin(t *testing.T) {
	testShardConnGeneric(t, "TestShardConnBegin", false, func() error {
		sdc := NewShardConn(context.Background(), new(sandboxTopo), "aa", "TestShardConnBegin", "0", topodatapb.TabletType_REPLICA, 1*time.Millisecond, 3, connTimeoutTotal, connTimeoutPerConn, connLife, connectTimings)
		_, err := sdc.Begin(context.Background())
		return err
	})
}
func modDefinedSet(settype string, modtype string, args []string) error {
	var d *api.DefinedSet
	var err error
	if len(args) < 1 {
		return fmt.Errorf(modPolicyUsageFormat[settype], modtype)
	}
	if d, err = parseDefinedSet(settype, args); err != nil {
		return err
	}
	switch modtype {
	case CMD_ADD:
		_, err = client.AddDefinedSet(context.Background(), &api.AddDefinedSetRequest{
			Set: d,
		})
	case CMD_DEL:
		all := false
		if len(args) < 2 {
			all = true
		}
		_, err = client.DeleteDefinedSet(context.Background(), &api.DeleteDefinedSetRequest{
			Set: d,
			All: all,
		})
	case CMD_SET:
		_, err = client.ReplaceDefinedSet(context.Background(), &api.ReplaceDefinedSetRequest{
			Set: d,
		})
	}
	return err
}
// TestCachedErrors tests that errors are properly cached and returned
// from the cache until the entry expires.
func TestCachedErrors(t *testing.T) {
	ft := &fakeTopo{keyspace: "test_ks"}
	rsts := NewResilientSrvTopoServer(ft, "TestCachedErrors")

	// ask for an unknown keyspace, should get an error
	_, err := rsts.GetSrvKeyspace(context.Background(), "", "unknown_ks")
	if err == nil {
		t.Fatalf("First GetSrvKeyspace didn't return an error")
	}
	if ft.callCount != 1 {
		t.Fatalf("GetSrvKeyspace was called %v times, want 1", ft.callCount)
	}

	// ask again, should get the cached error without a new topo call
	_, err = rsts.GetSrvKeyspace(context.Background(), "", "unknown_ks")
	if err == nil {
		t.Fatalf("Second GetSrvKeyspace didn't return an error")
	}
	if ft.callCount != 1 {
		t.Fatalf("GetSrvKeyspace was called again: %v times", ft.callCount)
	}

	// ask again after the cache expires, should get an error from a fresh call
	rsts.cacheTTL = 0
	_, err = rsts.GetSrvKeyspace(context.Background(), "", "unknown_ks")
	if err == nil {
		t.Fatalf("Third GetSrvKeyspace didn't return an error")
	}
	if ft.callCount != 2 {
		t.Fatalf("GetSrvKeyspace was not called again: %v times", ft.callCount)
	}
}
func TestIssueNodeCertificateBrokenCA(t *testing.T) {
	if !testutils.External {
		t.Skip("test only applicable for external CA configuration")
	}

	tc := testutils.NewTestCA(t)
	defer tc.Stop()

	csr, _, err := ca.GenerateNewCSR()
	assert.NoError(t, err)

	tc.ExternalSigningServer.Flake()

	go func() {
		time.Sleep(250 * time.Millisecond)
		tc.ExternalSigningServer.Deflake()
	}()

	issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Token: tc.WorkerToken}
	issueResponse, err := tc.NodeCAClients[0].IssueNodeCertificate(context.Background(), issueRequest)
	assert.NoError(t, err)
	assert.NotNil(t, issueResponse.NodeID)
	assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership)

	statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID}
	statusResponse, err := tc.NodeCAClients[0].NodeCertificateStatus(context.Background(), statusRequest)
	require.NoError(t, err)
	assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State)
	assert.NotNil(t, statusResponse.Certificate.Certificate)
	assert.Equal(t, api.NodeRoleWorker, statusResponse.Certificate.Role)
}