// TestFromCommit writes 1 KB to the same file in each of two successive
// commits, then checks that reading relative to commit1 returns only the
// second commit's 1 KB while an unbounded read returns the full 2 KB.
func TestFromCommit(t *testing.T) {
	t.Parallel()
	repo := uniqueString("TestFromCommit")
	pachClient := getPachClient(t)
	seed := time.Now().UnixNano()
	rand := rand.New(rand.NewSource(seed))
	err := pfsutil.CreateRepo(pachClient, repo)
	require.NoError(t, err)
	commit1, err := pfsutil.StartCommit(pachClient, repo, "")
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pachClient, repo, commit1.Id, "file", 0, workload.NewReader(rand, KB))
	require.NoError(t, err)
	err = pfsutil.FinishCommit(pachClient, repo, commit1.Id)
	require.NoError(t, err)
	commit2, err := pfsutil.StartCommit(pachClient, repo, commit1.Id)
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pachClient, repo, commit2.Id, "file", 0, workload.NewReader(rand, KB))
	require.NoError(t, err)
	err = pfsutil.FinishCommit(pachClient, repo, commit2.Id)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, pfsutil.GetFile(pachClient, repo, commit2.Id, "file", 0, 0, commit1.Id, nil, &buffer))
	require.Equal(t, buffer.Len(), KB)
	buffer = bytes.Buffer{}
	require.NoError(t, pfsutil.GetFile(pachClient, repo, commit2.Id, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, buffer.Len(), 2*KB)
}
// TestJob runs a single job that copies one input file to its output repo,
// then verifies the job succeeds, its output commit is finished (read-only),
// and the output file matches the input.
func TestJob(t *testing.T) {
	t.Parallel()
	dataRepo := uniqueString("TestJob.data")
	pachClient := getPachClient(t)
	require.NoError(t, pfsutil.CreateRepo(pachClient, dataRepo))
	commit, err := pfsutil.StartCommit(pachClient, dataRepo, "")
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pachClient, dataRepo, commit.Id, "file", 0, strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, pfsutil.FinishCommit(pachClient, dataRepo, commit.Id))
	job, err := ppsutil.CreateJob(
		pachClient,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*pps.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &pps.InspectJobRequest{
		Job:         job,
		BlockOutput: true,
		BlockState:  true,
	}
	jobInfo, err := pachClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_STATE_SUCCESS.String(), jobInfo.State.String())
	commitInfo, err := pfsutil.InspectCommit(pachClient, jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.Id)
	require.NoError(t, err)
	require.Equal(t, pfs.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	var buffer bytes.Buffer
	require.NoError(t, pfsutil.GetFile(pachClient, jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.Id, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// run registers every server and frontend in the group with the sharder.
// Register and RegisterFrontend block until s.cancel is closed, so each
// goroutine expects ErrCancelled.
func (s *serverGroup) run(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for i, server := range s.servers {
		wg.Add(1)
		i := i
		server := server
		go func() {
			defer wg.Done()
			require.Equal(
				t,
				shard.ErrCancelled,
				s.sharder.Register(s.cancel, fmt.Sprintf("address-%d", i+s.offset), server),
			)
		}()
	}
	for i, frontend := range s.frontends {
		wg.Add(1)
		i := i
		frontend := frontend
		go func() {
			defer wg.Done()
			require.Equal(
				t,
				shard.ErrCancelled,
				s.sharder.RegisterFrontend(s.cancel, fmt.Sprintf("address-%d", i+s.offset), frontend),
			)
		}()
	}
}
func testRun(t *testing.T, imageName string, commands []string, expectedStdout string, expectedStderr string) {
	client, err := newTestDockerClient()
	require.NoError(t, err)
	err = client.Pull(imageName, PullOptions{})
	require.NoError(t, err)
	containers, err := client.Create(
		imageName,
		CreateOptions{
			HasCommand: commands != nil,
		},
	)
	require.NoError(t, err)
	require.Equal(t, 1, len(containers))
	container := containers[0]
	err = client.Start(
		container,
		StartOptions{
			Commands: commands,
		},
	)
	require.NoError(t, err)
	err = client.Wait(container, WaitOptions{})
	require.NoError(t, err)
	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	err = client.Logs(container, LogsOptions{Stdout: stdout, Stderr: stderr})
	require.NoError(t, err)
	require.Equal(t, expectedStdout, stdout.String())
	require.Equal(t, expectedStderr, stderr.String())
	err = client.Remove(container, RemoveOptions{})
	require.NoError(t, err)
}
// runWatchTest exercises Watch and WatchAll: the first callback invocation
// (empty value) sets the watched key, the second verifies the value and closes
// the cancel channel, so both calls are expected to return ErrCancelled.
func runWatchTest(t *testing.T, client Client) {
	cancel := make(chan bool)
	err := client.Watch(
		"watch/foo",
		cancel,
		func(value string) error {
			if value == "" {
				return client.Set("watch/foo", "bar", 0)
			}
			require.Equal(t, "bar", value)
			close(cancel)
			return nil
		},
	)
	require.Equal(t, ErrCancelled, err)
	cancel = make(chan bool)
	err = client.WatchAll(
		"watchAll/foo",
		cancel,
		func(value map[string]string) error {
			if value == nil {
				return client.Set("watchAll/foo/bar", "quux", 0)
			}
			require.Equal(t, map[string]string{"watchAll/foo/bar": "quux"}, value)
			close(cancel)
			return nil
		},
	)
	require.Equal(t, ErrCancelled, err)
}
func newCluster(tb testing.TB, discoveryClient discovery.Client, servers map[string]*grpc.Server) *cluster {
	realSharder := shard.NewTestSharder(
		discoveryClient,
		testShardsPerServer*testNumServers,
		testNumReplicas,
		testNamespace(),
	)
	sharder := route.NewSharder(
		testShardsPerServer*testNumServers,
		testNumReplicas,
	)
	cluster := cluster{
		servers:         make(map[string]server.APIServer),
		internalServers: make(map[string]server.InternalAPIServer),
		cancels:         make(map[string]chan bool),
		internalCancels: make(map[string]chan bool),
		cancel:          make(chan bool),
		realSharder:     realSharder,
		sharder:         sharder,
		tb:              tb,
	}
	for address, s := range servers {
		cluster.addresses = append(cluster.addresses, address)
		router := route.NewRouter(
			cluster.realSharder,
			grpcutil.NewDialer(
				grpc.WithInsecure(),
			),
			address,
		)
		apiServer := server.NewAPIServer(
			cluster.sharder,
			router,
		)
		cluster.servers[address] = apiServer
		cluster.cancels[address] = make(chan bool)
		go func(address string) {
			require.Equal(tb, cluster.realSharder.RegisterFrontend(cluster.cancels[address], address, cluster.servers[address]), shard.ErrCancelled)
		}(address)
		pfs.RegisterAPIServer(s, apiServer)
		internalAPIServer := server.NewInternalAPIServer(
			cluster.sharder,
			router,
			getDriver(tb, address),
		)
		pfs.RegisterInternalAPIServer(s, internalAPIServer)
		cluster.internalServers[address] = internalAPIServer
		cluster.internalCancels[address] = make(chan bool)
		go func(address string) {
			require.Equal(tb, cluster.realSharder.Register(cluster.internalCancels[address], address, cluster.internalServers[address]), shard.ErrCancelled)
		}(address)
	}
	return &cluster
}
// TestMountBig mounts a repo over FUSE, concurrently writes five 300 MB files
// into an open commit, finishes the commit, and reads the files back through
// the mount.
func TestMountBig(t *testing.T) {
	t.Skip()
	t.Parallel()
	apiClient := getPfsClient(t)
	repoName := uniqueString("testMountBigRepo")
	err := pfsutil.CreateRepo(apiClient, repoName)
	require.NoError(t, err)
	directory := "/compile/testMount"
	mounter := fuse.NewMounter("localhost", apiClient)
	ready := make(chan bool)
	go func() {
		// use a goroutine-local err; assigning to the enclosing err would
		// race with the main goroutine's later use of it
		err := mounter.Mount(directory, &pfs.Shard{Number: 0, Modulus: 1}, nil, ready)
		require.NoError(t, err)
	}()
	<-ready
	_, err = os.Stat(filepath.Join(directory, repoName))
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(apiClient, repoName, "")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	bigValue := make([]byte, 1024*1024*300)
	for i := 0; i < 1024*1024*300; i++ {
		bigValue[i] = 'a'
	}
	wg := sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			err := ioutil.WriteFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666)
			require.NoError(t, err)
		}(j)
	}
	wg.Wait()
	err = pfsutil.FinishCommit(apiClient, repoName, newCommitID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			data, err := ioutil.ReadFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)))
			require.NoError(t, err)
			require.Equal(t, bigValue, data)
		}(j)
	}
	wg.Wait()
	err = mounter.Unmount(directory)
	require.NoError(t, err)
}
func (c *cluster) Restart(server int) {
	address := c.addresses[server]
	c.cancels[address] = make(chan bool)
	go func() {
		require.Equal(c.tb, c.addresser.Register(c.cancels[address], address, address, c.internalServers[address]), route.ErrCancelled)
	}()
}
func TestGetNameToNodeInfo(t *testing.T) {
	pipeline, err := parse.NewParser().ParsePipeline("../parse/testdata/basic")
	require.NoError(t, err)
	nodeInfos, err := getNameToNodeInfo(pipeline.NameToNode)
	require.NoError(t, err)
	require.Equal(t, []string{"bar-node"}, nodeInfos["baz-node-bar-in-bar-out-in"].Parents)
}
func TestJob(t *testing.T) {
	dataRepo := uniqueString("TestJob.data")
	pfsClient := getPfsClient(t)
	require.NoError(t, pfsutil.CreateRepo(pfsClient, dataRepo))
	commit, err := pfsutil.StartCommit(pfsClient, dataRepo, "")
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pfsClient, dataRepo, commit.Id, "file", 0, strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, pfsutil.FinishCommit(pfsClient, dataRepo, commit.Id))
	ppsClient := getPpsClient(t)
	job, err := ppsutil.CreateJob(
		ppsClient,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		"",
		1,
		[]*pfs.Commit{commit},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &pps.InspectJobRequest{
		Job:         job,
		BlockOutput: true,
		BlockState:  true,
	}
	jobInfo, err := ppsClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, pfsutil.GetFile(pfsClient, jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.Id, "file", 0, 0, nil, &buffer))
	require.Equal(t, "foo", buffer.String())
}
// RunBench spins up a test cluster, starts role assignment, waits for the
// cluster to become available, and runs the benchmark f against one of the
// cluster's client connections.
func RunBench(
	b *testing.B,
	f func(*testing.B, pfs.APIClient),
) {
	discoveryClient, err := getEtcdClient()
	require.NoError(b, err)
	var cluster *cluster
	prototest.RunB(
		b,
		testNumServers,
		func(servers map[string]*grpc.Server) {
			cluster = registerFunc(b, discoveryClient, servers)
		},
		func(b *testing.B, clientConns map[string]*grpc.ClientConn) {
			var clientConn *grpc.ClientConn
			for _, c := range clientConns {
				clientConn = c
				break
			}
			go func() {
				require.Equal(b, cluster.realSharder.AssignRoles(cluster.cancel), shard.ErrCancelled)
			}()
			cluster.WaitForAvailability()
			f(
				b,
				pfs.NewAPIClient(
					clientConn,
				),
			)
		},
	)
	cluster.Shutdown()
}
func newCluster(tb testing.TB, discoveryClient discovery.Client, servers map[string]*grpc.Server) Cluster {
	cluster := cluster{
		rolers:  make(map[string]role.Roler),
		servers: make(map[string]server.CombinedAPIServer),
		addresser: route.NewDiscoveryAddresser(
			discoveryClient,
			testNamespace(),
		),
		sharder: route.NewSharder(
			testShardsPerServer * testNumServers,
		),
		tb: tb,
	}
	for address, s := range servers {
		combinedAPIServer := server.NewCombinedAPIServer(
			cluster.sharder,
			route.NewRouter(
				cluster.addresser,
				grpcutil.NewDialer(
					grpc.WithInsecure(),
				),
				address,
			),
			getDriver(tb, address),
		)
		pfs.RegisterApiServer(s, combinedAPIServer)
		pfs.RegisterInternalApiServer(s, combinedAPIServer)
		roler := role.NewRoler(cluster.addresser, cluster.sharder, combinedAPIServer, address, testNumReplicas)
		go func() {
			require.Equal(tb, roler.Run(), discovery.ErrCancelled)
		}()
		cluster.addresses = append(cluster.addresses, address)
		cluster.rolers[address] = roler
		cluster.servers[address] = combinedAPIServer
	}
	return &cluster
}
func checkPFSOutput(t *testing.T, pfsAPIClient pfs.APIClient, outputCommit *pfs.Commit, filePathToContent map[string][]byte) {
	for filePath, content := range filePathToContent {
		getContent, err := getPFSContent(pfsAPIClient, outputCommit, filePath)
		require.NoError(t, err)
		require.Equal(t, content, getContent)
	}
}
func testMountBig(t *testing.T, apiClient pfs.APIClient, cluster Cluster) {
	repoName := "testMountBigRepo"
	err := pfsutil.CreateRepo(apiClient, repoName)
	require.NoError(t, err)
	directory := "/compile/testMount"
	mounter := fuse.NewMounter("localhost", apiClient)
	ready := make(chan bool)
	go func() {
		// goroutine-local err: writing the enclosing err would race with the
		// main goroutine
		err := mounter.Mount(directory, 0, 1, ready)
		require.NoError(t, err)
	}()
	<-ready
	_, err = os.Stat(filepath.Join(directory, repoName))
	require.NoError(t, err)
	_, err = os.Stat(filepath.Join(directory, repoName, "scratch"))
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(apiClient, repoName, "scratch")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	bigValue := make([]byte, 1024*1024*300)
	for i := 0; i < 1024*1024*300; i++ {
		bigValue[i] = 'a'
	}
	wg := sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			err := ioutil.WriteFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666)
			require.NoError(t, err)
		}(j)
	}
	wg.Wait()
	err = pfsutil.FinishCommit(apiClient, repoName, newCommitID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			data, err := ioutil.ReadFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)))
			require.NoError(t, err)
			require.Equal(t, bigValue, data)
		}(j)
	}
	wg.Wait()
	err = mounter.Unmount(directory)
	require.NoError(t, err)
}
func testMountBig(t *testing.T, apiClient pfs.ApiClient, internalAPIClient pfs.InternalApiClient, cluster Cluster) {
	repositoryName := TestRepositoryName()
	err := pfsutil.InitRepository(apiClient, repositoryName)
	require.NoError(t, err)
	directory := "/compile/testMount"
	mounter := fuse.NewMounter(apiClient)
	err = mounter.Mount(repositoryName, "", directory, 0, 1)
	require.NoError(t, err)
	_, err = os.Stat(filepath.Join(directory, "scratch"))
	require.NoError(t, err)
	commit, err := pfsutil.Branch(apiClient, repositoryName, "scratch")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	bigValue := make([]byte, 1024*1024*300)
	for i := 0; i < 1024*1024*300; i++ {
		bigValue[i] = 'a'
	}
	wg := sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			err := ioutil.WriteFile(filepath.Join(directory, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666)
			require.NoError(t, err)
		}(j)
	}
	wg.Wait()
	err = pfsutil.Write(apiClient, repositoryName, newCommitID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			data, err := ioutil.ReadFile(filepath.Join(directory, newCommitID, fmt.Sprintf("big%d", j)))
			require.NoError(t, err)
			require.Equal(t, bigValue, data)
		}(j)
	}
	wg.Wait()
	err = mounter.Unmount(directory)
	require.NoError(t, err)
	err = mounter.Wait(directory)
	require.NoError(t, err)
}
func testBasicRethink(t *testing.T, apiServer persist.APIServer) {
	jobInfo, err := apiServer.CreateJobInfo(
		context.Background(),
		&persist.JobInfo{
			Spec: &persist.JobInfo_PipelineName{
				PipelineName: "foo",
			},
		},
	)
	require.NoError(t, err)
	getJobInfo, err := apiServer.GetJobInfo(
		context.Background(),
		&pps.Job{
			Id: jobInfo.JobId,
		},
	)
	require.NoError(t, err)
	require.Equal(t, jobInfo.JobId, getJobInfo.JobId)
	require.Equal(t, "foo", getJobInfo.GetPipelineName())
}
func (s *serverGroup) run(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for _, roler := range s.rolers {
		wg.Add(1)
		go func(roler Roler) {
			defer wg.Done()
			require.Equal(t, discovery.ErrCancelled, roler.Run())
		}(roler)
	}
}
func runTest(t *testing.T, client Client) {
	err := client.Set("foo", "one", 0)
	require.NoError(t, err)
	value, err := client.Get("foo")
	require.NoError(t, err)
	require.Equal(t, "one", value)
	//values, err := client.GetAll("foo")
	//require.NoError(t, err)
	//require.Equal(t, map[string]string{"foo": "one"}, values)
	err = client.Set("a/b/foo", "one", 0)
	require.NoError(t, err)
	err = client.Set("a/b/bar", "two", 0)
	require.NoError(t, err)
	values, err := client.GetAll("a/b")
	require.NoError(t, err)
	require.Equal(t, map[string]string{"a/b/foo": "one", "a/b/bar": "two"}, values)
	require.NoError(t, client.Close())
}
// TestSimple exercises the driver directly: write a file in commit1, read it
// back, write another file in commit2, and verify ListChange reports the
// single three-byte change relative to commit1.
func TestSimple(t *testing.T) {
	driver, err := NewDriver(getBtrfsRootDir(t), "drive.TestSimple")
	require.NoError(t, err)
	shards := make(map[uint64]bool)
	shards[0] = true
	repo := &pfs.Repo{Name: "drive.TestSimple"}
	require.NoError(t, driver.CreateRepo(repo))
	commit1 := &pfs.Commit{
		Repo: repo,
		Id:   "commit1",
	}
	require.NoError(t, driver.StartCommit(nil, commit1, shards))
	file1 := &pfs.File{
		Commit: commit1,
		Path:   "foo",
	}
	require.NoError(t, driver.PutFile(file1, 0, 0, strings.NewReader("foo")))
	require.NoError(t, driver.FinishCommit(commit1, shards))
	reader, err := driver.GetFile(file1, 0)
	require.NoError(t, err)
	contents, err := ioutil.ReadAll(reader)
	require.NoError(t, err)
	require.Equal(t, string(contents), "foo")
	commit2 := &pfs.Commit{
		Repo: repo,
		Id:   "commit2",
	}
	require.NoError(t, driver.StartCommit(commit1, commit2, shards))
	file2 := &pfs.File{
		Commit: commit2,
		Path:   "bar",
	}
	require.NoError(t, driver.PutFile(file2, 0, 0, strings.NewReader("bar")))
	require.NoError(t, driver.FinishCommit(commit2, shards))
	changes, err := driver.ListChange(file2, commit1, 0)
	require.NoError(t, err)
	require.Equal(t, len(changes), 1)
	require.Equal(t, changes[0].File, file2)
	require.Equal(t, changes[0].OffsetBytes, uint64(0))
	require.Equal(t, changes[0].SizeBytes, uint64(3))
}
func getJobForPipeline(t *testing.T, jobAPIClient pps.JobAPIClient, pipeline *pps.Pipeline) *pps.Job {
	jobInfos, err := jobAPIClient.ListJob(
		context.Background(),
		&pps.ListJobRequest{
			Pipeline: pipeline,
		},
	)
	require.NoError(t, err)
	require.NotNil(t, jobInfos)
	require.Equal(t, 1, len(jobInfos.JobInfo))
	return jobInfos.JobInfo[0].Job
}
func checkWrites(tb testing.TB, apiClient pfs.ApiClient, repositoryName string, commitID string) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for i := 0; i < testSize; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			buffer := bytes.NewBuffer(nil)
			iErr := pfsutil.GetFile(apiClient, repositoryName, commitID, fmt.Sprintf("a/b/file%d", i), 0, pfsutil.GetAll, buffer)
			require.NoError(tb, iErr)
			require.Equal(tb, fmt.Sprintf("hello%d", i), buffer.String())
			buffer = bytes.NewBuffer(nil)
			iErr = pfsutil.GetFile(apiClient, repositoryName, commitID, fmt.Sprintf("a/c/file%d", i), 0, pfsutil.GetAll, buffer)
			require.NoError(tb, iErr)
			require.Equal(tb, fmt.Sprintf("hello%d", i), buffer.String())
		}()
	}
}
func checkWrites(tb testing.TB, apiClient pfs.APIClient, repoName string, commitID string) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for i := 0; i < testSize; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			buffer := bytes.NewBuffer(nil)
			iErr := pfsutil.GetFile(
				apiClient,
				repoName,
				commitID,
				fmt.Sprintf("a/b/file%d", i),
				0,
				math.MaxInt64,
				&pfs.Shard{Number: 0, Modulus: 1},
				buffer,
			)
			require.NoError(tb, iErr)
			require.Equal(tb, fmt.Sprintf("hello%d", i), buffer.String())
			buffer = bytes.NewBuffer(nil)
			iErr = pfsutil.GetFile(
				apiClient,
				repoName,
				commitID,
				fmt.Sprintf("a/c/file%d", i),
				0,
				math.MaxInt64,
				&pfs.Shard{Number: 0, Modulus: 1},
				buffer,
			)
			require.NoError(tb, iErr)
			require.Equal(tb, fmt.Sprintf("hello%d", i), buffer.String())
		}()
	}
}
// TestGrep runs the same grep job with parallelism 1 and parallelism 4 and
// checks that both produce output repos of equal size.
func TestGrep(t *testing.T) {
	t.Skip()
	t.Parallel()
	dataRepo := uniqueString("TestGrep.data")
	pfsClient := getPfsClient(t)
	require.NoError(t, pfsutil.CreateRepo(pfsClient, dataRepo))
	commit, err := pfsutil.StartCommit(pfsClient, dataRepo, "")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = pfsutil.PutFile(pfsClient, dataRepo, commit.Id, fmt.Sprintf("file%d", i), 0, strings.NewReader("foo\nbar\nfizz\nbuzz\n"))
		require.NoError(t, err)
	}
	require.NoError(t, pfsutil.FinishCommit(pfsClient, dataRepo, commit.Id))
	ppsClient := getPpsClient(t)
	job1, err := ppsutil.CreateJob(
		ppsClient,
		"",
		[]string{"bash"},
		fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo),
		1,
		[]*pps.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	job2, err := ppsutil.CreateJob(
		ppsClient,
		"",
		[]string{"bash"},
		fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo),
		4,
		[]*pps.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &pps.InspectJobRequest{
		Job:         job1,
		BlockOutput: true,
		BlockState:  true,
	}
	job1Info, err := ppsClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	inspectJobRequest.Job = job2
	job2Info, err := ppsClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	repo1Info, err := pfsutil.InspectRepo(pfsClient, job1Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	repo2Info, err := pfsutil.InspectRepo(pfsClient, job2Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	require.Equal(t, repo1Info.SizeBytes, repo2Info.SizeBytes)
}
func testBasic(t *testing.T, client Client) {
	pipelineRun := &pps.PipelineRun{
		Id:         "id",
		PipelineId: "pipeline_id",
	}
	require.NoError(t, client.CreatePipelineRun(pipelineRun))
	pipelineRunResponse, err := client.GetPipelineRun("id")
	require.NoError(t, err)
	require.Equal(t, pipelineRun, pipelineRunResponse)
	require.NoError(t, client.CreatePipelineRunStatus("id", pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_CREATED))
	pipelineRunStatusResponse, err := client.GetAllPipelineRunStatuses("id")
	require.NoError(t, err)
	require.Equal(t, pipelineRunStatusResponse[0].PipelineRunStatusType, pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_CREATED)
	require.NoError(t, client.CreatePipelineRunStatus("id", pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS))
	pipelineRunStatusResponse, err = client.GetAllPipelineRunStatuses("id")
	require.NoError(t, err)
	require.Equal(t, pipelineRunStatusResponse[0].PipelineRunStatusType, pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS)
	require.NoError(t, client.CreatePipelineRunContainers(&pps.PipelineRunContainer{PipelineRunId: "id", ContainerId: "container", Node: "node"}))
	containerIDs, err := client.GetPipelineRunContainers("id")
	require.NoError(t, err)
	require.Equal(t, []*pps.PipelineRunContainer{{PipelineRunId: "id", ContainerId: "container", Node: "node"}}, containerIDs)
}
func (s *serverGroup) run(t *testing.T) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for i, server := range s.servers {
		wg.Add(1)
		go func(i int, server Server) {
			defer wg.Done()
			require.Equal(
				t,
				ErrCancelled,
				s.addresser.Register(s.cancel, fmt.Sprintf("server-%d", i+s.offset), fmt.Sprintf("address-%d", i+s.offset), server),
			)
		}(i, server)
	}
}
// TestSharding concurrently writes NUMFILES random 1 KB files, then verifies
// that reading each file as a single shard yields the same byte count as
// reading it across four block shards.
func TestSharding(t *testing.T) {
	t.Parallel()
	repo := uniqueString("TestSharding")
	pachClient := getPachClient(t)
	err := pfsutil.CreateRepo(pachClient, repo)
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(pachClient, repo, "")
	require.NoError(t, err)
	var wg sync.WaitGroup
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			rand := rand.New(rand.NewSource(int64(i)))
			// use a goroutine-local err; assigning to the shared err from
			// multiple goroutines would be a data race
			_, err := pfsutil.PutFile(pachClient, repo, commit.Id, fmt.Sprintf("file%d", i), 0, workload.NewReader(rand, KB))
			require.NoError(t, err)
		}()
	}
	wg.Wait()
	err = pfsutil.FinishCommit(pachClient, repo, commit.Id)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			var buffer1Shard bytes.Buffer
			var buffer4Shard bytes.Buffer
			shard := &pfs.Shard{FileModulus: 1, BlockModulus: 1}
			err := pfsutil.GetFile(pachClient, repo, commit.Id, fmt.Sprintf("file%d", i), 0, 0, "", shard, &buffer1Shard)
			require.NoError(t, err)
			shard.BlockModulus = 4
			for blockNumber := uint64(0); blockNumber < 4; blockNumber++ {
				shard.BlockNumber = blockNumber
				err := pfsutil.GetFile(pachClient, repo, commit.Id, fmt.Sprintf("file%d", i), 0, 0, "", shard, &buffer4Shard)
				require.NoError(t, err)
			}
			require.Equal(t, buffer1Shard.Len(), buffer4Shard.Len())
		}()
	}
	wg.Wait()
}
func TestGetAllFilePaths(t *testing.T) {
	files, err := getAllFilePaths("testdata/basic", "", []string{}, []string{"other", "root/ignore", "ignore-me.yml"})
	require.NoError(t, err)
	require.Equal(
		t,
		[]string{
			"root/foo-node.yml",
			"root/foo-service.yml",
			"root/include/bar-node.yml",
			"root/include/bar-service.yml",
			"root/include/bat-node.yml",
			"root/include/baz-node.yml",
			"root/include/baz-service.yml",
		},
		files,
	)
}
func (c *cluster) Restart(index int) {
	address := c.addresses[index]
	c.cancels[address] = make(chan bool)
	internalAPIServer := server.NewInternalAPIServer(
		c.sharder,
		route.NewRouter(
			c.realSharder,
			grpcutil.NewDialer(
				grpc.WithInsecure(),
			),
			address,
		),
		getDriver(c.tb, address),
	)
	c.internalServers[address] = internalAPIServer
	go func() {
		require.Equal(c.tb, c.realSharder.Register(c.cancels[address], address, c.internalServers[address]), shard.ErrCancelled)
	}()
}
// WaitForAvailability blocks (up to a 45 s timeout) until every shard has a
// non-backfilling master and a full set of non-backfilling replicas, all owned
// by this cluster's rolers. The watch callback signals completion by returning
// an error, which terminates the watch; the _-prefixed copies of the watch
// state are retained only for debugger inspection.
func (c *cluster) WaitForAvailability() {
	cancel := make(chan bool)
	time.AfterFunc(45*time.Second, func() { close(cancel) })
	var _shardToMasterAddress map[int]route.Address
	var _shardToReplicaAddress map[int]map[int]route.Address
	err := c.addresser.WatchShardToAddress(cancel,
		func(shardToMasterAddress map[int]route.Address, shardToReplicaAddress map[int]map[int]route.Address) (uint64, error) {
			_shardToMasterAddress = shardToMasterAddress
			_shardToReplicaAddress = shardToReplicaAddress
			if len(shardToMasterAddress) != testShardsPerServer*testNumServers {
				return 0, nil
			}
			if len(shardToReplicaAddress) != testShardsPerServer*testNumServers {
				return 0, nil
			}
			for _, addresses := range shardToReplicaAddress {
				if len(addresses) != testNumReplicas {
					return 0, nil
				}
			}
			for _, address := range shardToMasterAddress {
				if address.Backfilling {
					return 0, nil
				}
				if _, ok := c.rolers[address.Address]; !ok {
					return 0, nil
				}
			}
			for _, addresses := range shardToReplicaAddress {
				for _, address := range addresses {
					if address.Backfilling {
						return 0, nil
					}
					if _, ok := c.rolers[address.Address]; !ok {
						return 0, nil
					}
				}
			}
			return 0, fmt.Errorf("Complete")
		})
	require.Equal(c.tb, err.Error(), "Complete")
}
// runMasterReplicaTest starts half the servers and waits for roles to balance
// across them, starts the second half and waits for rebalancing, then cancels
// the first group and waits for the second group to absorb all roles.
func runMasterReplicaTest(t *testing.T, client discovery.Client) {
	sharder := NewSharder(testNumShards, testNumReplicas)
	addresser := NewDiscoveryAddresser(client, sharder, "TestMasterReplica")
	cancel := make(chan bool)
	go func() {
		require.Equal(t, ErrCancelled, addresser.AssignRoles(cancel))
	}()
	defer func() {
		close(cancel)
	}()
	serverGroup1 := NewServerGroup(t, addresser, testNumServers/2, 0)
	go serverGroup1.run(t)
	start := time.Now()
	for !serverGroup1.satisfied((testNumShards * (testNumReplicas + 1)) / (testNumServers / 2)) {
		time.Sleep(500 * time.Millisecond)
		if time.Since(start) > 30*time.Second {
			t.Fatal("test timed out")
		}
	}
	serverGroup2 := NewServerGroup(t, addresser, testNumServers/2, testNumServers/2)
	go serverGroup2.run(t)
	start = time.Now()
	for !serverGroup1.satisfied((testNumShards*(testNumReplicas+1))/testNumServers) ||
		!serverGroup2.satisfied((testNumShards*(testNumReplicas+1))/testNumServers) {
		time.Sleep(time.Second)
		if time.Since(start) > 60*time.Second {
			t.Fatal("test timed out")
		}
	}
	close(serverGroup1.cancel)
	for !serverGroup2.satisfied((testNumShards * (testNumReplicas + 1)) / (testNumServers / 2)) {
		time.Sleep(500 * time.Millisecond)
		if time.Since(start) > 60*time.Second {
			t.Fatal("test timed out")
		}
	}
}