// testBlock verifies that InspectJob with BlockOutput and BlockState set waits
// until both the job output and the job state have been recorded.
func testBlock(t *testing.T, apiServer persist.APIServer) {
	jobInfo, err := apiServer.CreateJobInfo(context.Background(), &persist.JobInfo{})
	require.NoError(t, err)
	jobID := jobInfo.JobId
	go func() {
		_, err := apiServer.CreateJobOutput(
			context.Background(),
			&persist.JobOutput{
				JobId:        jobID,
				OutputCommit: pfsutil.NewCommit("foo", "bar"),
			})
		require.NoError(t, err)
		_, err = apiServer.CreateJobState(
			context.Background(),
			&persist.JobState{
				JobId: jobID,
				State: pps.JobState_JOB_STATE_SUCCESS,
			})
		require.NoError(t, err)
	}()
	_, err = apiServer.InspectJob(
		context.Background(),
		&pps.InspectJobRequest{
			Job:         &pps.Job{Id: jobID},
			BlockOutput: true,
			BlockState:  true,
		},
	)
	require.NoError(t, err)
}
// testFailures writes data to a commit, kills every replica, waits for the
// cluster to become available again, and verifies the writes are still readable.
func testFailures(t *testing.T, apiClient pfs.ApiClient, internalAPIClient pfs.InternalApiClient, cluster Cluster) {
	repositoryName := TestRepositoryName()
	err := pfsutil.InitRepository(apiClient, repositoryName)
	require.NoError(t, err)
	commit, err := pfsutil.Branch(apiClient, repositoryName, "scratch")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	err = pfsutil.MakeDirectory(apiClient, repositoryName, newCommitID, "a/b")
	require.NoError(t, err)
	err = pfsutil.MakeDirectory(apiClient, repositoryName, newCommitID, "a/c")
	require.NoError(t, err)
	doWrites(t, apiClient, repositoryName, newCommitID)
	err = pfsutil.Write(apiClient, repositoryName, newCommitID)
	require.NoError(t, err)
	checkWrites(t, apiClient, repositoryName, newCommitID)
	for server := 0; server < testNumReplicas; server++ {
		cluster.Kill(server)
	}
	cluster.WaitForAvailability()
	checkWrites(t, apiClient, repositoryName, newCommitID)
}
// testFailures (newer API) writes data, kills the role assigner and every
// replica, restarts the role assigner, waits for availability, and verifies
// the writes survived.
func testFailures(t *testing.T, apiClient pfs.APIClient, cluster Cluster) {
	repoName := "testFailuresRepo"
	err := pfsutil.CreateRepo(apiClient, repoName)
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(apiClient, repoName, "")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	err = pfsutil.MakeDirectory(apiClient, repoName, newCommitID, "a/b")
	require.NoError(t, err)
	err = pfsutil.MakeDirectory(apiClient, repoName, newCommitID, "a/c")
	require.NoError(t, err)
	doWrites(t, apiClient, repoName, newCommitID)
	err = pfsutil.FinishCommit(apiClient, repoName, newCommitID)
	require.NoError(t, err)
	checkWrites(t, apiClient, repoName, newCommitID)
	cluster.KillRoleAssigner()
	for server := 0; server < testNumReplicas; server++ {
		cluster.Kill(server)
	}
	cluster.RestartRoleAssigner()
	cluster.WaitForAvailability()
	checkWrites(t, apiClient, repoName, newCommitID)
}
func TestGetNameToNodeInfo(t *testing.T) {
	pipeline, err := parse.NewParser().ParsePipeline("../parse/testdata/basic")
	require.NoError(t, err)
	nodeInfos, err := getNameToNodeInfo(pipeline.NameToNode)
	require.NoError(t, err)
	require.Equal(t, []string{"bar-node"}, nodeInfos["baz-node-bar-in-bar-out-in"].Parents)
}
func RunTestWithRethinkAPIServer(t *testing.T, testFunc func(t *testing.T, persistAPIServer persist.APIServer)) {
	apiServer, err := NewTestRethinkAPIServer()
	require.NoError(t, err)
	defer func() {
		require.NoError(t, apiServer.Close())
	}()
	testFunc(t, apiServer)
}
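// A minimal sketch of how a test might drive RunTestWithRethinkAPIServer; the
// test name and the callback body are hypothetical, only the harness signature
// and the CreateJobInfo call come from the functions in this file.
func TestRethinkExample(t *testing.T) {
	RunTestWithRethinkAPIServer(t, func(t *testing.T, apiServer persist.APIServer) {
		_, err := apiServer.CreateJobInfo(context.Background(), &persist.JobInfo{})
		require.NoError(t, err)
	})
}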
func runTestRethink(t *testing.T, testFunc func(*testing.T, Client)) {
	client, err := getRethinkSession()
	require.NoError(t, err)
	defer func() {
		require.NoError(t, client.Close())
	}()
	testFunc(t, client)
}
// testMountBig mounts PFS over FUSE, writes five 300MB files concurrently,
// finishes the commit, and reads the files back through the mount.
func testMountBig(t *testing.T, apiClient pfs.APIClient, cluster Cluster) {
	repoName := "testMountBigRepo"
	err := pfsutil.CreateRepo(apiClient, repoName)
	require.NoError(t, err)
	directory := "/compile/testMount"
	mounter := fuse.NewMounter("localhost", apiClient)
	ready := make(chan bool)
	go func() {
		// use a local err to avoid racing with the test goroutine
		err := mounter.Mount(directory, 0, 1, ready)
		require.NoError(t, err)
	}()
	<-ready
	_, err = os.Stat(filepath.Join(directory, repoName))
	require.NoError(t, err)
	_, err = os.Stat(filepath.Join(directory, repoName, "scratch"))
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(apiClient, repoName, "scratch")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	bigValue := make([]byte, 1024*1024*300)
	for i := 0; i < 1024*1024*300; i++ {
		bigValue[i] = 'a'
	}
	wg := sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			err := ioutil.WriteFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666)
			require.NoError(t, err)
		}(j)
	}
	wg.Wait()
	err = pfsutil.FinishCommit(apiClient, repoName, newCommitID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			data, err := ioutil.ReadFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)))
			require.NoError(t, err)
			require.Equal(t, bigValue, data)
		}(j)
	}
	wg.Wait()
	err = mounter.Unmount(directory)
	require.NoError(t, err)
}
// TestFromCommit writes 1KB into each of two chained commits and verifies that
// GetFile returns only the second commit's delta when a from-commit is given,
// and the full 2KB otherwise.
func TestFromCommit(t *testing.T) {
	t.Parallel()
	repo := uniqueString("TestFromCommit")
	pachClient := getPachClient(t)
	seed := time.Now().UnixNano()
	rand := rand.New(rand.NewSource(seed))
	err := pfsutil.CreateRepo(pachClient, repo)
	require.NoError(t, err)
	commit1, err := pfsutil.StartCommit(pachClient, repo, "")
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pachClient, repo, commit1.Id, "file", 0, workload.NewReader(rand, KB))
	require.NoError(t, err)
	err = pfsutil.FinishCommit(pachClient, repo, commit1.Id)
	require.NoError(t, err)
	commit2, err := pfsutil.StartCommit(pachClient, repo, commit1.Id)
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pachClient, repo, commit2.Id, "file", 0, workload.NewReader(rand, KB))
	require.NoError(t, err)
	err = pfsutil.FinishCommit(pachClient, repo, commit2.Id)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, pfsutil.GetFile(pachClient, repo, commit2.Id, "file", 0, 0, commit1.Id, nil, &buffer))
	require.Equal(t, buffer.Len(), KB)
	buffer = bytes.Buffer{}
	require.NoError(t, pfsutil.GetFile(pachClient, repo, commit2.Id, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, buffer.Len(), 2*KB)
}
func handleEndJobStatusOutputStream(t *testing.T, jobAPIClient pps.JobAPIClient, jobInfo *pps.JobInfo, outputStream pps.OutputStream) {
	jobAPIGetJobLogsClient, err := jobAPIClient.GetJobLogs(
		context.Background(),
		&pps.GetJobLogsRequest{
			Job:          jobInfo.Job,
			OutputStream: outputStream,
		},
	)
	require.NoError(t, err)
	require.NoError(t, protostream.WriteFromStreamingBytesClient(jobAPIGetJobLogsClient, protolog.Writer()))
}
// TestGrep runs the same grep job with 1 shard and with 4 shards and checks
// that both produce output repos of the same size.
func TestGrep(t *testing.T) {
	t.Skip()
	t.Parallel()
	dataRepo := uniqueString("TestGrep.data")
	pfsClient := getPfsClient(t)
	require.NoError(t, pfsutil.CreateRepo(pfsClient, dataRepo))
	commit, err := pfsutil.StartCommit(pfsClient, dataRepo, "")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = pfsutil.PutFile(pfsClient, dataRepo, commit.Id, fmt.Sprintf("file%d", i), 0, strings.NewReader("foo\nbar\nfizz\nbuzz\n"))
		require.NoError(t, err)
	}
	require.NoError(t, pfsutil.FinishCommit(pfsClient, dataRepo, commit.Id))
	ppsClient := getPpsClient(t)
	job1, err := ppsutil.CreateJob(
		ppsClient,
		"",
		[]string{"bash"},
		fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo),
		1,
		[]*pps.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	job2, err := ppsutil.CreateJob(
		ppsClient,
		"",
		[]string{"bash"},
		fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo),
		4,
		[]*pps.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &pps.InspectJobRequest{
		Job:         job1,
		BlockOutput: true,
		BlockState:  true,
	}
	job1Info, err := ppsClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	inspectJobRequest.Job = job2
	job2Info, err := ppsClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	repo1Info, err := pfsutil.InspectRepo(pfsClient, job1Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	repo2Info, err := pfsutil.InspectRepo(pfsClient, job2Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	require.Equal(t, repo1Info.SizeBytes, repo2Info.SizeBytes)
}
// TestJob runs a single cp job over one input commit and verifies that the
// copied file appears, unchanged, in the job's output commit.
func TestJob(t *testing.T) {
	t.Parallel()
	dataRepo := uniqueString("TestJob.data")
	pachClient := getPachClient(t)
	require.NoError(t, pfsutil.CreateRepo(pachClient, dataRepo))
	commit, err := pfsutil.StartCommit(pachClient, dataRepo, "")
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pachClient, dataRepo, commit.Id, "file", 0, strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, pfsutil.FinishCommit(pachClient, dataRepo, commit.Id))
	job, err := ppsutil.CreateJob(
		pachClient,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*pps.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &pps.InspectJobRequest{
		Job:         job,
		BlockOutput: true,
		BlockState:  true,
	}
	jobInfo, err := pachClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_STATE_SUCCESS.String(), jobInfo.State.String())
	commitInfo, err := pfsutil.InspectCommit(pachClient, jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.Id)
	require.NoError(t, err)
	require.Equal(t, pfs.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	var buffer bytes.Buffer
	require.NoError(t, pfsutil.GetFile(pachClient, jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.Id, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
func testMountBig(t *testing.T, apiClient pfs.ApiClient, internalAPIClient pfs.InternalApiClient, cluster Cluster) {
	repositoryName := TestRepositoryName()
	err := pfsutil.InitRepository(apiClient, repositoryName)
	require.NoError(t, err)
	directory := "/compile/testMount"
	mounter := fuse.NewMounter(apiClient)
	err = mounter.Mount(repositoryName, "", directory, 0, 1)
	require.NoError(t, err)
	_, err = os.Stat(filepath.Join(directory, "scratch"))
	require.NoError(t, err)
	commit, err := pfsutil.Branch(apiClient, repositoryName, "scratch")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	bigValue := make([]byte, 1024*1024*300)
	for i := 0; i < 1024*1024*300; i++ {
		bigValue[i] = 'a'
	}
	wg := sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			err := ioutil.WriteFile(filepath.Join(directory, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666)
			require.NoError(t, err)
		}(j)
	}
	wg.Wait()
	err = pfsutil.Write(apiClient, repositoryName, newCommitID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			data, err := ioutil.ReadFile(filepath.Join(directory, newCommitID, fmt.Sprintf("big%d", j)))
			require.NoError(t, err)
			require.Equal(t, bigValue, data)
		}(j)
	}
	wg.Wait()
	err = mounter.Unmount(directory)
	require.NoError(t, err)
	err = mounter.Wait(directory)
	require.NoError(t, err)
}
// TestMountBig mounts PFS over FUSE with a single shard, writes five 300MB
// files concurrently, finishes the commit, and reads them back.
func TestMountBig(t *testing.T) {
	t.Skip()
	t.Parallel()
	apiClient := getPfsClient(t)
	repoName := uniqueString("testMountBigRepo")
	err := pfsutil.CreateRepo(apiClient, repoName)
	require.NoError(t, err)
	directory := "/compile/testMount"
	mounter := fuse.NewMounter("localhost", apiClient)
	ready := make(chan bool)
	go func() {
		// use a local err to avoid racing with the test goroutine
		err := mounter.Mount(directory, &pfs.Shard{Number: 0, Modulus: 1}, nil, ready)
		require.NoError(t, err)
	}()
	<-ready
	_, err = os.Stat(filepath.Join(directory, repoName))
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(apiClient, repoName, "")
	require.NoError(t, err)
	require.NotNil(t, commit)
	newCommitID := commit.Id
	bigValue := make([]byte, 1024*1024*300)
	for i := 0; i < 1024*1024*300; i++ {
		bigValue[i] = 'a'
	}
	wg := sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			err := ioutil.WriteFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666)
			require.NoError(t, err)
		}(j)
	}
	wg.Wait()
	err = pfsutil.FinishCommit(apiClient, repoName, newCommitID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for j := 0; j < 5; j++ {
		wg.Add(1)
		go func(j int) {
			defer wg.Done()
			data, err := ioutil.ReadFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)))
			require.NoError(t, err)
			require.Equal(t, bigValue, data)
		}(j)
	}
	wg.Wait()
	err = mounter.Unmount(directory)
	require.NoError(t, err)
}
// waitForJob polls InspectJob once a second until the job reports a terminal
// status or the timeout elapses, failing the test if the outcome does not
// match expectError.
func waitForJob(t *testing.T, jobAPIClient pps.JobAPIClient, job *pps.Job, timeoutSec int, expectError bool) *pps.JobInfo {
	for i := 0; i < timeoutSec; i++ {
		time.Sleep(1 * time.Second)
		jobInfo, err := jobAPIClient.InspectJob(
			context.Background(),
			&pps.InspectJobRequest{
				Job: job,
			},
		)
		require.NoError(t, err)
		if len(jobInfo.JobStatus) == 0 {
			continue
		}
		jobStatus := jobInfo.JobStatus[0]
		protolog.Infof("status of job %s at %d seconds: %v", job.Id, i+1, jobInfo.JobStatus)
		switch jobStatus.Type {
		case pps.JobStatusType_JOB_STATUS_TYPE_ERROR:
			handleEndJobStatus(t, jobAPIClient, jobInfo)
			if !expectError {
				t.Fatalf("job %s had error", job.Id)
			}
			return jobInfo
		case pps.JobStatusType_JOB_STATUS_TYPE_SUCCESS:
			handleEndJobStatus(t, jobAPIClient, jobInfo)
			if expectError {
				t.Fatalf("job %s did not have error", job.Id)
			}
			return jobInfo
		}
	}
	t.Fatalf("job %s did not finish in %d seconds", job.Id, timeoutSec)
	return nil
}
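// handleEndJobStatus is called by waitForJob but is not shown in this file; a
// plausible sketch, assuming it simply dumps both log streams through
// handleEndJobStatusOutputStream. The OutputStream enum value names used here
// are assumptions, not confirmed by the source.
func handleEndJobStatus(t *testing.T, jobAPIClient pps.JobAPIClient, jobInfo *pps.JobInfo) {
	handleEndJobStatusOutputStream(t, jobAPIClient, jobInfo, pps.OutputStream_OUTPUT_STREAM_STDOUT)
	handleEndJobStatusOutputStream(t, jobAPIClient, jobInfo, pps.OutputStream_OUTPUT_STREAM_STDERR)
}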
func RunTest(
	t *testing.T,
	f func(*testing.T, pfs.ApiClient, pfs.InternalApiClient, Cluster),
) {
	discoveryClient, err := getEtcdClient()
	require.NoError(t, err)
	var cluster Cluster
	prototest.RunT(
		t,
		testNumServers,
		func(servers map[string]*grpc.Server) {
			cluster = registerFunc(t, discoveryClient, servers)
		},
		func(t *testing.T, clientConns map[string]*grpc.ClientConn) {
			var clientConn *grpc.ClientConn
			for _, c := range clientConns {
				clientConn = c
				break
			}
			f(
				t,
				pfs.NewApiClient(clientConn),
				pfs.NewInternalApiClient(clientConn),
				cluster,
			)
		},
	)
	cluster.Shutdown()
}
func checkPFSOutput(t *testing.T, pfsAPIClient pfs.APIClient, outputCommit *pfs.Commit, filePathToContent map[string][]byte) {
	for filePath, content := range filePathToContent {
		getContent, err := getPFSContent(pfsAPIClient, outputCommit, filePath)
		require.NoError(t, err)
		require.Equal(t, content, getContent)
	}
}
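// getPFSContent is referenced by checkPFSOutput but not defined here; a minimal
// sketch, assuming it reads an entire file from the output commit using the
// nine-argument pfsutil.GetFile that the other tests in this file use.
func getPFSContent(pfsAPIClient pfs.APIClient, commit *pfs.Commit, filePath string) ([]byte, error) {
	var buffer bytes.Buffer
	if err := pfsutil.GetFile(pfsAPIClient, commit.Repo.Name, commit.Id, filePath, 0, 0, "", nil, &buffer); err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}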
func TestWorkload(t *testing.T) {
	t.Parallel()
	pfsClient := getPfsClient(t)
	ppsClient := getPpsClient(t)
	seed := time.Now().UnixNano()
	require.NoError(t, workload.RunWorkload(pfsClient, ppsClient, rand.New(rand.NewSource(seed)), 100))
}
func RunBench(
	b *testing.B,
	f func(*testing.B, pfs.ApiClient),
) {
	discoveryClient, err := getEtcdClient()
	require.NoError(b, err)
	var cluster Cluster
	prototest.RunB(
		b,
		testNumServers,
		func(servers map[string]*grpc.Server) {
			cluster = registerFunc(b, discoveryClient, servers)
		},
		func(b *testing.B, clientConns map[string]*grpc.ClientConn) {
			var clientConn *grpc.ClientConn
			for _, c := range clientConns {
				clientConn = c
				break
			}
			f(
				b,
				pfs.NewApiClient(clientConn),
			)
		},
	)
	cluster.Shutdown()
}
func RunBench(
	b *testing.B,
	f func(*testing.B, pfs.APIClient),
) {
	discoveryClient, err := getEtcdClient()
	require.NoError(b, err)
	var cluster *cluster
	prototest.RunB(
		b,
		testNumServers,
		func(servers map[string]*grpc.Server) {
			cluster = registerFunc(b, discoveryClient, servers)
		},
		func(b *testing.B, clientConns map[string]*grpc.ClientConn) {
			var clientConn *grpc.ClientConn
			for _, c := range clientConns {
				clientConn = c
				break
			}
			go func() {
				require.Equal(b, cluster.realSharder.AssignRoles(cluster.cancel), shard.ErrCancelled)
			}()
			cluster.WaitForAvailability()
			f(
				b,
				pfs.NewAPIClient(clientConn),
			)
		},
	)
	cluster.Shutdown()
}
// doWrites concurrently writes testSize small files under a/b and a/c in the
// given commit.
func doWrites(tb testing.TB, apiClient pfs.APIClient, repoName string, commitID string) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for i := 0; i < testSize; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, iErr := pfsutil.PutFile(apiClient, repoName, commitID, fmt.Sprintf("a/b/file%d", i), 0, strings.NewReader(fmt.Sprintf("hello%d", i)))
			require.NoError(tb, iErr)
			_, iErr = pfsutil.PutFile(apiClient, repoName, commitID, fmt.Sprintf("a/c/file%d", i), 0, strings.NewReader(fmt.Sprintf("hello%d", i)))
			require.NoError(tb, iErr)
		}()
	}
}
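// checkWrites is the counterpart to doWrites used by the testFailures variants
// above, but it is not defined in this file. A hedged sketch, assuming it
// mirrors doWrites by reading each file back with the nine-argument
// pfsutil.GetFile and comparing the contents.
func checkWrites(tb testing.TB, apiClient pfs.APIClient, repoName string, commitID string) {
	var wg sync.WaitGroup
	defer wg.Wait()
	for i := 0; i < testSize; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			var buffer bytes.Buffer
			require.NoError(tb, pfsutil.GetFile(apiClient, repoName, commitID, fmt.Sprintf("a/b/file%d", i), 0, 0, "", nil, &buffer))
			require.Equal(tb, fmt.Sprintf("hello%d", i), buffer.String())
			buffer.Reset()
			require.NoError(tb, pfsutil.GetFile(apiClient, repoName, commitID, fmt.Sprintf("a/c/file%d", i), 0, 0, "", nil, &buffer))
			require.Equal(tb, fmt.Sprintf("hello%d", i), buffer.String())
		}()
	}
}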
func TestJob(t *testing.T) {
	dataRepo := uniqueString("TestJob.data")
	pfsClient := getPfsClient(t)
	require.NoError(t, pfsutil.CreateRepo(pfsClient, dataRepo))
	commit, err := pfsutil.StartCommit(pfsClient, dataRepo, "")
	require.NoError(t, err)
	_, err = pfsutil.PutFile(pfsClient, dataRepo, commit.Id, "file", 0, strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, pfsutil.FinishCommit(pfsClient, dataRepo, commit.Id))
	ppsClient := getPpsClient(t)
	job, err := ppsutil.CreateJob(
		ppsClient,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		"",
		1,
		[]*pfs.Commit{commit},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &pps.InspectJobRequest{
		Job:         job,
		BlockOutput: true,
		BlockState:  true,
	}
	jobInfo, err := ppsClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, pfsutil.GetFile(pfsClient, jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.Id, "file", 0, 0, nil, &buffer))
	require.Equal(t, "foo", buffer.String())
}
// testRun pulls an image, creates and starts a single container with the given
// commands, waits for it to exit, and checks its stdout and stderr.
func testRun(t *testing.T, imageName string, commands []string, expectedStdout string, expectedStderr string) {
	client, err := newTestDockerClient()
	require.NoError(t, err)
	err = client.Pull(imageName, PullOptions{})
	require.NoError(t, err)
	containers, err := client.Create(
		imageName,
		CreateOptions{
			HasCommand: commands != nil,
		},
	)
	require.NoError(t, err)
	require.Equal(t, 1, len(containers))
	container := containers[0]
	err = client.Start(
		container,
		StartOptions{
			Commands: commands,
		},
	)
	require.NoError(t, err)
	err = client.Wait(container, WaitOptions{})
	require.NoError(t, err)
	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	err = client.Logs(container, LogsOptions{Stdout: stdout, Stderr: stderr})
	require.NoError(t, err)
	require.Equal(t, expectedStdout, stdout.String())
	require.Equal(t, expectedStderr, stderr.String())
	err = client.Remove(container, RemoveOptions{})
	require.NoError(t, err)
}
func getPfsClient(tb testing.TB) pfs.APIClient {
	pfsdAddr := os.Getenv("PFSD_PORT_650_TCP_ADDR")
	if pfsdAddr == "" {
		tb.Error("PFSD_PORT_650_TCP_ADDR not set")
	}
	clientConn, err := grpc.Dial(fmt.Sprintf("%s:650", pfsdAddr), grpc.WithInsecure())
	require.NoError(tb, err)
	return pfs.NewAPIClient(clientConn)
}
func TestBigWrite(t *testing.T) {
	t.Parallel()
	protolog.SetLevel(protolog.Level_LEVEL_DEBUG)
	repo := uniqueString("TestBigWrite")
	pfsClient := getPfsClient(t)
	err := pfsutil.CreateRepo(pfsClient, repo)
	require.NoError(t, err)
	commit, err := pfsutil.StartCommit(pfsClient, repo, "")
	require.NoError(t, err)
	rand := rand.New(rand.NewSource(5))
	_, err = pfsutil.PutFile(pfsClient, repo, commit.Id, "file", 0, workload.NewReader(rand, 10000))
	require.NoError(t, err)
	err = pfsutil.FinishCommit(pfsClient, repo, commit.Id)
	require.NoError(t, err)
	var buffer bytes.Buffer
	err = pfsutil.GetFile(pfsClient, repo, commit.Id, "file", 0, 0, nil, &buffer)
	require.NoError(t, err)
}
func getPachClient(t *testing.T) *APIClient {
	pachAddr := os.Getenv("PACHD_PORT_650_TCP_ADDR")
	if pachAddr == "" {
		t.Error("PACHD_PORT_650_TCP_ADDR not set")
	}
	clientConn, err := grpc.Dial(fmt.Sprintf("%s:650", pachAddr), grpc.WithInsecure())
	require.NoError(t, err)
	return NewAPIClient(clientConn)
}
func getPpsClient(t *testing.T) pps.APIClient {
	ppsdAddr := os.Getenv("PPSD_PORT_651_TCP_ADDR")
	if ppsdAddr == "" {
		t.Error("PPSD_PORT_651_TCP_ADDR not set")
	}
	clientConn, err := grpc.Dial(fmt.Sprintf("%s:651", ppsdAddr), grpc.WithInsecure())
	require.NoError(t, err)
	return pps.NewAPIClient(clientConn)
}
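// uniqueString is used throughout these tests to generate non-colliding repo
// names but is not defined in this file; a minimal sketch (the suffix scheme is
// an assumption) could be as simple as appending a nanosecond timestamp.
func uniqueString(prefix string) string {
	return fmt.Sprintf("%s%d", prefix, time.Now().UnixNano())
}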
func (c *cluster) WaitForAvailability() {
	// We use address as the id for servers too
	var ids []string
	for _, address := range c.addresses {
		if _, ok := c.cancels[address]; ok {
			ids = append(ids, address)
		}
	}
	require.NoError(c.tb, c.addresser.WaitForAvailability(ids))
}
// BenchmarkFuse measures committing 1024 one-megabyte files per iteration
// through the FUSE mount.
func BenchmarkFuse(b *testing.B) {
	apiClient := getPfsClient(b)
	repoName := uniqueString("benchMountRepo")
	if err := pfsutil.CreateRepo(apiClient, repoName); err != nil {
		b.Error(err)
	}
	directory := "/compile/benchMount"
	mounter := fuse.NewMounter("localhost", apiClient)
	ready := make(chan bool)
	go func() {
		err := mounter.Mount(directory, &pfs.Shard{Number: 0, Modulus: 1}, nil, ready)
		require.NoError(b, err)
	}()
	<-ready
	defer func() {
		if err := mounter.Unmount(directory); err != nil {
			b.Error(err)
		}
	}()
	bigValue := make([]byte, 1024*1024)
	for i := 0; i < 1024*1024; i++ {
		bigValue[i] = 'a'
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		commit, err := pfsutil.StartCommit(apiClient, repoName, "")
		if err != nil {
			b.Error(err)
		}
		if commit == nil {
			b.Error("nil branch")
		}
		newCommitID := commit.Id
		var wg sync.WaitGroup
		for j := 0; j < 1024; j++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				// use a local err to avoid racing on the loop's err variable
				if err := ioutil.WriteFile(filepath.Join(directory, repoName, newCommitID, fmt.Sprintf("big%d", j)), bigValue, 0666); err != nil {
					b.Error(err)
				}
			}(j)
		}
		wg.Wait()
		if err := pfsutil.FinishCommit(apiClient, repoName, newCommitID); err != nil {
			b.Error(err)
		}
	}
}
func testBasicRethink(t *testing.T, apiServer persist.APIServer) {
	jobInfo, err := apiServer.CreateJobInfo(
		context.Background(),
		&persist.JobInfo{
			Spec: &persist.JobInfo_PipelineName{
				PipelineName: "foo",
			},
		},
	)
	require.NoError(t, err)
	getJobInfo, err := apiServer.GetJobInfo(
		context.Background(),
		&pps.Job{
			Id: jobInfo.JobId,
		},
	)
	require.NoError(t, err)
	require.Equal(t, jobInfo.JobId, getJobInfo.JobId)
	require.Equal(t, "foo", getJobInfo.GetPipelineName())
}
func getJobForPipeline(t *testing.T, jobAPIClient pps.JobAPIClient, pipeline *pps.Pipeline) *pps.Job {
	jobInfos, err := jobAPIClient.ListJob(
		context.Background(),
		&pps.ListJobRequest{
			Pipeline: pipeline,
		},
	)
	require.NoError(t, err)
	require.NotNil(t, jobInfos)
	require.Equal(t, 1, len(jobInfos.JobInfo))
	return jobInfos.JobInfo[0].Job
}