func (a *apiServer) Version(version int64) error {
	protolog.Printf("apiServer.Version %d", version)
	a.versionLock.Lock()
	defer a.versionLock.Unlock()
	protolog.Printf("got lock")
	a.version = version
	return nil
}
func (a *sharder) announceServer(
	address string,
	server Server,
	versionChan chan int64,
	cancel chan bool,
) error {
	serverState := &ServerState{
		Address: address,
		Version: InvalidVersion,
	}
	for {
		shards, err := server.LocalShards()
		if err != nil {
			return err
		}
		serverState.Shards = shards
		encodedServerState, err := marshaler.MarshalToString(serverState)
		if err != nil {
			return err
		}
		if err := a.discoveryClient.Set(a.serverStateKey(address), encodedServerState, holdTTL); err != nil {
			protolog.Printf("Error setting server state: %s", err.Error())
		}
		protolog.Debug(&SetServerState{serverState})
		select {
		case <-cancel:
			return nil
		case version := <-versionChan:
			serverState.Version = version
		case <-time.After(time.Second * time.Duration(holdTTL/2)):
		}
	}
}
func getFinalPipelineRunStatus(apiClient pps.ApiClient, pipelineRunID string) (*pps.PipelineRunStatus, error) {
	// TODO(pedge): not good
	ticker := time.NewTicker(time.Second)
	for i := 0; i < 20; i++ {
		<-ticker.C
		pipelineRunStatuses, err := apiClient.GetPipelineRunStatus(
			context.Background(),
			&pps.GetPipelineRunStatusRequest{
				PipelineRunId: pipelineRunID,
			},
		)
		if err != nil {
			return nil, err
		}
		pipelineRunStatus := pipelineRunStatuses.PipelineRunStatus[0]
		protolog.Printf("status at tick %d: %v\n", i, pipelineRunStatus)
		switch pipelineRunStatus.PipelineRunStatusType {
		case pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_ERROR:
			return pipelineRunStatus, nil
		case pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS:
			return pipelineRunStatus, nil
		}
	}
	return nil, fmt.Errorf("did not get final pipeline status for %s", pipelineRunID)
}
func (a *apiServer) CreatePipeline(ctx context.Context, request *pps.CreatePipelineRequest) (response *google_protobuf.Empty, err error) {
	defer func(start time.Time) { a.Log(request, response, err, time.Since(start)) }(time.Now())
	if request.Pipeline == nil {
		return nil, fmt.Errorf("pachyderm.pps.pipelineserver: request.Pipeline cannot be nil")
	}
	repoSet := make(map[string]bool)
	for _, input := range request.Inputs {
		repoSet[input.Repo.Name] = true
	}
	if len(repoSet) < len(request.Inputs) {
		return nil, fmt.Errorf("pachyderm.pps.pipelineserver: duplicate input repos")
	}
	repo := pps.PipelineRepo(request.Pipeline)
	persistPipelineInfo := &persist.PipelineInfo{
		PipelineName: request.Pipeline.Name,
		Transform:    request.Transform,
		Shards:       request.Shards,
		Inputs:       request.Inputs,
		OutputRepo:   repo,
	}
	if _, err := a.persistAPIServer.CreatePipelineInfo(ctx, persistPipelineInfo); err != nil {
		return nil, err
	}
	if _, err := a.pfsAPIClient.CreateRepo(ctx, &pfs.CreateRepoRequest{Repo: repo}); err != nil {
		return nil, err
	}
	go func() {
		if err := a.runPipeline(newPipelineInfo(persistPipelineInfo)); err != nil {
			protolog.Printf("pipeline errored: %s", err.Error())
		}
	}()
	return google_protobuf.EmptyInstance, nil
}
func (a *apiServer) CreatePipeline(ctx context.Context, request *pps.CreatePipelineRequest) (response *google_protobuf.Empty, err error) {
	defer func(start time.Time) { a.Log(request, response, err, time.Since(start)) }(time.Now())
	if request.Pipeline == nil {
		return nil, fmt.Errorf("pachyderm.pps.pipelineserver: request.Pipeline cannot be nil")
	}
	persistPipelineInfo := &persist.PipelineInfo{
		PipelineName: request.Pipeline.Name,
		Transform:    request.Transform,
		Shards:       request.Shards,
		InputRepo:    request.InputRepo,
	}
	if _, err := a.persistAPIServer.CreatePipelineInfo(ctx, persistPipelineInfo); err != nil {
		return nil, err
	}
	repo := pps.PipelineRepo(request.Pipeline)
	if _, err := a.pfsAPIClient.CreateRepo(ctx, &pfs.CreateRepoRequest{Repo: repo}); err != nil {
		return nil, err
	}
	go func() {
		if err := a.runPipeline(persistPipelineInfoToPipelineInfo(persistPipelineInfo)); err != nil {
			protolog.Printf("pipeline errored: %s", err.Error())
		}
	}()
	return google_protobuf.EmptyInstance, nil
}
func (a *sharder) announceFrontend(
	address string,
	frontend Frontend,
	versionChan chan int64,
	cancel chan bool,
) error {
	frontendState := &FrontendState{
		Address: address,
		Version: InvalidVersion,
	}
	for {
		encodedFrontendState, err := marshaler.MarshalToString(frontendState)
		if err != nil {
			return err
		}
		if err := a.discoveryClient.Set(a.frontendStateKey(address), encodedFrontendState, holdTTL); err != nil {
			protolog.Printf("Error setting frontend state: %s", err.Error())
		}
		protolog.Debug(&SetFrontendState{frontendState})
		select {
		case <-cancel:
			return nil
		case version := <-versionChan:
			frontendState.Version = version
		case <-time.After(time.Second * time.Duration(holdTTL/2)):
		}
	}
}
func (d *driver) PullDiff(commit *pfs.Commit, shard uint64, diff io.Writer) error {
	protolog.Printf("PullDiff: %s", d.readCommitPath(commit, shard))
	parent, err := d.getParent(commit, shard)
	if err != nil {
		return err
	}
	if parent == nil {
		return execSend(d.readCommitPath(commit, shard), "", diff)
	}
	return execSend(d.readCommitPath(commit, shard), d.readCommitPath(parent, shard), diff)
}
func (d *driver) PushDiff(commit *pfs.Commit, shard uint64, diff io.Reader) error {
	protolog.Printf("PushDiff: %s", d.readCommitPath(commit, shard))
	if err := execSubvolumeCreate(d.commitPathNoShard(commit)); err != nil && !execSubvolumeExists(d.commitPathNoShard(commit)) {
		return err
	}
	if err := execRecv(d.commitPathNoShard(commit), diff); err != nil {
		return err
	}
	if !execSubvolumeExists(d.readCommitPath(commit, shard)) {
		return fmt.Errorf("PushDiff failed %s", d.readCommitPath(commit, shard))
	}
	return nil
}
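// A minimal sketch (not the project's actual helper) of how execSend, used by
// PullDiff above, could wrap `btrfs send`: stream the subvolume into the writer,
// passing the parent snapshot with -p for an incremental diff. The function name
// and the os/exec wiring are assumptions for illustration only; assumes "io",
// "os", and "os/exec" are imported.
func execSendSketch(path string, parent string, diff io.Writer) error {
	args := []string{"send"}
	if parent != "" {
		// Produce an incremental stream relative to the parent snapshot.
		args = append(args, "-p", parent)
	}
	args = append(args, path)
	cmd := exec.Command("btrfs", args...)
	cmd.Stdout = diff
	cmd.Stderr = os.Stderr
	return cmd.Run()
}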
func do(appEnvObj interface{}) error {
	appEnv := appEnvObj.(*appEnv)
	rethinkAPIClient, err := getRethinkAPIClient(appEnv.DatabaseAddress, appEnv.DatabaseName)
	if err != nil {
		return err
	}
	pfsdAddress, err := getPfsdAddress()
	if err != nil {
		return err
	}
	clientConn, err := grpc.Dial(pfsdAddress, grpc.WithInsecure())
	if err != nil {
		return err
	}
	pfsAPIClient := pfs.NewAPIClient(clientConn)
	kubeAddr, err := getKubeAddress()
	if err != nil {
		return err
	}
	config := &kube.Config{
		Host:     kubeAddr,
		Insecure: true,
	}
	kubeClient, err := kube.New(config)
	if err != nil {
		protolog.Printf("Error creating kubernetes client: %s", err.Error())
	}
	jobAPIServer := jobserver.NewAPIServer(
		pfsAPIClient,
		rethinkAPIClient,
		kubeClient,
	)
	jobAPIClient := pps.NewLocalJobAPIClient(jobAPIServer)
	pipelineAPIServer := pipelineserver.NewAPIServer(pfsAPIClient, jobAPIClient, rethinkAPIClient)
	if err := pipelineAPIServer.Start(); err != nil {
		return err
	}
	return protoserver.Serve(
		uint16(appEnv.Port),
		func(s *grpc.Server) {
			pps.RegisterJobAPIServer(s, jobAPIServer)
			pps.RegisterInternalJobAPIServer(s, jobAPIServer)
			pps.RegisterPipelineAPIServer(s, pipelineAPIServer)
		},
		protoserver.ServeOptions{
			DebugPort: uint16(appEnv.DebugPort),
			Version:   pachyderm.Version,
		},
	)
}
func (c *etcdClient) WatchAll(key string, cancel chan bool, callBack func(map[string]string) error) error {
	// This retry is needed for when the etcd cluster gets overloaded.
	for {
		if err := c.watchAllWithoutRetry(key, cancel, callBack); err != nil {
			etcdErr, ok := err.(*etcd.EtcdError)
			if ok && etcdErr.ErrorCode == 401 {
				// 401: the requested watch index has been cleared; start the watch over.
				continue
			}
			if ok && etcdErr.ErrorCode == 501 {
				// 501: the watcher was cleared by etcd; log and retry.
				protolog.Printf("Ignoring: %s", err.Error())
				continue
			}
			return err
		}
	}
}
func (a *apiServer) Start() error {
	pipelineInfos, err := a.ListPipeline(context.Background(), &pps.ListPipelineRequest{})
	if err != nil {
		return err
	}
	for _, pipelineInfo := range pipelineInfos.PipelineInfo {
		pipelineInfo := pipelineInfo // capture the loop variable for the goroutine below
		go func() {
			if err := a.runPipeline(pipelineInfo); err != nil {
				protolog.Printf("pipeline errored: %s", err.Error())
			}
		}()
	}
	return nil
}
func (c *etcdClient) Watch(key string, cancel chan bool, callBack func(string) error) error {
	// This retry is needed for when the etcd cluster gets overloaded.
	for {
		if err := c.watchWithoutRetry(key, cancel, callBack); err != nil {
			etcdErr, ok := err.(*etcd.EtcdError)
			if ok && etcdErr.ErrorCode == 401 {
				continue
			}
			if ok && etcdErr.ErrorCode == 501 {
				protolog.Printf("Ignoring: %s", err.Error())
				continue
			}
			return err
		}
	}
}
func PutFile(apiClient pfs.ApiClient, repoName string, commitID string, path string, offset int64, reader io.Reader) (_ int, retErr error) {
	putFileClient, err := apiClient.PutFile(context.Background())
	if err != nil {
		return 0, err
	}
	defer func() {
		// Only let the close error win if nothing else has already failed.
		if _, err := putFileClient.CloseAndRecv(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	request := pfs.PutFileRequest{
		File: &pfs.File{
			Commit: &pfs.Commit{
				Repo: &pfs.Repo{
					Name: repoName,
				},
				Id: commitID,
			},
			Path: path,
		},
		FileType:    pfs.FileType_FILE_TYPE_REGULAR,
		OffsetBytes: offset,
	}
	var size int
	for {
		value := make([]byte, chunkSize)
		iSize, err := reader.Read(value)
		// Per the io.Reader contract, process any bytes read before looking at
		// the error, since a reader may return data together with io.EOF.
		if iSize > 0 {
			request.Value = value[0:iSize]
			size += iSize
			protolog.Printf("pfsutil.PutFile Send(%+v)", request)
			if err := putFileClient.Send(&request); err != nil {
				return 0, err
			}
		}
		if err != nil {
			if err == io.EOF {
				break
			}
			return 0, err
		}
	}
	return size, nil
}
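// Hypothetical usage sketch for PutFile above: stream an in-memory payload into
// a file in a commit. The repo name, commit ID, and path are made-up values for
// illustration, not taken from this code; assumes "strings" is imported and the
// client connection was dialed elsewhere.
func putFileExample(apiClient pfs.ApiClient) error {
	written, err := PutFile(apiClient, "example-repo", "example-commit-id", "greeting.txt", 0, strings.NewReader("hello, pachyderm\n"))
	if err != nil {
		return err
	}
	protolog.Printf("pfsutil.PutFile wrote %d bytes", written)
	return nil
}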
func (a *apiServer) CreatePipeline(ctx context.Context, request *pps.CreatePipelineRequest) (response *google_protobuf.Empty, err error) {
	defer func(start time.Time) { a.Log(request, response, err, time.Since(start)) }(time.Now())
	persistPipelineInfo := &persist.PipelineInfo{
		PipelineName: request.Pipeline.Name,
		Transform:    request.Transform,
		InputRepo:    request.InputRepo,
		OutputRepo:   request.OutputRepo,
	}
	if _, err := a.persistAPIClient.CreatePipelineInfo(ctx, persistPipelineInfo); err != nil {
		return nil, err
	}
	go func() {
		if err := a.runPipeline(persistPipelineInfoToPipelineInfo(persistPipelineInfo)); err != nil {
			protolog.Printf("pipeline errored: %s", err.Error())
		}
	}()
	return google_protobuf.EmptyInstance, nil
}
func getKubeClient() (*kube.Client, error) {
	kubeAddr, err := getKubeAddress()
	if err != nil {
		return nil, err
	}
	config := &kube.Config{
		Host:     kubeAddr,
		Insecure: true,
	}
	kubeClient, err := kube.New(config)
	if err != nil {
		protolog.Printf("Error creating insecure kube client: %s", err.Error())
	}
	if kubeClient != nil {
		return kubeClient, nil
	}
	// Fall back to in-cluster configuration if the insecure client could not be created.
	return kube.NewInCluster()
}
func getFinalPipelineRunStatus(apiClient pps.ApiClient, pipelineRunID string) (*pps.PipelineRunStatus, error) {
	// TODO(pedge): not good
	ticker := time.NewTicker(time.Second)
	for i := 0; i < 60; i++ {
		<-ticker.C
		getPipelineRunStatusResponse, err := ppsutil.GetPipelineRunStatus(
			apiClient,
			pipelineRunID,
		)
		if err != nil {
			return nil, err
		}
		protolog.Printf("status at tick %d: %v\n", i, getPipelineRunStatusResponse.PipelineRunStatus)
		pipelineRunStatus := getPipelineRunStatusResponse.PipelineRunStatus
		switch pipelineRunStatus.PipelineRunStatusType {
		case pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_ERROR:
			return pipelineRunStatus, nil
		case pps.PipelineRunStatusType_PIPELINE_RUN_STATUS_TYPE_SUCCESS:
			return pipelineRunStatus, nil
		}
	}
	return nil, fmt.Errorf("did not get final pipeline status for %s", pipelineRunID)
}
func (a *discoveryAddresser) fillRoles(
	id string,
	server Server,
	versionChan chan int64,
	cancel chan bool,
) error {
	oldRoles := make(map[int64]proto.ServerRole)
	return a.discoveryClient.WatchAll(
		a.serverRoleKey(id),
		cancel,
		func(encodedServerRoles map[string]string) error {
			roles := make(map[int64]proto.ServerRole)
			var versions int64Slice
			// Decode the roles
			for _, encodedServerRole := range encodedServerRoles {
				var serverRole proto.ServerRole
				if err := jsonpb.UnmarshalString(encodedServerRole, &serverRole); err != nil {
					return err
				}
				roles[serverRole.Version] = serverRole
				versions = append(versions, serverRole.Version)
			}
			sort.Sort(versions)
			// For each new version bring the server up to date
			for _, version := range versions {
				if _, ok := oldRoles[version]; ok {
					// we've already seen these roles, so nothing to do here
					continue
				}
				serverRole := roles[version]
				protolog.Printf("StartAddServerRole %+v", serverRole)
				var wg sync.WaitGroup
				var addShardErr error
				var addShardOnce sync.Once
				for _, shard := range shards(serverRole) {
					if !containsShard(oldRoles, shard) {
						wg.Add(1)
						go func(shard uint64) {
							defer wg.Done()
							if err := server.AddShard(shard); err != nil {
								addShardOnce.Do(func() {
									addShardErr = err
								})
							}
						}(shard)
					}
				}
				wg.Wait()
				if addShardErr != nil {
					protolog.Info(&log.AddServerRole{&serverRole, addShardErr.Error()})
					return addShardErr
				}
				protolog.Info(&log.AddServerRole{&serverRole, ""})
				oldRoles[version] = serverRole
				versionChan <- version
			}
			// See if there are any old roles that aren't needed
			for version, serverRole := range oldRoles {
				var wg sync.WaitGroup
				var removeShardErr error
				var removeShardOnce sync.Once
				if _, ok := roles[version]; ok {
					// these roles haven't expired yet, so nothing to do
					continue
				}
				for _, shard := range shards(serverRole) {
					if !containsShard(roles, shard) {
						wg.Add(1)
						go func(shard uint64) {
							defer wg.Done()
							if err := server.RemoveShard(shard); err != nil {
								removeShardOnce.Do(func() {
									removeShardErr = err
								})
							}
						}(shard)
					}
				}
				wg.Wait()
				if removeShardErr != nil {
					protolog.Info(&log.RemoveServerRole{&serverRole, removeShardErr.Error()})
					return removeShardErr
				}
				protolog.Info(&log.RemoveServerRole{&serverRole, ""})
			}
			oldRoles = make(map[int64]proto.ServerRole)
			for version, serverRole := range roles {
				oldRoles[version] = serverRole
			}
			return nil
		},
	)
}
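// int64Slice is used with sort.Sort above but is not defined in this excerpt.
// A minimal sketch of the sort.Interface implementation it presumably provides
// (the actual definition in the codebase may differ):
type int64Slice []int64

func (s int64Slice) Len() int           { return len(s) }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }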
func BenchmarkFreeformfGLog(b *testing.B) {
	runBenchmarkGLog(b, func() { protolog.Printf("%s %d\n", s, d) }, false)
}
func BenchmarkThreadFreeformfGLog(b *testing.B) {
	runBenchmarkGLog(b, func() { protolog.Printf("%s %d\n", s, d) }, true)
}
func (a *sharder) WaitForAvailability(frontendAddresses []string, serverAddresses []string) error {
	version := InvalidVersion
	if err := a.discoveryClient.WatchAll(a.serverDir(), nil,
		func(encodedServerStatesAndRoles map[string]string) error {
			serverStates := make(map[string]*ServerState)
			serverRoles := make(map[string]map[int64]*ServerRole)
			for key, encodedServerStateOrRole := range encodedServerStatesAndRoles {
				if strings.HasPrefix(key, a.serverStateDir()) {
					serverState, err := decodeServerState(encodedServerStateOrRole)
					if err != nil {
						return err
					}
					serverStates[serverState.Address] = serverState
				}
				if strings.HasPrefix(key, a.serverRoleDir()) {
					serverRole, err := decodeServerRole(encodedServerStateOrRole)
					if err != nil {
						return err
					}
					if _, ok := serverRoles[serverRole.Address]; !ok {
						serverRoles[serverRole.Address] = make(map[int64]*ServerRole)
					}
					serverRoles[serverRole.Address][serverRole.Version] = serverRole
				}
			}
			if len(serverStates) != len(serverAddresses) {
				return nil
			}
			if len(serverRoles) != len(serverAddresses) {
				return nil
			}
			for _, address := range serverAddresses {
				if _, ok := serverStates[address]; !ok {
					return nil
				}
				if _, ok := serverRoles[address]; !ok {
					return nil
				}
			}
			versions := make(map[int64]bool)
			for _, serverState := range serverStates {
				if serverState.Version == InvalidVersion {
					return nil
				}
				versions[serverState.Version] = true
			}
			if len(versions) != 1 {
				return nil
			}
			for _, versionToServerRole := range serverRoles {
				if len(versionToServerRole) != 1 {
					return nil
				}
				for version := range versionToServerRole {
					if !versions[version] {
						return nil
					}
				}
			}
			// This loop actually does something, it sets the outside
			// version variable.
			for version = range versions {
			}
			return errComplete
		}); err != errComplete {
		return err
	}
	if err := a.discoveryClient.WatchAll(
		a.frontendStateDir(),
		nil,
		func(encodedFrontendStates map[string]string) error {
			frontendStates := make(map[string]*FrontendState)
			for _, encodedFrontendState := range encodedFrontendStates {
				frontendState, err := decodeFrontendState(encodedFrontendState)
				if err != nil {
					return err
				}
				if frontendState.Version != version {
					protolog.Printf("Wrong version: %d != %d", frontendState.Version, version)
					return nil
				}
				frontendStates[frontendState.Address] = frontendState
			}
			protolog.Printf("frontendStates: %+v", frontendStates)
			if len(frontendStates) != len(frontendAddresses) {
				return nil
			}
			for _, address := range frontendAddresses {
				if _, ok := frontendStates[address]; !ok {
					return nil
				}
			}
			return errComplete
		}); err != nil && err != errComplete {
		return err
	}
	return nil
}
func do(appEnvObj interface{}) error {
	appEnv := appEnvObj.(*appEnv)
	discoveryClient, err := getEtcdClient()
	if err != nil {
		return err
	}
	address := appEnv.Address
	if address == "" {
		address, err = netutil.ExternalIP()
		if err != nil {
			return err
		}
	}
	address = fmt.Sprintf("%s:%d", address, appEnv.Port)
	sharder := shard.NewSharder(
		discoveryClient,
		appEnv.NumShards,
		appEnv.NumReplicas,
		"namespace",
	)
	var driver drive.Driver
	switch appEnv.DriverType {
	case "btrfs":
		driver, err = btrfs.NewDriver(appEnv.DriverRoot, "")
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown value for PFS_DRIVER_TYPE: %s", appEnv.DriverType)
	}
	apiServer := server.NewAPIServer(
		route.NewSharder(
			appEnv.NumShards,
			0,
		),
		route.NewRouter(
			sharder,
			grpcutil.NewDialer(
				grpc.WithInsecure(),
			),
			address,
		),
	)
	go func() {
		if err := sharder.RegisterFrontend(nil, address, apiServer); err != nil {
			protolog.Printf("Error from sharder.RegisterFrontend %s", err.Error())
		}
	}()
	internalAPIServer := server.NewInternalAPIServer(
		route.NewSharder(
			appEnv.NumShards,
			0,
		),
		route.NewRouter(
			sharder,
			grpcutil.NewDialer(
				grpc.WithInsecure(),
			),
			address,
		),
		driver,
	)
	go func() {
		if err := sharder.Register(nil, address, internalAPIServer); err != nil {
			protolog.Printf("Error from sharder.Register %s", err.Error())
		}
	}()
	return protoserver.Serve(
		uint16(appEnv.Port),
		func(s *grpc.Server) {
			pfs.RegisterAPIServer(s, apiServer)
			pfs.RegisterInternalAPIServer(s, internalAPIServer)
		},
		protoserver.ServeOptions{
			HTTPPort:  uint16(appEnv.HTTPPort),
			DebugPort: uint16(appEnv.DebugPort),
			Version:   pachyderm.Version,
			HTTPRegisterFunc: func(ctx context.Context, mux *runtime.ServeMux, clientConn *grpc.ClientConn) error {
				return pfs.RegisterAPIHandler(ctx, mux, clientConn)
			},
		},
	)
}