Example #1: fileReader.Read streams a file's data from the block store one block at a time, skipping whole blocks to honor the starting offset.
func (r *fileReader) Read(data []byte) (int, error) {
	if r.reader == nil {
		// skip blocks as long as our offset is past the end of the current block
		for r.offset != 0 && r.index < len(r.blockRefs) && r.offset >= int64(pfsserver.ByteRangeSize(r.blockRef().Range)) {
			r.offset -= int64(pfsserver.ByteRangeSize(r.blockRef().Range))
			r.index++
		}
		if r.index == len(r.blockRefs) {
			return 0, io.EOF
		}
		var err error
		client := client.APIClient{BlockAPIClient: r.blockClient}
		r.reader, err = client.GetBlock(r.blockRef().Block.Hash, uint64(r.offset), uint64(r.size))
		if err != nil {
			return 0, err
		}
		r.offset = 0
		r.index++
	}
	size, err := r.reader.Read(data)
	if err != nil && err != io.EOF {
		return size, err
	}
	if err == io.EOF {
		r.reader = nil
	}
	r.size -= int64(size)
	if r.size == 0 {
		return size, io.EOF
	}
	if r.size < 0 {
		return 0, fmt.Errorf("read more than we need; this is likely a bug")
	}
	return size, nil
}
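Read can return data together with io.EOF (when r.size reaches 0), so callers must consume the returned bytes before acting on the error. Below is a minimal, self-contained sketch of that calling convention; bytes.NewReader stands in for the fileReader, which is constructed elsewhere in the driver:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// drain reads r to completion, honoring the io.Reader contract that a call
// may return n > 0 together with io.EOF.
func drain(r io.Reader) ([]byte, error) {
	var out []byte
	buf := make([]byte, 4096)
	for {
		n, err := r.Read(buf)
		out = append(out, buf[:n]...) // consume the bytes before checking err
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return out, err
		}
	}
}

func main() {
	data, err := drain(bytes.NewReader([]byte("hello, blocks")))
	fmt.Println(string(data), err)
}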
Example #2: RunWorkload runs a fixed number of random operations against the cluster and then checks that every job it started finished successfully.
func RunWorkload(
	client *client.APIClient,
	rand *rand.Rand,
	size int,
) error {
	worker := newWorker(rand)
	for i := 0; i < size; i++ {
		if err := worker.work(client); err != nil {
			return err
		}
	}
	for _, job := range worker.startedJobs {
		jobInfo, err := client.InspectJob(job.ID, true)
		if err != nil {
			return err
		}
		if jobInfo.State != ppsclient.JobState_JOB_SUCCESS {
			return fmt.Errorf("job %s failed", job.ID)
		}
	}
	return nil
}
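A minimal sketch of invoking RunWorkload from a test or smoke-test binary. The pachd address and the client.NewFromAddress constructor are assumptions about this client package, not shown in the excerpt; a fixed seed keeps the randomized workload reproducible:

func runSmokeTest() error {
	// Connect to pachd; the constructor name and address are assumptions.
	c, err := client.NewFromAddress("localhost:30650")
	if err != nil {
		return err
	}
	// Run 100 random operations with a fixed seed (math/rand) for reproducibility.
	return RunWorkload(c, rand.New(rand.NewSource(1)), 100)
}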
Example #3: driver.PutFile writes the incoming data to the block store and records the resulting block references in the open commit's diff.
func (d *driver) PutFile(file *pfs.File, handle string,
	delimiter pfs.Delimiter, shard uint64, reader io.Reader) (retErr error) {
	blockClient, err := d.getBlockClient()
	if err != nil {
		return err
	}
	_client := client.APIClient{BlockAPIClient: blockClient}
	blockRefs, err := _client.PutBlock(delimiter, reader)
	if err != nil {
		return err
	}
	defer func() {
		if retErr == nil {
			metrics.AddFiles(1)
			for _, blockRef := range blockRefs.BlockRef {
				metrics.AddBytes(int64(blockRef.Range.Upper - blockRef.Range.Lower))
			}
		}
	}()
	d.lock.Lock()
	defer d.lock.Unlock()

	fileType, err := d.getFileType(file, shard)
	if err != nil {
		return err
	}

	if fileType == pfs.FileType_FILE_TYPE_DIR {
		return fmt.Errorf("%s is a directory", file.Path)
	}

	canonicalCommit, err := d.canonicalCommit(file.Commit)
	if err != nil {
		return err
	}
	diffInfo, ok := d.diffs.get(client.NewDiff(canonicalCommit.Repo.Name, canonicalCommit.ID, shard))
	if !ok {
		// This is a weird case: since the commit existed above, it means someone
		// deleted the commit while the above code was running
		return pfsserver.NewErrCommitNotFound(canonicalCommit.Repo.Name, canonicalCommit.ID)
	}
	if diffInfo.Finished != nil {
		return fmt.Errorf("commit %s/%s has already been finished", canonicalCommit.Repo.Name, canonicalCommit.ID)
	}
	d.addDirs(diffInfo, file, shard)
	_append, ok := diffInfo.Appends[path.Clean(file.Path)]
	if !ok {
		_append = newAppend(pfs.FileType_FILE_TYPE_REGULAR)
	} else {
		_append.FileType = pfs.FileType_FILE_TYPE_REGULAR
	}
	if diffInfo.ParentCommit != nil {
		_append.LastRef = d.lastRef(
			client.NewFile(diffInfo.ParentCommit.Repo.Name, diffInfo.ParentCommit.ID, file.Path),
			shard,
		)
	}
	diffInfo.Appends[path.Clean(file.Path)] = _append
	if handle == "" {
		_append.BlockRefs = append(_append.BlockRefs, blockRefs.BlockRef...)
	} else {
		handleBlockRefs, ok := _append.Handles[handle]
		if !ok {
			handleBlockRefs = &pfs.BlockRefs{}
			_append.Handles[handle] = handleBlockRefs
		}
		handleBlockRefs.BlockRef = append(handleBlockRefs.BlockRef, blockRefs.BlockRef...)
	}
	for _, blockRef := range blockRefs.BlockRef {
		diffInfo.SizeBytes += blockRef.Range.Upper - blockRef.Range.Lower
	}
	return nil
}
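PutFile only records metrics when the named return value retErr is nil, so failed writes are never counted. A tiny standalone sketch of that named-return-plus-defer pattern; filesAdded is a placeholder for the real metrics package:

package main

import "fmt"

var filesAdded int // placeholder counter standing in for metrics.AddFiles

func doWork(fail bool) (retErr error) {
	// The deferred closure reads the named return value, so it observes the
	// final error and only counts the call when it succeeded.
	defer func() {
		if retErr == nil {
			filesAdded++
		}
	}()
	if fail {
		return fmt.Errorf("simulated failure")
	}
	return nil
}

func main() {
	_ = doWork(true)
	_ = doWork(false)
	fmt.Println(filesAdded) // prints 1: only the successful call was counted
}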
Example #4: worker.work performs one randomly chosen operation (create a repo, start or finish a commit, put a file, run a job, or create a pipeline) selected by comparing a random draw against probability thresholds.
func (w *worker) work(c *client.APIClient) error {
	opt := w.rand.Float64()
	switch {
	case opt < repo:
		repoName := w.randString(10)
		if err := c.CreateRepo(repoName); err != nil {
			return err
		}
		w.repos = append(w.repos, &pfs.Repo{Name: repoName})
		commit, err := c.StartCommit(repoName, "", "")
		if err != nil {
			return err
		}
		w.started = append(w.started, commit)
	case opt < commit:
		if len(w.started) >= maxStartedCommits || len(w.finished) == 0 {
			if len(w.started) == 0 {
				return nil
			}
			i := w.rand.Intn(len(w.started))
			commit := w.started[i]
			// before we finish a commit we add a file; this ensures that there
			// won't be any empty commits, which would later crash jobs
			if _, err := c.PutFile(commit.Repo.Name, commit.ID, w.randString(10), w.reader()); err != nil {
				return err
			}
			if err := c.FinishCommit(commit.Repo.Name, commit.ID); err != nil {
				return err
			}
			w.started = append(w.started[:i], w.started[i+1:]...)
			w.finished = append(w.finished, commit)
		} else {
			if len(w.finished) == 0 {
				return nil
			}
			commit := w.finished[w.rand.Intn(len(w.finished))]
			commit, err := c.StartCommit(commit.Repo.Name, commit.ID, "")
			if err != nil {
				return err
			}
			w.started = append(w.started, commit)
		}
	case opt < file:
		if len(w.started) == 0 {
			return nil
		}
		commit := w.started[w.rand.Intn(len(w.started))]
		if _, err := c.PutFile(commit.Repo.Name, commit.ID, w.randString(10), w.reader()); err != nil {
			return err
		}
	case opt < job:
		if len(w.startedJobs) >= maxStartedJobs {
			job := w.startedJobs[0]
			w.startedJobs = w.startedJobs[1:]
			jobInfo, err := c.InspectJob(job.ID, true)
			if err != nil {
				return err
			}
			if jobInfo.State != ppsclient.JobState_JOB_SUCCESS {
				return fmt.Errorf("job %s failed", job.ID)
			}
			w.jobs = append(w.jobs, job)
		} else {
			if len(w.finished) == 0 {
				return nil
			}
			inputs := [5]string{}
			var jobInputs []*ppsclient.JobInput
			repoSet := make(map[string]bool)
			for i := range inputs {
				commit := w.finished[w.rand.Intn(len(w.finished))]
				if _, ok := repoSet[commit.Repo.Name]; ok {
					continue
				}
				repoSet[commit.Repo.Name] = true
				inputs[i] = commit.Repo.Name
				jobInputs = append(jobInputs, &ppsclient.JobInput{Commit: commit})
			}
			outFilename := w.randString(10)
			job, err := c.CreateJob(
				"",
				[]string{"bash"},
				w.grepCmd(inputs, outFilename),
				1,
				jobInputs,
				"",
			)
			if err != nil {
				return err
			}
			w.startedJobs = append(w.startedJobs, job)
		}
	case opt < pipeline:
		if len(w.repos) == 0 {
			return nil
		}
		inputs := [5]string{}
		var pipelineInputs []*ppsclient.PipelineInput
		repoSet := make(map[string]bool)
		for i := range inputs {
			repo := w.repos[w.rand.Intn(len(w.repos))]
			if _, ok := repoSet[repo.Name]; ok {
				continue
			}
			repoSet[repo.Name] = true
			inputs[i] = repo.Name
			pipelineInputs = append(pipelineInputs, &ppsclient.PipelineInput{Repo: repo})
		}
		pipelineName := w.randString(10)
		outFilename := w.randString(10)
		if err := c.CreatePipeline(
			pipelineName,
			"",
			[]string{"bash"},
			w.grepCmd(inputs, outFilename),
			1,
			pipelineInputs,
		); err != nil {
			return err
		}
		w.pipelines = append(w.pipelines, client.NewPipeline(pipelineName))
	}
	return nil
}
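The switch above compares opt, drawn from rand.Float64() in [0, 1), against cumulative thresholds named repo, commit, file, job, and pipeline, and the worker caps its in-flight state with maxStartedCommits and maxStartedJobs; all of these are defined elsewhere in the package. A sketch of what those constants could look like; the specific values below are illustrative assumptions, not taken from the source:

// Cumulative probability thresholds for the rand.Float64() draw; the real
// package defines its own values, these are placeholders for illustration.
const (
	repo     = 0.02          // opt < repo: create a repo and start its first commit
	commit   = repo + 0.30   // repo <= opt < commit: finish a started commit or fork a finished one
	file     = commit + 0.60 // commit <= opt < file: put a file into a started commit
	job      = file + 0.05   // file <= opt < job: create a job or check an old one
	pipeline = job + 0.03    // job <= opt < pipeline: create a pipeline
)

// Limits on in-flight state, also defined elsewhere in the package.
const (
	maxStartedCommits = 6
	maxStartedJobs    = 6
)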