Example #1
// Inputs returns all of the inputs for the pipelines.
func (r *Runner) Inputs() ([]string, error) {
	pipelines, err := btrfs.ReadDir(path.Join(r.inRepo, r.commit, r.pipelineDir))
	if err != nil {
		// Notice we don't return this error but instead no-op. It's fine to not
		// have a pipeline dir.
		return nil, nil
	}
	var res []string
	for _, pInfo := range pipelines {
		f, err := btrfs.Open(path.Join(r.inRepo, r.commit, r.pipelineDir, pInfo.Name()))
		if err != nil {
			return nil, err
		}
		// Note: this defer runs when Inputs returns, so every file stays
		// open until the whole directory walk finishes.
		defer f.Close()
		lines := bufio.NewScanner(f)
		// TODO we're copy-pasting code from runPachFile. Let's abstract that.
		var tokens []string
		for lines.Scan() {
			if len(tokens) > 0 && tokens[len(tokens)-1] == "\\" {
				// We have tokens from the last iteration; remove the \ token that marks the line wrap.
				tokens = tokens[:len(tokens)-1]
			} else {
				// No line wrap; clear the tokens, they were already processed.
				tokens = []string{}
			}
			tokens = append(tokens, strings.Fields(lines.Text())...)
			if len(tokens) > 0 && tokens[len(tokens)-1] == "\\" {
				// The statement continues on the next line; wait for the rest
				// before checking it, otherwise a wrapped input line would be
				// recorded once per physical line.
				continue
			}
			if len(tokens) > 0 && tokens[0] == "input" {
				if len(tokens) < 2 {
					return nil, ErrArgCount
				}
				res = append(res, tokens[1])
			}
		}
		if err := lines.Err(); err != nil {
			return nil, err
		}
	}
	return res, nil
}
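
The TODO above notes that this token loop is copy-pasted from runPachFile. Below is a minimal sketch of what an extracted helper might look like, assuming the same trailing-\ continuation rule; the name scanTokens and its callback signature are illustrative, not part of the source:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// scanTokens is a hypothetical helper that factors out the shared loop: it
// accumulates whitespace-separated tokens across physical lines joined by a
// trailing "\" and invokes cb once per complete logical line.
func scanTokens(r io.Reader, cb func(tokens []string) error) error {
	lines := bufio.NewScanner(r)
	var tokens []string
	for lines.Scan() {
		tokens = append(tokens, strings.Fields(lines.Text())...)
		if len(tokens) > 0 && tokens[len(tokens)-1] == "\\" {
			// Trailing "\" marks a line wrap; drop it and keep accumulating.
			tokens = tokens[:len(tokens)-1]
			continue
		}
		if len(tokens) > 0 {
			if err := cb(tokens); err != nil {
				return err
			}
		}
		tokens = nil
	}
	return lines.Err()
}

func main() {
	src := "input data1 \\\n  wrapped\ninput data2\n"
	_ = scanTokens(strings.NewReader(src), func(tokens []string) error {
		fmt.Println(tokens) // [input data1 wrapped], then [input data2]
		return nil
	})
}

With such a helper, the loop body in Inputs would reduce to a scanTokens call whose callback handles only the "input" keyword.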
Example #2
// Run runs all of the pipelines it finds in pipelineDir. Returns the
// first error it encounters.
func (r *Runner) Run() error {
	if err := btrfs.MkdirAll(r.outPrefix); err != nil {
		return err
	}
	if err := r.startInputPipelines(); err != nil {
		return err
	}
	pipelines, err := btrfs.ReadDir(path.Join(r.inRepo, r.commit, r.pipelineDir))
	if err != nil {
		// Notice we don't return this error but instead no-op. It's fine to not
		// have a pipeline dir.
		return nil
	}
	// A channel for the errors; note that its capacity equals the number of
	// pipelines. The code below must ensure each pipeline sends at most 1
	// error, otherwise a send could block forever and deadlock Run.
	errors := make(chan error, len(pipelines))
	// Make sure we don't race with cancel; the lock is held while we add pipelines.
	r.lock.Lock()
	if r.cancelled {
		// we were cancelled before we even started
		r.lock.Unlock()
		return ErrCancelled
	}
	for _, pInfo := range pipelines {
		if err := r.makeOutRepo(pInfo.Name()); err != nil {
			// Don't leak the lock on this error path; the deferred unlock
			// below hasn't been set up yet.
			r.lock.Unlock()
			return err
		}
		p := newPipeline(pInfo.Name(), r.inRepo, path.Join(r.outPrefix, pInfo.Name()), r.commit, r.branch, r.shard, r.outPrefix, r.cache)
		r.pipelines = append(r.pipelines, p)
	}
	// unlocker lets us both defer the unlock and unlock explicitly below,
	// without calling Unlock twice.
	var unlocker sync.Once
	defer unlocker.Do(r.lock.Unlock)
	for _, p := range r.pipelines {
		p := p // capture the loop variable for the goroutine below
		r.wait.Add(1)
		go func() {
			defer r.wait.Done()
			f, err := btrfs.Open(path.Join(r.inRepo, r.commit, r.pipelineDir, p.name))
			if err != nil {
				errors <- err
				return
			}
			defer f.Close()
			if err := p.runPachFile(f); err != nil {
				errors <- err
			}
		}()
	}
	// We're done adding pipelines so unlock
	unlocker.Do(r.lock.Unlock)
	// Wait for the pipelines to finish
	r.wait.Wait()
	close(errors)
	if r.cancelled {
		// Pipelines finished because we were cancelled
		return ErrCancelled
	}
	// Return the first error any pipeline reported, if there was one.
	for err := range errors {
		return err
	}
	return nil
}
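
Run's error collection relies on two details called out in its comments: the channel buffer equals the pipeline count, and each goroutine sends at most one error, so no send can block and the WaitGroup always drains. Here is a standalone sketch of the same pattern, using illustrative names (work, errCh) that are not from the source:

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	work := []string{"a", "b", "c"}
	// Capacity equals the number of goroutines, so every worker can send its
	// single error without blocking, even before anyone reads the channel.
	errCh := make(chan error, len(work))
	var wg sync.WaitGroup
	for _, w := range work {
		w := w // capture the loop variable, as Run does with p
		wg.Add(1)
		go func() {
			defer wg.Done()
			if w == "b" {
				errCh <- errors.New("failed: " + w) // at most one send per worker
			}
		}()
	}
	wg.Wait()
	close(errCh) // safe: all sends happened before Wait returned
	// Report only the first error, mirroring Run.
	for err := range errCh {
		fmt.Println("first error:", err)
		break
	}
}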