Example #1
0
// Given a base directory and a set of exclusions, list the files and
// directories that would be saved by a backup job with the same info in a
// human-readable format. Write the output to the supplied writer.
// Given a base directory and a set of exclusions, list the files and
// directories that would be saved by a backup job with the same info in a
// human-readable format. Write the output to the supplied writer.
func List(
	ctx context.Context,
	w io.Writer,
	basePath string,
	exclusions []*regexp.Regexp) (err error) {
	// Visit all nodes in the graph with a visitor that prints info about the
	// node.
	dr := newDependencyResolver(basePath, exclusions)
	v := &listVisitor{w: w}

	// Parallelism of one keeps the printed output in a deterministic order —
	// presumably intentional for a human-readable listing; confirm before
	// raising.
	const resolverParallelism = 1
	const visitorParallelism = 1

	err = dag.Visit(
		ctx,
		[]dag.Node{makeRootNode()},
		dr,
		v,
		resolverParallelism,
		visitorParallelism)

	if err != nil {
		// Wrap with %w so callers can unwrap the cause with errors.Is/As.
		return fmt.Errorf("dag.Visit: %w", err)
	}

	return nil
}
Example #2
0
// Read all blobs necessary for verifying the directory structure rooted at a
// set of backup root scores, ensuring that the entire directory structure is
// intact in GCS.
//
// Optionally, all file content is also read and verified. This is less
// important than verifying directory connectedness if we trust that GCS does
// not corrupt object metadata (where we store expected CRC32C and MD5) and
// does correctly report the object's CRC32C and MD5 sums in listings,
// verifying them periodically.
//
// If work is to be preserved across runs, knownStructure should be filled in
// with parenthood information from previously-generated records (for both
// files and directories). Nodes that exist as keys in this map will not be
// re-verified, except to confirm that their content still exists in allScores.
//
// It is expected that the blob store's Load method does score verification for
// us.
// Read all blobs necessary for verifying the directory structure rooted at a
// set of backup root scores, ensuring that the entire directory structure is
// intact in GCS.
//
// Optionally, all file content is also read and verified. This is less
// important than verifying directory connectedness if we trust that GCS does
// not corrupt object metadata (where we store expected CRC32C and MD5) and
// does correctly report the object's CRC32C and MD5 sums in listings,
// verifying them periodically.
//
// If work is to be preserved across runs, knownStructure should be filled in
// with parenthood information from previously-generated records (for both
// files and directories). Nodes that exist as keys in this map will not be
// re-verified, except to confirm that their content still exists in allScores.
//
// It is expected that the blob store's Load method does score verification for
// us.
func Verify(
	ctx context.Context,
	readFiles bool,
	rootScores []blob.Score,
	allScores []blob.Score,
	knownStructure map[Node][]Node,
	records chan<- Record,
	blobStore blob.Store) (err error) {
	clock := timeutil.RealClock()

	// Set up a dependency resolver that reads directory listings. It also takes
	// care of confirming that all scores (for files and directories) exist.
	dr := newDependencyResolver(
		allScores,
		knownStructure,
		records,
		blobStore,
		clock)

	// Do we need to do anything for file nodes?
	var visitor dag.Visitor
	if readFiles {
		visitor = newVisitor(records, blobStore, clock)
	} else {
		visitor = &doNothingVisitor{}
	}

	// Traverse the graph, starting from one directory node per root score.
	// Pre-size the slice since the number of roots is known.
	rootNodes := make([]dag.Node, 0, len(rootScores))
	for _, s := range rootScores {
		n := Node{
			Score: s,
			Dir:   true,
		}

		rootNodes = append(rootNodes, n)
	}

	const resolverParallelism = 128
	const visitorParallelism = 128

	err = dag.Visit(
		ctx,
		rootNodes,
		dr,
		visitor,
		resolverParallelism,
		visitorParallelism)

	if err != nil {
		// Wrap with %w so callers can unwrap the cause with errors.Is/As.
		return fmt.Errorf("dag.Visit: %w", err)
	}

	return nil
}
Example #3
0
// call runs dag.Visit over the supplied start nodes using the given
// dependency-resolution and visit callbacks, returning the traversal error.
func (t *VisitTest) call(
	startNodes []string,
	findDependencies func(context.Context, string) ([]string, error),
	visit func(context.Context, string) error) (err error) {
	// Convert to a list of dag.Node, pre-sizing since the count is known.
	startDAGNodes := make([]dag.Node, 0, len(startNodes))
	for _, n := range startNodes {
		startDAGNodes = append(startDAGNodes, n)
	}

	// Call, returning dag.Visit's error directly.
	return dag.Visit(
		t.ctx,
		startDAGNodes,
		&dependencyResolver{F: findDependencies},
		&visitor{F: visit},
		resolverParallelism,
		visitorParallelism)
}