Example #1
File: gc.go Project: kpcyrd/go-ipfs
func ColoredSet(ctx context.Context, pn pin.Pinner, ds dag.DAGService, bestEffortRoots []key.Key) (key.KeySet, error) {
	// KeySet is currently implemented in memory; in the future it may be
	// backed by a bloom filter or disk to conserve memory.
	gcs := key.NewKeySet()
	err := Descendants(ctx, ds, gcs, pn.RecursiveKeys(), false)
	if err != nil {
		return nil, err
	}

	err = Descendants(ctx, ds, gcs, bestEffortRoots, true)
	if err != nil {
		return nil, err
	}

	for _, k := range pn.DirectKeys() {
		gcs.Add(k)
	}

	err = Descendants(ctx, ds, gcs, pn.InternalPins(), false)
	if err != nil {
		return nil, err
	}

	return gcs, nil
}
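
Note: the examples in this listing share a small KeySet surface: NewKeySet to create the set, Add to insert, Has to test membership, Keys to enumerate, and B58String/B58KeyDecode to convert to and from base58 strings. Below is a minimal, standalone sketch of that pattern; the import path is assumed from the historical go-ipfs layout, and the multihash string is purely illustrative.

package main

import (
	"fmt"

	key "github.com/ipfs/go-ipfs/blocks/key" // assumed historical location of the key package
)

func main() {
	// Build a key set the same way the GC and pin code above does.
	ks := key.NewKeySet()

	// B58KeyDecode parses a base58 multihash string into a key.Key
	// (this hash is only an example value).
	k := key.B58KeyDecode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	ks.Add(k)

	if ks.Has(k) {
		fmt.Println("key is in the set")
	}
	for _, k := range ks.Keys() {
		fmt.Println(k.B58String())
	}
}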
Example #2
func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string]RefKeyObject, error) {

	keys := make(map[string]RefKeyObject)

	AddToResultKeys := func(keyList []key.Key, typeStr string) {
		for _, k := range keyList {
			keys[k.B58String()] = RefKeyObject{
				Type: typeStr,
			}
		}
	}

	if typeStr == "direct" || typeStr == "all" {
		AddToResultKeys(n.Pinning.DirectKeys(), "direct")
	}
	if typeStr == "indirect" || typeStr == "all" {
		ks := key.NewKeySet()
		for _, k := range n.Pinning.RecursiveKeys() {
			nd, err := n.DAG.Get(ctx, k)
			if err != nil {
				return nil, err
			}
			err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks)
			if err != nil {
				return nil, err
			}
		}
		AddToResultKeys(ks.Keys(), "indirect")
	}
	if typeStr == "recursive" || typeStr == "all" {
		AddToResultKeys(n.Pinning.RecursiveKeys(), "recursive")
	}

	return keys, nil
}
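
A hypothetical caller (not part of the original source) could consume the map returned by pinLsAll like this; printPins is an illustrative helper assumed to live in the same package as pinLsAll:

// printPins is a hypothetical helper that calls pinLsAll and prints one
// "<base58 hash> <pin type>" line per pin.
func printPins(ctx context.Context, n *core.IpfsNode, typeStr string) error {
	pins, err := pinLsAll(typeStr, ctx, n)
	if err != nil {
		return err
	}
	for hash, obj := range pins {
		fmt.Printf("%s %s\n", hash, obj.Type)
	}
	return nil
}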
Example #3
func TestEnumerateChildren(t *testing.T) {
	bsi := bstest.Mocks(1)
	ds := NewDAGService(bsi[0])

	read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024)
	root, err := imp.BuildDagFromReader(ds, chunk.NewSizeSplitter(read, 512))
	if err != nil {
		t.Fatal(err)
	}

	ks := key.NewKeySet()
	err = EnumerateChildren(context.Background(), ds, root, ks, false)
	if err != nil {
		t.Fatal(err)
	}

	var traverse func(n *Node)
	traverse = func(n *Node) {
		// traverse dag and check
		for _, lnk := range n.Links {
			k := key.Key(lnk.Hash)
			if !ks.Has(k) {
				t.Fatal("missing key in set!")
			}
			child, err := ds.Get(context.Background(), k)
			if err != nil {
				t.Fatal(err)
			}
			traverse(child)
		}
	}

	traverse(root)
}
Example #4
func TestFetchGraph(t *testing.T) {
	var dservs []DAGService
	bsis := bstest.Mocks(2)
	for _, bsi := range bsis {
		dservs = append(dservs, NewDAGService(bsi))
	}

	read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
	root, err := imp.BuildDagFromReader(dservs[0], chunk.NewSizeSplitter(read, 512))
	if err != nil {
		t.Fatal(err)
	}

	err = FetchGraph(context.TODO(), root, dservs[1])
	if err != nil {
		t.Fatal(err)
	}

	// create an offline dagstore and ensure all blocks were fetched
	bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore))

	offlineDS := NewDAGService(bs)
	ks := key.NewKeySet()

	err = EnumerateChildren(context.Background(), offlineDS, root, ks, false)
	if err != nil {
		t.Fatal(err)
	}
}
Example #5
func TestAddGCLive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

	errs := make(chan error)
	out := make(chan interface{})
	adder, err := NewAdder(context.Background(), node, out)
	if err != nil {
		t.Fatal(err)
	}

	dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA"))
	rfa := files.NewReaderFile("a", "a", dataa, nil)

	// make two files with pipes so we can 'pause' the add for timing of the test
	piper, pipew := io.Pipe()
	hangfile := files.NewReaderFile("b", "b", piper, nil)

	datad := ioutil.NopCloser(bytes.NewBufferString("testfileD"))
	rfd := files.NewReaderFile("d", "d", datad, nil)

	slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd})

	addDone := make(chan struct{})
	go func() {
		defer close(addDone)
		defer close(out)
		// t.Fatal must not be called from a goroutine other than the one
		// running the test, so record the failure with t.Error instead.
		if err := adder.AddFile(slf); err != nil {
			t.Error(err)
		}
	}()

	addedHashes := make(map[string]struct{})
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case <-addDone:
		t.Fatal("add shouldnt complete yet")
	}

	var gcout <-chan key.Key
	gcstarted := make(chan struct{})
	go func() {
		defer close(gcstarted)
		gcchan, err := gc.GC(context.Background(), node.Blockstore, node.Pinning)
		if err != nil {
			log.Error("GC ERROR:", err)
			errs <- err
			return
		}

		gcout = gcchan
	}()

	// gc shouldn't start until we let the add finish its current file.
	pipew.Write([]byte("some data for file b"))

	select {
	case <-gcstarted:
		t.Fatal("gc shouldnt have started yet")
	case err := <-errs:
		t.Fatal(err)
	default:
	}

	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock

	// finish write and unblock gc
	pipew.Close()

	// receive next object from adder
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case err := <-errs:
		t.Fatal(err)
	}

	select {
	case <-gcstarted:
	case err := <-errs:
		t.Fatal(err)
	}

	for k := range gcout {
		if _, ok := addedHashes[k.B58String()]; ok {
			t.Fatal("gc'ed a hash we just added")
		}
	}

	var last key.Key
	for a := range out {
		// wait for it to finish
		last = key.B58KeyDecode(a.(*AddedObject).Hash)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	root, err := node.DAG.Get(ctx, last)
	if err != nil {
		t.Fatal(err)
	}

	err = dag.EnumerateChildren(ctx, node.DAG, root, key.NewKeySet())
	if err != nil {
		t.Fatal(err)
	}
}
Example #6
		case "all", "direct", "indirect", "recursive":
		default:
			err = fmt.Errorf("Invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr)
			res.SetError(err, cmds.ErrClient)
		}

		keys := make(map[string]RefKeyObject)
		if typeStr == "direct" || typeStr == "all" {
			for _, k := range n.Pinning.DirectKeys() {
				keys[k.B58String()] = RefKeyObject{
					Type: "direct",
				}
			}
		}
		if typeStr == "indirect" || typeStr == "all" {
			ks := key.NewKeySet()
			for _, k := range n.Pinning.RecursiveKeys() {
				nd, err := n.DAG.Get(n.Context(), k)
				if err != nil {
					res.SetError(err, cmds.ErrNormal)
					return
				}
				err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks)
				if err != nil {
					res.SetError(err, cmds.ErrNormal)
					return
				}

			}
			for _, k := range ks.Keys() {
				keys[k.B58String()] = RefKeyObject{
					Type: "indirect",
				}
			}
		}
Example #7
// FetchGraph fetches all nodes that are children of the given node
func FetchGraph(ctx context.Context, root *Node, serv DAGService) error {
	return EnumerateChildrenAsync(ctx, serv, root, key.NewKeySet())
}
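
As a usage note, FetchGraph is usually driven with a bounded context; the wrapper below is a hypothetical sketch (fetchGraphWithTimeout is not part of the original source) combining FetchGraph with the timeout pattern seen in Example #5:

// fetchGraphWithTimeout is a hypothetical wrapper that bounds the fetch
// with a deadline before delegating to FetchGraph.
func fetchGraphWithTimeout(root *Node, serv DAGService, d time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	return FetchGraph(ctx, root, serv)
}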