func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string]RefKeyObject, error) {
	keys := make(map[string]RefKeyObject)

	AddToResultKeys := func(keyList []*cid.Cid, typeStr string) {
		for _, c := range keyList {
			keys[c.String()] = RefKeyObject{
				Type: typeStr,
			}
		}
	}

	if typeStr == "direct" || typeStr == "all" {
		AddToResultKeys(n.Pinning.DirectKeys(), "direct")
	}
	if typeStr == "indirect" || typeStr == "all" {
		set := cid.NewSet()
		for _, k := range n.Pinning.RecursiveKeys() {
			err := dag.EnumerateChildren(n.Context(), n.DAG, k, set.Visit, false)
			if err != nil {
				return nil, err
			}
		}
		AddToResultKeys(set.Keys(), "indirect")
	}
	if typeStr == "recursive" || typeStr == "all" {
		AddToResultKeys(n.Pinning.RecursiveKeys(), "recursive")
	}

	return keys, nil
}
func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string]RefKeyObject, error) {
	keys := make(map[string]RefKeyObject)

	AddToResultKeys := func(keyList []key.Key, typeStr string) {
		for _, k := range keyList {
			keys[k.B58String()] = RefKeyObject{
				Type: typeStr,
			}
		}
	}

	if typeStr == "direct" || typeStr == "all" {
		AddToResultKeys(n.Pinning.DirectKeys(), "direct")
	}
	if typeStr == "indirect" || typeStr == "all" {
		ks := key.NewKeySet()
		for _, k := range n.Pinning.RecursiveKeys() {
			nd, err := n.DAG.Get(ctx, k)
			if err != nil {
				return nil, err
			}
			err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks)
			if err != nil {
				return nil, err
			}
		}
		AddToResultKeys(ks.Keys(), "indirect")
	}
	if typeStr == "recursive" || typeStr == "all" {
		AddToResultKeys(n.Pinning.RecursiveKeys(), "recursive")
	}

	return keys, nil
}
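// Hypothetical caller sketch (not from the source): how either pinLsAll variant
// above might be invoked to print every pin together with its pin type. The
// helper name printAllPins and the assumption that `n` is an already-built
// *core.IpfsNode are illustrative only.
func printAllPins(ctx context.Context, n *core.IpfsNode) error {
	keys, err := pinLsAll("all", ctx, n)
	if err != nil {
		return err
	}
	for hash, ref := range keys {
		fmt.Printf("%s %s\n", hash, ref.Type)
	}
	return nil
}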
func Descendants(ctx context.Context, ls dag.LinkService, set *cid.Set, roots []*cid.Cid, bestEffort bool) error {
	for _, c := range roots {
		set.Add(c)

		// EnumerateChildren recursively walks the dag and adds the keys to the given set
		err := dag.EnumerateChildren(ctx, ls, c, set.Visit, bestEffort)
		if err != nil {
			return err
		}
	}

	return nil
}
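// Hypothetical usage sketch (an assumption, not from the source): gathering the
// descendants of a slice of root CIDs with the cid-based Descendants above,
// using the set.Visit callback style it relies on. `ls` is assumed to satisfy
// dag.LinkService; bestEffort=false makes missing blocks surface as errors
// instead of being skipped.
func collectDescendants(ctx context.Context, ls dag.LinkService, roots []*cid.Cid) ([]string, error) {
	set := cid.NewSet()
	if err := Descendants(ctx, ls, set, roots, false); err != nil {
		return nil, err
	}
	var out []string
	for _, c := range set.Keys() {
		out = append(out, c.String())
	}
	return out, nil
}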
func Descendants(ctx context.Context, ds dag.DAGService, set key.KeySet, roots []key.Key, bestEffort bool) error {
	for _, k := range roots {
		set.Add(k)

		nd, err := ds.Get(ctx, k)
		if err != nil {
			return err
		}

		// EnumerateChildren recursively walks the dag and adds the keys to the given set
		err = dag.EnumerateChildren(ctx, ds, nd, set, bestEffort)
		if err != nil {
			return err
		}
	}

	return nil
}
func Descendants(ds dag.DAGService, set key.KeySet, roots []key.Key) error {
	for _, k := range roots {
		set.Add(k)

		nd, err := ds.Get(context.Background(), k)
		if err != nil {
			return err
		}

		// EnumerateChildren recursively walks the dag and adds the keys to the given set
		err = dag.EnumerateChildren(context.Background(), ds, nd, set)
		if err != nil {
			return err
		}
	}

	return nil
}
func TestAddGCLive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}

	errs := make(chan error)
	out := make(chan interface{})
	adder, err := NewAdder(context.Background(), node, out)
	if err != nil {
		t.Fatal(err)
	}

	dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA"))
	rfa := files.NewReaderFile("a", "a", dataa, nil)

	// make two files with pipes so we can 'pause' the add for timing of the test
	piper, pipew := io.Pipe()
	hangfile := files.NewReaderFile("b", "b", piper, nil)

	datad := ioutil.NopCloser(bytes.NewBufferString("testfileD"))
	rfd := files.NewReaderFile("d", "d", datad, nil)

	slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd})

	addDone := make(chan struct{})
	go func() {
		defer close(addDone)
		defer close(out)
		err := adder.AddFile(slf)
		if err != nil {
			t.Fatal(err)
		}
	}()

	addedHashes := make(map[string]struct{})
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case <-addDone:
		t.Fatal("add shouldnt complete yet")
	}

	var gcout <-chan key.Key
	gcstarted := make(chan struct{})
	go func() {
		defer close(gcstarted)
		gcchan, err := gc.GC(context.Background(), node.Blockstore, node.Pinning)
		if err != nil {
			log.Error("GC ERROR:", err)
			errs <- err
			return
		}

		gcout = gcchan
	}()

	// gc shouldnt start until we let the add finish its current file.
	pipew.Write([]byte("some data for file b"))

	select {
	case <-gcstarted:
		t.Fatal("gc shouldnt have started yet")
	case err := <-errs:
		t.Fatal(err)
	default:
	}

	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock

	// finish write and unblock gc
	pipew.Close()

	// receive next object from adder
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case err := <-errs:
		t.Fatal(err)
	}

	select {
	case <-gcstarted:
	case err := <-errs:
		t.Fatal(err)
	}

	for k := range gcout {
		if _, ok := addedHashes[k.B58String()]; ok {
			t.Fatal("gc'ed a hash we just added")
		}
	}

	var last key.Key
	for a := range out {
		// wait for it to finish
		last = key.B58KeyDecode(a.(*AddedObject).Hash)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()

	root, err := node.DAG.Get(ctx, last)
	if err != nil {
		t.Fatal(err)
	}

	err = dag.EnumerateChildren(ctx, node.DAG, root, key.NewKeySet())
	if err != nil {
		t.Fatal(err)
	}
}
if typeStr == "direct" || typeStr == "all" { for _, k := range n.Pinning.DirectKeys() { keys[k.B58String()] = RefKeyObject{ Type: "direct", } } } if typeStr == "indirect" || typeStr == "all" { ks := key.NewKeySet() for _, k := range n.Pinning.RecursiveKeys() { nd, err := n.DAG.Get(n.Context(), k) if err != nil { res.SetError(err, cmds.ErrNormal) return } err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks) if err != nil { res.SetError(err, cmds.ErrNormal) return } } for _, k := range ks.Keys() { keys[k.B58String()] = RefKeyObject{ Type: "indirect", } } } if typeStr == "recursive" || typeStr == "all" { for _, k := range n.Pinning.RecursiveKeys() { keys[k.B58String()] = RefKeyObject{