func escapeDhtKey(s string) (key.Key, error) { parts := strings.Split(s, "/") switch len(parts) { case 1: return key.B58KeyDecode(s), nil case 3: k := key.B58KeyDecode(parts[2]) return key.Key(strings.Join(append(parts[:2], string(k)), "/")), nil default: return "", errors.New("invalid key") } }
func escapeDhtKey(s string) (key.Key, error) { parts := path.SplitList(s) switch len(parts) { case 1: return key.B58KeyDecode(s), nil case 3: k := key.B58KeyDecode(parts[2]) return key.Key(path.Join(append(parts[:2], k.String()))), nil default: return "", errors.New("invalid key") } }
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) { ukey := key.B58KeyDecode(skey) nd, err := n.DAG.Get(n.Context(), ukey) if err != nil { return "", err } mdnode := new(dag.Node) mdata, err := ft.BytesForMetadata(m) if err != nil { return "", err } mdnode.Data = mdata if err := mdnode.AddNodeLinkClean("file", nd); err != nil { return "", err } nk, err := n.DAG.Add(mdnode) if err != nil { return "", err } return nk.B58String(), nil }
// ResolveToKey resolves a path to a key. // // It first checks if the path is already in the form of just a key (<key> or // /ipfs/<key>) and returns immediately if so. Otherwise, it falls back onto // Resolve to perform resolution of the dagnode being referenced. func ResolveToKey(ctx context.Context, n *IpfsNode, p path.Path) (key.Key, error) { // If the path is simply a key, parse and return it. Parsed paths are already // normalized (read: prepended with /ipfs/ if needed), so segment[1] should // always be the key. if p.IsJustAKey() { return key.B58KeyDecode(p.Segments()[1]), nil } // Fall back onto regular dagnode resolution. Retrieve the second-to-last // segment of the path and resolve its link to the last segment. head, tail, err := p.PopLastSegment() if err != nil { return key.Key(""), err } dagnode, err := Resolve(ctx, n, head) if err != nil { return key.Key(""), err } // Extract and return the key of the link to the target dag node. link, err := dagnode.GetNodeLink(tail) if err != nil { return key.Key(""), err } return key.Key(link.Hash), nil }
//FromB58Slice makes and returns Key from B58String slice func FromB58Slice(ks []string) Key { m := make(Key) for _, kk := range ks { k := key.B58KeyDecode(kk) m.Add(k) } return m }
//nameResolve resole name hash using IPNS. func (p *Peer) nameResolve() (key.Key, error) { log.Println("resolving", p.ID.Pretty()) pt, err := p.myself.ipfsNode.Namesys.Resolve(p.myself.ctx, "/ipns/"+p.ID.Pretty()) if log.If(err) { return "", err } log.Println("resolved", pt) seg := pt.Segments() return key.B58KeyDecode(seg[len(seg)-1]), nil }
func Metadata(n *core.IpfsNode, skey string) (*ft.Metadata, error) { ukey := key.B58KeyDecode(skey) nd, err := n.DAG.Get(n.Context(), ukey) if err != nil { return nil, err } return ft.MetadataFromBytes(nd.Data) }
// DelBlock deletes the block pointed to by `hash`. func DelBlock(node *Node, hash gmh.Multihash) error { nd, err := node.proc() if err != nil { log.Warningf("ipfs block-del: %v", err) return err } k := key.B58KeyDecode(hash.B58String()) return nd.Blocks.DeleteBlock(k) }
// TestMetadata round-trips file metadata: it builds a dag from random bytes,
// wraps it with AddMetadataTo, reads the metadata back with Metadata, and
// finally verifies the original file content is still readable through the
// wrapper node.
func TestMetadata(t *testing.T) {
	ctx := context.Background()
	// Make some random node
	ds := getDagserv(t)
	data := make([]byte, 1000)
	u.NewTimeSeededRand().Read(data)
	r := bytes.NewReader(data)
	nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r))
	if err != nil {
		t.Fatal(err)
	}
	k, err := nd.Key()
	if err != nil {
		t.Fatal(err)
	}
	m := new(ft.Metadata)
	m.MimeType = "THIS IS A TEST"
	// Such effort, many compromise
	// A bare IpfsNode with just a DAG service is enough for the two
	// metadata helpers under test.
	ipfsnode := &core.IpfsNode{DAG: ds}
	mdk, err := AddMetadataTo(ipfsnode, k.B58String(), m)
	if err != nil {
		t.Fatal(err)
	}
	rec, err := Metadata(ipfsnode, mdk)
	if err != nil {
		t.Fatal(err)
	}
	if rec.MimeType != m.MimeType {
		t.Fatalf("something went wrong in conversion: '%s' != '%s'", rec.MimeType, m.MimeType)
	}
	// The wrapper links to the original file; reading the wrapper through a
	// DagReader should yield the original bytes unchanged.
	retnode, err := ds.Get(ctx, key.B58KeyDecode(mdk))
	if err != nil {
		t.Fatal(err)
	}
	ndr, err := uio.NewDagReader(ctx, retnode, ds)
	if err != nil {
		t.Fatal(err)
	}
	out, err := ioutil.ReadAll(ndr)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(out, data) {
		t.Fatal("read incorrect data")
	}
}
func pin(n *core.IpfsNode, ctx context.Context, hash string) error { hashkey := key.B58KeyDecode(hash) node, err := n.DAG.Get(ctx, hashkey) if err != nil { return err } err = n.Pinning.Pin(ctx, node, false) return err }
// addDefaultAssets opens the repo at repoRoot on an offline node, adds every
// file from the embedded assets.Init_dir into a new unixfs directory, pins
// that directory recursively, and writes a getting-started hint to out.
func addDefaultAssets(out io.Writer, repoRoot string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	r, err := fsrepo.Open(repoRoot)
	if err != nil { // NB: repo is owned by the node
		return err
	}
	nd, err := core.NewIPFSNode(ctx, core.Offline(r))
	if err != nil {
		return err
	}
	defer nd.Close()
	dirb := uio.NewDirectory(nd.DAG)
	// add every file in the assets pkg
	for fname, file := range assets.Init_dir {
		buf := bytes.NewBufferString(file)
		s, err := coreunix.Add(nd, buf)
		if err != nil {
			return err
		}
		// coreunix.Add returns the base58 hash of the added content; link
		// it into the directory under the asset's name.
		k := key.B58KeyDecode(s)
		if err := dirb.AddChild(fname, k); err != nil {
			return err
		}
	}
	dir := dirb.GetNode()
	dkey, err := nd.DAG.Add(dir)
	if err != nil {
		return err
	}
	// Pin recursively so the assets survive GC, then persist the pin state.
	if err := nd.Pinning.Pin(ctx, dir, true); err != nil {
		return err
	}
	if err := nd.Pinning.Flush(); err != nil {
		return err
	}
	if _, err = fmt.Fprintf(out, "to get started, enter:\n"); err != nil {
		return err
	}
	_, err = fmt.Fprintf(out, "\n\tipfs cat /ipfs/%s/readme\n\n", dkey)
	return err
}
func PublishPointer(node *core.IpfsNode, ctx context.Context, mhKey multihash.Multihash, prefixLen int, addr ma.Multiaddr) (Pointer, error) { keyhash := createKey(mhKey, prefixLen) k := key.B58KeyDecode(keyhash.B58String()) magicID, err := getMagicID() if err != nil { return Pointer{}, err } pi := peer.PeerInfo{ ID: magicID, Addrs: []ma.Multiaddr{addr}, } return Pointer{Key: k, Value: pi}, addPointer(node, ctx, k, pi) }
// Locate finds the object pointed to by `hash`. It will wait // for max `timeout` duration if it got less than `n` items in that time. // If `n` is less than 0, all reachable peers that have `hash` will be returned. // If `n` is 0, Locate will return immeditately. // This operation requires online-mode. func Locate(node *Node, hash gmh.Multihash, n int, t time.Duration) ([]*PeerInfo, error) { if n == 0 { return []*PeerInfo{}, nil } // Note: Do not use Maxint32. That makes ipfs allocate // a whole lot of memory. Just assume that 100 is fine. if n < 0 { n = 100 } if !node.IsOnline() { return nil, ErrIsOffline } nd, err := node.proc() if err != nil { log.Warningf("ipfs dht: %v", err) return nil, err } dht, ok := nd.Routing.(*ipdht.IpfsDHT) if !ok { return nil, commands.ErrNotDHT } ctx, cancel := context.WithTimeout(node.Context, t) defer cancel() k := key.B58KeyDecode(hash.B58String()) peers := dht.FindProvidersAsync(ctx, k, n) infos := []*PeerInfo{} for info := range peers { // Converting equal struct into each other is my favourite thing. peerInfo := &PeerInfo{ ID: info.ID.Pretty(), PubKey: node.ipfsNode.Peerstore.PubKey(info.ID), } for _, addr := range info.Addrs { peerInfo.Addrs = append(peerInfo.Addrs, ma.Cast(addr.Bytes())) } infos = append(infos, peerInfo) } return infos, nil }
func loadIndirPin(d ds.Datastore, k ds.Key) (*indirectPin, error) { var rcStore map[string]int err := loadSet(d, k, &rcStore) if err != nil { return nil, err } refcnt := make(map[key.Key]int) var keys []key.Key for encK, v := range rcStore { if v > 0 { k := key.B58KeyDecode(encK) keys = append(keys, k) refcnt[k] = v } } // log.Debugf("indirPin keys: %#v", keys) return &indirectPin{blockset: set.SimpleSetFromKeys(keys), refCounts: refcnt}, nil }
func addLinkCaller(req cmds.Request, root *dag.Node) (key.Key, error) { if len(req.Arguments()) < 4 { return "", fmt.Errorf("not enough arguments for add-link") } nd, err := req.InvocContext().GetNode() if err != nil { return "", err } path := req.Arguments()[2] childk := key.B58KeyDecode(req.Arguments()[3]) create, _, err := req.Option("create").Bool() if err != nil { return "", err } var createfunc func() *dag.Node if create { createfunc = func() *dag.Node { return &dag.Node{Data: ft.FolderPBData()} } } e := dagutils.NewDagEditor(nd.DAG, root) childnd, err := nd.DAG.Get(req.Context(), childk) if err != nil { return "", err } err = e.InsertNodeAtPath(req.Context(), path, childnd, createfunc) if err != nil { return "", err } nnode := e.GetNode() return nnode.Key() }
func addLinkCaller(req cmds.Request, root *dag.Node) (key.Key, error) { if len(req.Arguments()) < 4 { return "", fmt.Errorf("not enough arguments for add-link") } nd, err := req.InvocContext().GetNode() if err != nil { return "", err } path := req.Arguments()[2] childk := key.B58KeyDecode(req.Arguments()[3]) parts := strings.Split(path, "/") nnode, err := insertNodeAtPath(req.Context(), nd.DAG, root, parts, childk) if err != nil { return "", err } return nnode.Key() }
func (p *PointersDB) GetAll() ([]ipfs.Pointer, error) { p.lock.Lock() defer p.lock.Unlock() stm := "select * from pointers" rows, err := p.db.Query(stm) if err != nil { log.Error(err) return nil, err } var ret []ipfs.Pointer for rows.Next() { var pointerID string var key string var address string var purpose int var timestamp int if err := rows.Scan(&pointerID, &key, &address, &purpose, ×tamp); err != nil { log.Error(err) } maAddr, err := ma.NewMultiaddr(address) if err != nil { return ret, err } pid, err := peer.IDB58Decode(pointerID) if err != nil { return ret, err } pointer := ipfs.Pointer{ Key: keys.B58KeyDecode(key), Value: peer.PeerInfo{ ID: pid, Addrs: []ma.Multiaddr{maAddr}, }, Purpose: ipfs.Purpose(purpose), Timestamp: time.Unix(int64(timestamp), 0), } ret = append(ret, pointer) } return ret, nil }
// CatBlock retuns the data stored in the block pointed to by `hash`. // It will timeout with util.ErrTimeout if the operation takes too long, // this includes querying for an non-existing hash. // // This operation works offline and online, but if the block is stored // elsewhere on the net, node must be online to find the block. func CatBlock(node *Node, hash gmh.Multihash, timeout time.Duration) ([]byte, error) { nd, err := node.proc() if err != nil { log.Warningf("ipfs block-cat: %v", err) return nil, err } ctx, cancel := context.WithTimeout(node.Context, timeout) defer cancel() k := key.B58KeyDecode(hash.B58String()) block, err := nd.Blocks.GetBlock(ctx, k) if err == context.DeadlineExceeded { return nil, util.ErrTimeout } if err != nil { return nil, err } return block.Data(), nil }
// addAssetList adds each embedded asset named in l to the node, links them
// all into a single unixfs directory, pins that directory recursively, and
// returns the directory's key.
func addAssetList(nd *core.IpfsNode, l []string) (*key.Key, error) {
	dirb := uio.NewDirectory(nd.DAG)
	for _, p := range l {
		d, err := Asset(p)
		if err != nil {
			return nil, fmt.Errorf("assets: could load Asset '%s': %s", p, err)
		}
		s, err := coreunix.Add(nd, bytes.NewBuffer(d))
		if err != nil {
			return nil, fmt.Errorf("assets: could not Add '%s': %s", p, err)
		}
		// Link the added content into the directory under its base name.
		fname := filepath.Base(p)
		k := key.B58KeyDecode(s)
		if err := dirb.AddChild(nd.Context(), fname, k); err != nil {
			return nil, fmt.Errorf("assets: could not add '%s' as a child: %s", fname, err)
		}
	}
	dir := dirb.GetNode()
	dkey, err := nd.DAG.Add(dir)
	if err != nil {
		return nil, fmt.Errorf("assets: DAG.Add(dir) failed: %s", err)
	}
	// Pin recursively so the asset tree survives garbage collection, then
	// persist the pin state.
	if err := nd.Pinning.Pin(nd.Context(), dir, true); err != nil {
		return nil, fmt.Errorf("assets: Pinning on init-docu failed: %s", err)
	}
	if err := nd.Pinning.Flush(); err != nil {
		return nil, fmt.Errorf("assets: Pinning flush failed: %s", err)
	}
	return &dkey, nil
}
// putHandler services HTTP PUT on the writable gateway: it parses the
// request path, builds a new dag node from the request body (or an empty
// directory for the well-known empty-dir hash), and either patches the node
// into the resolved root or replaces the resolved node's data, finally
// redirecting the client to the new /ipfs path.
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): move me to ServeHTTP and pass into all handlers
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()
	rootPath, err := path.ParsePath(r.URL.Path)
	if err != nil {
		webError(w, "putHandler: ipfs path not valid", err, http.StatusBadRequest)
		return
	}
	rsegs := rootPath.Segments()
	// Writes through /ipns/ are rejected; only /ipfs/ paths can be edited.
	if rsegs[0] == ipnsPathPrefix {
		webError(w, "putHandler: updating named entries not supported", errors.New("WritableGateway: ipns put not supported"), http.StatusBadRequest)
		return
	}
	var newnode *dag.Node
	// This literal is the hash of the canonical empty unixfs directory;
	// PUTting it means "create an empty directory" rather than reading a body.
	if rsegs[len(rsegs)-1] == "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" {
		newnode = uio.NewEmptyDirectory()
	} else {
		putNode, err := i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
		newnode = putNode
	}
	// newPath is the path below the root hash (segments after /ipfs/<hash>).
	// NOTE(review): the guard is len(rsegs) > 1 but the slice starts at 2;
	// presumably intended as the sub-path after prefix+hash — confirm the
	// len == 2 case (empty join) is what callers expect.
	var newPath string
	if len(rsegs) > 1 {
		newPath = path.Join(rsegs[2:])
	}
	var newkey key.Key
	rnode, err := core.Resolve(ctx, i.node, rootPath)
	// Dispatch on the kind of resolve outcome: a missing link means we must
	// patch the tree from the root; nil error means the target exists and
	// its data is replaced; anything else is fatal.
	switch ev := err.(type) {
	case path.ErrNoLink:
		// ev.Node < node where resolve failed
		// ev.Name < new link
		// but we need to patch from the root
		// NOTE(review): this rnode intentionally shadows the outer rnode —
		// the outer value is unusable when resolution failed.
		rnode, err := i.node.DAG.Get(ctx, key.B58KeyDecode(rsegs[1]))
		if err != nil {
			webError(w, "putHandler: Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
		e := dagutils.NewDagEditor(rnode, i.node.DAG)
		err = e.InsertNodeAtPath(ctx, newPath, newnode, uio.NewEmptyDirectory)
		if err != nil {
			webError(w, "putHandler: InsertNodeAtPath failed", err, http.StatusInternalServerError)
			return
		}
		nnode, err := e.Finalize(i.node.DAG)
		if err != nil {
			webError(w, "putHandler: could not get node", err, http.StatusInternalServerError)
			return
		}
		newkey, err = nnode.Key()
		if err != nil {
			webError(w, "putHandler: could not get key of edited node", err, http.StatusInternalServerError)
			return
		}
	case nil:
		// object set-data case
		rnode.Data = newnode.Data
		newkey, err = i.node.DAG.Add(rnode)
		if err != nil {
			// Best-effort keys for the error message only; their errors are
			// deliberately ignored.
			nnk, _ := newnode.Key()
			rk, _ := rnode.Key()
			webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.B58String(), rk.B58String()), err, http.StatusInternalServerError)
			return
		}
	default:
		log.Warningf("putHandler: unhandled resolve error %T", ev)
		webError(w, "could not resolve root DAG", ev, http.StatusInternalServerError)
		return
	}
	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", newkey.String())
	http.Redirect(w, r, gopath.Join(ipfsPathPrefix, newkey.String(), newPath), http.StatusCreated)
}
"sync" "time" ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/ipfs/go-datastore" context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" key "github.com/ipfs/go-ipfs/blocks/key" "github.com/ipfs/go-ipfs/blocks/set" mdag "github.com/ipfs/go-ipfs/merkledag" logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log" ) var log = logging.Logger("pin") var pinDatastoreKey = ds.NewKey("/local/pins") var emptyKey = key.B58KeyDecode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") const ( linkDirect = "direct" linkRecursive = "recursive" ) type PinMode int const ( Recursive PinMode = iota Direct NotPinned ) type Pinner interface {
// TestAddGCLive checks that running GC concurrently with an in-progress add
// never collects blocks the adder has just produced: the add is paused via a
// pipe-backed file, GC is started while the add is blocked, and every
// GC'd key is checked against the set of added hashes.
func TestAddGCLive(t *testing.T) {
	r := &repo.Mock{
		C: config.Config{
			Identity: config.Identity{
				PeerID: "Qmfoo", // required by offline node
			},
		},
		D: testutil.ThreadSafeCloserMapDatastore(),
	}
	node, err := core.NewNode(context.Background(), &core.BuildCfg{Repo: r})
	if err != nil {
		t.Fatal(err)
	}
	errs := make(chan error)
	out := make(chan interface{})
	adder, err := NewAdder(context.Background(), node, out)
	if err != nil {
		t.Fatal(err)
	}
	dataa := ioutil.NopCloser(bytes.NewBufferString("testfileA"))
	rfa := files.NewReaderFile("a", "a", dataa, nil)
	// make two files with pipes so we can 'pause' the add for timing of the test
	piper, pipew := io.Pipe()
	hangfile := files.NewReaderFile("b", "b", piper, nil)
	datad := ioutil.NopCloser(bytes.NewBufferString("testfileD"))
	rfd := files.NewReaderFile("d", "d", datad, nil)
	slf := files.NewSliceFile("files", "files", []files.File{rfa, hangfile, rfd})
	addDone := make(chan struct{})
	go func() {
		defer close(addDone)
		defer close(out)
		// NOTE(review): t.Fatal from a non-test goroutine is not allowed by
		// the testing package; this should report via the errs channel.
		err := adder.AddFile(slf)
		if err != nil {
			t.Fatal(err)
		}
	}()
	// Record hashes as the adder emits them; the add must not finish while
	// file "b" is still blocked on the pipe.
	addedHashes := make(map[string]struct{})
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case <-addDone:
		t.Fatal("add shouldnt complete yet")
	}
	var gcout <-chan key.Key
	gcstarted := make(chan struct{})
	go func() {
		defer close(gcstarted)
		gcchan, err := gc.GC(context.Background(), node.Blockstore, node.Pinning)
		if err != nil {
			log.Error("GC ERROR:", err)
			errs <- err
			return
		}
		gcout = gcchan
	}()
	// gc shouldnt start until we let the add finish its current file.
	// NOTE(review): the Write error is ignored here — best-effort unblock.
	pipew.Write([]byte("some data for file b"))
	select {
	case <-gcstarted:
		t.Fatal("gc shouldnt have started yet")
	case err := <-errs:
		t.Fatal(err)
	default:
	}
	time.Sleep(time.Millisecond * 100) // make sure gc gets to requesting lock
	// finish write and unblock gc
	pipew.Close()
	// receive next object from adder
	select {
	case o := <-out:
		addedHashes[o.(*AddedObject).Hash] = struct{}{}
	case err := <-errs:
		t.Fatal(err)
	}
	select {
	case <-gcstarted:
	case err := <-errs:
		t.Fatal(err)
	}
	// Nothing GC collects may be one of the hashes the adder produced.
	for k := range gcout {
		if _, ok := addedHashes[k.B58String()]; ok {
			t.Fatal("gc'ed a hash we just added")
		}
	}
	var last key.Key
	for a := range out {
		// wait for it to finish
		last = key.B58KeyDecode(a.(*AddedObject).Hash)
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	// The final hash emitted is the root; it and all of its children must
	// still be retrievable after GC.
	root, err := node.DAG.Get(ctx, last)
	if err != nil {
		t.Fatal(err)
	}
	err = dag.EnumerateChildren(ctx, node.DAG, root, key.NewKeySet())
	if err != nil {
		t.Fatal(err)
	}
}
// TestPeer exercises the Peer constructors (FromBytes, FromPubkey, New) and
// the peer-set helpers (FromStringSlice, Slice/FromSlice, Excludes, Extend),
// including the denied-peers list loaded from denied_peers.txt.
// NOTE(review): the raw-string literals below were reconstructed from a
// whitespace-collapsed source; the JSON is whitespace-insensitive, and the
// denied list is presumably one peer ID per line — confirm against the
// original file.
func TestPeer(t *testing.T) {
	const (
		rootPath = "test"
	)
	jsonstr := `
{
	"max_connection":12,
	"file_dir":".",
	"run_dir":"rundir",
	"option":"option"
}
`
	denied := `
QmZQJubLBGn1JUv8zx24TasR57LnyK1KjUiHWncVqrPxy7
QmU3eUhmUnZxtUJzMtq8YNVz2Q1U75fn2DEgoMXWcCNf6v
QmPhL4RCK1h241eyMBbYxPBv4iBuguPbRB2gGbGjsuXLQv
QmUt5G1QiJePEGKQvjZwcjisoUNUfmKZaATXQcDYJsoEEj
`
	if err := ioutil.WriteFile("denied_peers.txt", []byte(denied), 0644); err != nil {
		t.Error(err)
	}
	if err := ioutil.WriteFile("test.json", []byte(jsonstr), 0644); err != nil {
		t.Error(err)
	}
	var cfg config.Config
	config.Load(rootPath, ".", &cfg)
	cfg.SetDefaults()
	self := NewSelf(&cfg, rootPath)
	m := self.ToPeer()
	// skeys[0], [1] and [3] are in the denied list above; skeys[2] is not.
	skeys := []string{
		"QmZQJubLBGn1JUv8zx24TasR57LnyK1KjUiHWncVqrPxy7",
		"QmU3eUhmUnZxtUJzMtq8YNVz2Q1U75fn2DEgoMXWcCNf6v",
		"QmdrPVCaguLLJM2QnK5qdBDqYwQWQKuq4yHK8E9zX1Et8X",
		"QmPhL4RCK1h241eyMBbYxPBv4iBuguPbRB2gGbGjsuXLQv",
	}
	// Round-trip a peer through its raw key bytes.
	k := key.B58KeyDecode(skeys[0])
	p, err := FromBytes([]byte(k), self)
	if err != nil {
		t.Fatal(err)
	}
	if skeys[0] != p.String() {
		t.Fatal("cannot new")
	}
	if m.Equals(p) {
		t.Fatal("illegal equals")
	}
	// Round-trip our own peer through its marshalled public key.
	b, err := crypto.MarshalPublicKey(self.Pubkey())
	if err != nil {
		t.Fatal(err)
	}
	p, err = FromPubkey(b, self)
	if err != nil {
		t.Fatal(err)
	}
	if self.NodeName().Pretty() != p.String() {
		t.Fatal("cannot new")
	}
	if !m.Equals(p) {
		t.Fatal("illegal equals")
	}
	p, err = New(skeys[0], self)
	if err != nil {
		t.Fatal(err)
	}
	if skeys[0] != p.String() {
		t.Fatal("cannot new")
	}
	// skeys[0] is on the denied list, so it must not be allowed.
	if p.IsAllowed() {
		t.Fatal("illegally allowed")
	}
	// skeys[2] is not on the denied list, so it must be allowed.
	p, err = New(skeys[2], self)
	if err != nil {
		t.Fatal(err)
	}
	if !p.IsAllowed() {
		t.Fatal("illegally denied")
	}
	ps := FromStringSlice(skeys, self)
	strs := ps.Strings()
	if len(strs) != len(ps) {
		t.Fatal("illegally strings")
	}
	// Strings() must return exactly the input keys (in any order).
	for _, s := range strs {
		ok := false
		for _, s2 := range skeys {
			if s == s2 {
				ok = true
			}
		}
		if !ok {
			t.Fatal("illegally strings")
		}
	}
	// Slice/FromSlice must round-trip the set unchanged.
	sl := ps.Slice()
	ps2 := FromSlice(sl)
	if !ps.Equals(ps2) {
		t.Fatal("illegal slicing")
	}
	if len(sl) != len(skeys) {
		t.Fatal("illegal slicing")
	}
	ps3 := FromStringSlice(skeys[1:], self)
	ps4 := ps.Excludes(ps3, 0)
	if len(ps4) != 1 {
		t.Fatal("illegal excludes")
	}
	ps5 := FromStringSlice(skeys[3:], self)
	ps6 := ps.Excludes(ps5, 2)
	if len(ps6) != 2 {
		t.Fatal("illegal excludes")
	}
	// Extending the first two keys with the last three must rebuild the
	// full set.
	ps7 := FromStringSlice(skeys[1:], self)
	ps8 := FromStringSlice(skeys[:2], self)
	ps8.Extend(ps7)
	if !ps.Equals(ps8) {
		t.Fatal("illegal extends")
	}
}
dht, ok := n.Routing.(*ipdht.IpfsDHT) if !ok { res.SetError(ErrNotDHT, cmds.ErrNormal) return } numProviders := 20 outChan := make(chan interface{}) res.SetOutput((<-chan interface{})(outChan)) events := make(chan *notif.QueryEvent) ctx := notif.RegisterForQueryEvents(req.Context(), events) pchan := dht.FindProvidersAsync(ctx, key.B58KeyDecode(req.Arguments()[0]), numProviders) go func() { defer close(outChan) for e := range events { outChan <- e } }() go func() { defer close(events) for p := range pchan { np := p notif.PublishQueryEvent(ctx, ¬if.QueryEvent{ Type: notif.Provider, Responses: []*peer.PeerInfo{&np}, })
} rootp, err := path.ParsePath(req.Arguments()[0]) if err != nil { res.SetError(err, cmds.ErrNormal) return } root, err := core.Resolve(req.Context(), nd, rootp) if err != nil { res.SetError(err, cmds.ErrNormal) return } path := req.Arguments()[1] childk := key.B58KeyDecode(req.Arguments()[2]) create, _, err := req.Option("create").Bool() if err != nil { res.SetError(err, cmds.ErrNormal) return } var createfunc func() *dag.Node if create { createfunc = func() *dag.Node { return &dag.Node{Data: ft.FolderPBData()} } } e := dagutils.NewDagEditor(root, nd.DAG)
} if !nd.OnlineMode() { res.SetError(errNotOnline, cmds.ErrClient) return } bs, ok := nd.Exchange.(*bitswap.Bitswap) if !ok { res.SetError(u.ErrCast(), cmds.ErrNormal) return } var ks []key.Key for _, arg := range req.Arguments() { dec := key.B58KeyDecode(arg) if dec == "" { res.SetError(fmt.Errorf("incorrectly formatted key: %s", arg), cmds.ErrNormal) return } ks = append(ks, dec) } bs.CancelWants(ks) }, } var showWantlistCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "Show blocks currently on the wantlist",
cmds.StringArg("command", true, false, "the operation to perform"), cmds.StringArg("args", true, true, "extra arguments").EnableStdin(), }, Type: Object{}, Run: func(req cmds.Request, res cmds.Response) { nd, err := req.InvocContext().GetNode() if err != nil { res.SetError(err, cmds.ErrNormal) return } rootarg := req.Arguments()[0] if strings.HasPrefix(rootarg, "/ipfs/") { rootarg = rootarg[6:] } rhash := key.B58KeyDecode(rootarg) if rhash == "" { res.SetError(fmt.Errorf("incorrectly formatted root hash: %s", req.Arguments()[0]), cmds.ErrNormal) return } rnode, err := nd.DAG.Get(req.Context(), rhash) if err != nil { res.SetError(err, cmds.ErrNormal) return } action := req.Arguments()[1] switch action { case "add-link":
}, Options: []cmds.Option{}, Arguments: []cmds.Argument{ cmds.StringArg("root", true, false, "the hash of the node to modify"), cmds.StringArg("command", true, false, "the operation to perform"), cmds.StringArg("args", true, true, "extra arguments").EnableStdin(), }, Type: Object{}, Run: func(req cmds.Request, res cmds.Response) { nd, err := req.InvocContext().GetNode() if err != nil { res.SetError(err, cmds.ErrNormal) return } rhash := key.B58KeyDecode(req.Arguments()[0]) if rhash == "" { res.SetError(fmt.Errorf("incorrectly formatted root hash"), cmds.ErrNormal) return } ctx, cancel := context.WithTimeout(req.Context(), time.Second*30) rnode, err := nd.DAG.Get(ctx, rhash) if err != nil { res.SetError(err, cmds.ErrNormal) cancel() return } cancel() action := req.Arguments()[1]
// Fetch pointers from the dht. They will be returned asynchronously. func FindPointersAsync(dht *routing.IpfsDHT, ctx context.Context, mhKey multihash.Multihash, prefixLen int) <-chan peer.PeerInfo { keyhash := createKey(mhKey, prefixLen) peerout := dht.FindProvidersAsync(ctx, key.B58KeyDecode(keyhash.B58String()), 100000) return peerout }