// printing self is special cased as we get values differently. func printSelf(node *core.IpfsNode) (interface{}, error) { info := new(IdOutput) info.ID = node.Identity.Pretty() if node.PrivateKey == nil { if err := node.LoadPrivateKey(); err != nil { return nil, err } } pk := node.PrivateKey.GetPublic() pkb, err := ic.MarshalPublicKey(pk) if err != nil { return nil, err } info.PublicKey = base64.StdEncoding.EncodeToString(pkb) if node.PeerHost != nil { for _, a := range node.PeerHost.Addrs() { s := a.String() + "/ipfs/" + info.ID info.Addresses = append(info.Addresses, s) } } info.ProtocolVersion = identify.IpfsVersion info.AgentVersion = identify.ClientVersion return info, nil }
// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error {
	emptyDir := &mdag.Node{Data: ft.FolderPBData()}
	nodek, err := n.DAG.Add(emptyDir)
	if err != nil {
		return err
	}

	// Pin the empty directory (non-recursively) so it survives GC;
	// the pin call is bounded by a one-minute timeout.
	ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
	defer cancel()
	err = n.Pinning.Pin(ctx, emptyDir, false)
	if err != nil {
		return err
	}
	// Persist the pin state before publishing.
	err = n.Pinning.Flush()
	if err != nil {
		return err
	}

	// NOTE(review): the publish uses the node's own context, not the
	// timed-out ctx above — confirm that is intentional.
	pub := nsys.NewRoutingPublisher(n.Routing)
	err = pub.Publish(n.Context(), key, path.FromKey(nodek))
	if err != nil {
		return err
	}
	return nil
}
func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string]RefKeyObject, error) { keys := make(map[string]RefKeyObject) AddToResultKeys := func(keyList []key.Key, typeStr string) { for _, k := range keyList { keys[k.B58String()] = RefKeyObject{ Type: typeStr, } } } if typeStr == "direct" || typeStr == "all" { AddToResultKeys(n.Pinning.DirectKeys(), "direct") } if typeStr == "indirect" || typeStr == "all" { ks := key.NewKeySet() for _, k := range n.Pinning.RecursiveKeys() { nd, err := n.DAG.Get(ctx, k) if err != nil { return nil, err } err = dag.EnumerateChildren(n.Context(), n.DAG, nd, ks) if err != nil { return nil, err } } AddToResultKeys(ks.Keys(), "indirect") } if typeStr == "recursive" || typeStr == "all" { AddToResultKeys(n.Pinning.RecursiveKeys(), "recursive") } return keys, nil }
func Unpin(n *core.IpfsNode, paths []string, recursive bool) ([]key.Key, error) { // TODO(cryptix): do we want a ctx as first param for (Un)Pin() as well, just like core.Resolve? ctx := n.Context() dagnodes := make([]*merkledag.Node, 0) for _, fpath := range paths { dagnode, err := core.Resolve(ctx, n, path.Path(fpath)) if err != nil { return nil, err } dagnodes = append(dagnodes, dagnode) } var unpinned []key.Key for _, dagnode := range dagnodes { k, _ := dagnode.Key() ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() err := n.Pinning.Unpin(ctx, k, recursive) if err != nil { return nil, err } unpinned = append(unpinned, k) } err := n.Pinning.Flush() if err != nil { return nil, err } return unpinned, nil }
func pinLsAll(typeStr string, ctx context.Context, n *core.IpfsNode) (map[string]RefKeyObject, error) { keys := make(map[string]RefKeyObject) AddToResultKeys := func(keyList []*cid.Cid, typeStr string) { for _, c := range keyList { keys[c.String()] = RefKeyObject{ Type: typeStr, } } } if typeStr == "direct" || typeStr == "all" { AddToResultKeys(n.Pinning.DirectKeys(), "direct") } if typeStr == "indirect" || typeStr == "all" { set := cid.NewSet() for _, k := range n.Pinning.RecursiveKeys() { err := dag.EnumerateChildren(n.Context(), n.DAG, k, set.Visit, false) if err != nil { return nil, err } } AddToResultKeys(set.Keys(), "indirect") } if typeStr == "recursive" || typeStr == "all" { AddToResultKeys(n.Pinning.RecursiveKeys(), "recursive") } return keys, nil }
// AddR recursively adds files in |path|. func AddR(n *core.IpfsNode, root string) (key string, err error) { n.Blockstore.PinLock().Unlock() stat, err := os.Lstat(root) if err != nil { return "", err } f, err := files.NewSerialFile(root, root, false, stat) if err != nil { return "", err } defer f.Close() fileAdder, err := NewAdder(n.Context(), n.Pinning, n.Blockstore, n.DAG) if err != nil { return "", err } err = fileAdder.addFile(f) if err != nil { return "", err } nd, err := fileAdder.Finalize() if err != nil { return "", err } return nd.String(), nil }
// AddWrapped adds data from a reader, and wraps it with a directory object
// to preserve the filename.
// Returns the path of the added file ("<dir hash>/filename"), the DAG node of
// the directory, and an error if any.
func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.Node, error) {
	file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
	fileAdder, err := NewAdder(n.Context(), n, nil)
	if err != nil {
		return "", nil, err
	}
	// Wrap the file in a directory node so the filename is preserved.
	fileAdder.Wrap = true

	// Hold the GC pin-lock until the add completes.
	defer n.Blockstore.PinLock().Unlock()

	err = fileAdder.addFile(file)
	if err != nil {
		return "", nil, err
	}

	dagnode, err := fileAdder.Finalize()
	if err != nil {
		return "", nil, err
	}

	k, err := dagnode.Key()
	if err != nil {
		return "", nil, err
	}

	return gopath.Join(k.String(), filename), dagnode, nil
}
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) { ukey := key.B58KeyDecode(skey) nd, err := n.DAG.Get(n.Context(), ukey) if err != nil { return "", err } mdnode := new(dag.Node) mdata, err := ft.BytesForMetadata(m) if err != nil { return "", err } mdnode.Data = mdata if err := mdnode.AddNodeLinkClean("file", nd); err != nil { return "", err } nk, err := n.DAG.Add(mdnode) if err != nil { return "", err } return nk.B58String(), nil }
func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error) { c, err := cid.Decode(skey) if err != nil { return "", err } nd, err := n.DAG.Get(n.Context(), c) if err != nil { return "", err } mdnode := new(dag.ProtoNode) mdata, err := ft.BytesForMetadata(m) if err != nil { return "", err } mdnode.SetData(mdata) if err := mdnode.AddNodeLinkClean("file", nd); err != nil { return "", err } nk, err := n.DAG.Add(mdnode) if err != nil { return "", err } return nk.String(), nil }
// setupIpnsTest builds (or reuses) an ipfs node wired for offline ipns
// testing, mounts its ipns filesystem, and returns both.
func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) {
	maybeSkipFuseTests(t)

	var err error
	if node == nil {
		// Build a fresh node with its own keypair, offline routing,
		// and an initialized ipns keyspace.
		node, err = core.NewNode(context.Background(), nil)
		if err != nil {
			t.Fatal(err)
		}
		err = node.LoadPrivateKey()
		if err != nil {
			t.Fatal(err)
		}
		node.Routing = offroute.NewOfflineRouter(node.Repo.Datastore(), node.PrivateKey)
		node.Namesys = namesys.NewNameSystem(node.Routing, node.Repo.Datastore(), 0)
		err = InitializeKeyspace(node, node.PrivateKey)
		if err != nil {
			t.Fatal(err)
		}
	}

	fs, err := NewFileSystem(node, node.PrivateKey, "", "")
	if err != nil {
		t.Fatal(err)
	}
	mnt, err := fstest.MountedT(t, fs)
	if err != nil {
		t.Fatal(err)
	}

	return node, mnt
}
// InitializeKeyspace sets the ipns record for the given key to
// point to an empty directory.
func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error {
	emptyDir := ft.EmptyDirNode()
	nodek, err := n.DAG.Add(emptyDir)
	if err != nil {
		return err
	}

	// Derived context is cancelled on return; it bounds both the pin
	// and the publish below.
	ctx, cancel := context.WithCancel(n.Context())
	defer cancel()

	// Pin the empty directory (non-recursively) so it survives GC.
	err = n.Pinning.Pin(ctx, emptyDir, false)
	if err != nil {
		return err
	}
	// Persist the pin state before publishing.
	err = n.Pinning.Flush()
	if err != nil {
		return err
	}

	pub := nsys.NewRoutingPublisher(n.Routing, n.Repo.Datastore())
	if err := pub.Publish(ctx, key, path.FromCid(nodek)); err != nil {
		return err
	}
	return nil
}
// updateApply applies an update of the ipfs binary and shuts down the node if successful
func updateApply(n *core.IpfsNode) (*UpdateOutput, error) {
	// TODO: 'force bool' param that stops the daemon (if running) before update
	output := &UpdateOutput{
		OldVersion: updates.Version,
	}

	u, err := updates.CheckForUpdate()
	if err != nil {
		return nil, err
	}
	// A nil update with no error means we are already up to date.
	if u == nil {
		output.NewVersion = updates.Version
		return output, nil
	}

	output.NewVersion = u.Version
	// Refuse to replace the binary while a daemon is running.
	if n.OnlineMode() {
		return nil, errors.New(`You must stop the IPFS daemon before updating.`)
	}
	if err = updates.Apply(u); err != nil {
		return nil, err
	}
	return output, nil
}
func Cat(n *core.IpfsNode, pstr string) (io.Reader, error) { p := path.FromString(pstr) dagNode, err := n.Resolver.ResolvePath(n.Context(), p) if err != nil { return nil, err } return uio.NewDagReader(n.Context(), dagNode, n.DAG) }
func addNode(n *core.IpfsNode, node *merkledag.Node) error { if err := n.DAG.AddRecursive(node); err != nil { // add the file to the graph + local storage return err } ctx, cancel := context.WithCancel(n.Context()) defer cancel() err := n.Pinning.Pin(ctx, node, true) // ensure we keep it return err }
func Dial(nd *core.IpfsNode, p peer.ID, protocol string) (net.Stream, error) { ctx, cancel := context.WithTimeout(nd.Context(), time.Second*30) defer cancel() err := nd.PeerHost.Connect(ctx, pstore.PeerInfo{ID: p}) if err != nil { return nil, err } return nd.PeerHost.NewStream(nd.Context(), p, pro.ID(protocol)) }
// Mount mounts ipfs at a given location, and returns a mount.Mount instance. func Mount(ipfs *core.IpfsNode, mountpoint string) (mount.Mount, error) { cfg, err := ipfs.Repo.Config() if err != nil { return nil, err } allow_other := cfg.Mounts.FuseAllowOther fsys := NewFileSystem(ipfs) return mount.NewMount(ipfs.Process(), fsys, mountpoint, allow_other) }
func Metadata(n *core.IpfsNode, skey string) (*ft.Metadata, error) { ukey := key.B58KeyDecode(skey) nd, err := n.DAG.Get(n.Context(), ukey) if err != nil { return nil, err } return ft.MetadataFromBytes(nd.Data) }
// Mount mounts ipns at a given location, and returns a mount.Mount instance. func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { cfg := ipfs.Repo.Config() allow_other := cfg.Mounts.FuseAllowOther fsys, err := NewFileSystem(ipfs, ipfs.PrivateKey, ipfsmp, ipnsmp) if err != nil { return nil, err } return mount.NewMount(ipfs.Process(), fsys, ipnsmp, allow_other) }
//NewSelf make repo if needed ,starts daemon and returns Self obj.
func NewSelf(cfg *config.Config, rootPath string) *Self {
	InitRepo(cfg.IpfsRepo)

	// Key of an empty merkledag node, used below as a probe value for
	// Provide to check that the node has routing peers.
	n := &merkledag.Node{}
	k, err := n.Key()
	log.IfFatal(err)

	//workaround "ERROR bitswap: failed to find any peer in table"
	i := 0
	var node *core.IpfsNode
	var ctx context.Context
	var cancel context.CancelFunc
	// Rebuild the node up to 10 times until a Provide succeeds.
	for i = 0; i < 10; i++ {
		log.Println("setting up node...")
		r, err := fsrepo.Open(cfg.IpfsRepo)
		log.IfFatal(err)
		ctx, cancel = context.WithCancel(context.Background())
		node, err = core.NewNode(ctx, &core.BuildCfg{
			Repo:   r,
			Online: true,
		})
		log.IfFatal(err)
		if err := node.Routing.Provide(ctx, k); log.If(err) {
			// Provide failed: tear the node down and retry.
			cancel()
			log.If(node.Close())
			log.Println("retrying...")
			continue
		}
		break
	}
	// i reaches 10 only when every attempt failed.
	if i == 10 {
		log.Fatal("cannot provide a key to network")
	}

	self := &Self{
		RootPath: rootPath,
		ipfsNode: node,
		ctx:      ctx,
		cancel:   cancel,
		cfg:      cfg,
	}
	self.follow = FromStringSlice(cfg.FollowPeers, self)

	// Load this peer's root DAG node; fall back to an empty node when absent.
	parent, err := self.ToPeer().GetDAGNode("")
	if log.If(err) {
		parent = &merkledag.Node{}
	}
	self.myIpns, err = parent.Key()
	log.IfFatal(err)
	// First run: the root link is missing, so create the initial DAG layout.
	if _, err = parent.GetNodeLink(rootPath); err != nil {
		log.Println("initializing DAGs for saving status")
		self.makeInitNodes(parent)
	}
	return self
}
// SetupOpenBazaarService constructs the OpenBazaar service around the given
// ipfs node, registers its stream handler on the host, stores it in the
// package-level OBService variable, and returns it.
func SetupOpenBazaarService(node *core.IpfsNode, broadcast chan []byte, ctx commands.Context, datastore repo.Datastore) *OpenBazaarService {
	OBService = &OpenBazaarService{
		host:      node.PeerHost.(host.Host),
		self:      node.Identity,
		peerstore: node.PeerHost.Peerstore(),
		cmdCtx:    ctx,
		ctx:       node.Context(),
		broadcast: broadcast,
		datastore: datastore,
	}
	// Incoming streams on the OpenBazaar protocol go to HandleNewStream.
	node.PeerHost.SetStreamHandler(ProtocolOpenBazaar, OBService.HandleNewStream)
	log.Infof("OpenBazaar service running at %s", ProtocolOpenBazaar)
	return OBService
}
// printSwarmAddrs prints the addresses of the host func printSwarmAddrs(node *core.IpfsNode) { if !node.OnlineMode() { fmt.Println("Swarm not listening, running in offline mode.") return } var addrs []string for _, addr := range node.PeerHost.Addrs() { addrs = append(addrs, addr.String()) } sort.Sort(sort.StringSlice(addrs)) for _, addr := range addrs { fmt.Printf("Swarm listening on %s\n", addr) } }
func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.Node) []string { if len(n.Links) == 0 { return []string{name} } var out []string for _, lnk := range n.Links { child, err := lnk.GetNode(ipfs.Context(), ipfs.DAG) if err != nil { t.Fatal(err) } sub := getPaths(t, ipfs, path.Join(name, lnk.Name), child) out = append(out, sub...) } return out }
// Add builds a merkledag from the a reader, pinning all objects to the local // datastore. Returns a key representing the root node. func Add(n *core.IpfsNode, r io.Reader) (string, error) { defer n.Blockstore.PinLock().Unlock() fileAdder, err := NewAdder(n.Context(), n.Pinning, n.Blockstore, n.DAG) if err != nil { return "", err } node, err := fileAdder.add(r) if err != nil { return "", err } return node.Cid().String(), nil }
// setupIpnsTest builds (or reuses) a mock ipfs node with an ipns
// filesystem, mounts it, and returns both.
func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) {
	maybeSkipFuseTests(t)

	var err error
	if node == nil {
		node, err = coremock.NewMockNode()
		if err != nil {
			t.Fatal(err)
		}
		// Attach an ipns filesystem backed by the mock node's services.
		ipnsfs, err := nsfs.NewFilesystem(context.TODO(), node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
		if err != nil {
			t.Fatal(err)
		}
		node.IpnsFs = ipnsfs
	}

	fs, err := NewFileSystem(node, node.PrivateKey, "", "")
	if err != nil {
		t.Fatal(err)
	}
	mnt, err := fstest.MountedT(t, fs)
	if err != nil {
		t.Fatal(err)
	}

	return node, mnt
}
// Serve accepts http connections on lis using a handler built from the given
// options, and shuts the server down when the node's process closes. It
// returns the server's error, or nil when shutdown was deliberate.
func Serve(node *core.IpfsNode, lis net.Listener, options ...ServeOption) error {
	handler, err := makeHandler(node, lis, options...)
	if err != nil {
		return err
	}

	addr, err := manet.FromNetAddr(lis.Addr())
	if err != nil {
		return err
	}

	// if the server exits beforehand
	var serverError error
	serverExited := make(chan struct{})

	// Run the blocking http.Serve under the node's process tree.
	node.Process().Go(func(p goprocess.Process) {
		serverError = http.Serve(lis, handler)
		close(serverExited)
	})

	// wait for server to exit.
	select {
	case <-serverExited:

	// if node being closed before server exits, close server
	case <-node.Process().Closing():
		log.Infof("server at %s terminating...", addr)
		// Closing the listener makes http.Serve return.
		lis.Close()

	outer:
		for {
			// wait until server exits
			select {
			case <-serverExited:
				// if the server exited as we are closing, we really dont care about errors
				serverError = nil
				break outer
			case <-time.After(5 * time.Second):
				// Keep logging so a slow shutdown is visible.
				log.Infof("waiting for server at %s to terminate...", addr)
			}
		}
	}

	log.Infof("server at %s terminated", addr)
	return serverError
}
func Metadata(n *core.IpfsNode, skey string) (*ft.Metadata, error) { c, err := cid.Decode(skey) if err != nil { return nil, err } nd, err := n.DAG.Get(n.Context(), c) if err != nil { return nil, err } pbnd, ok := nd.(*dag.ProtoNode) if !ok { return nil, dag.ErrNotProtobuf } return ft.MetadataFromBytes(pbnd.Data()) }
// Mount mounts ipns at a given location, and returns a mount.Mount instance. func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { cfg, err := ipfs.Repo.Config() if err != nil { return nil, err } allow_other := cfg.Mounts.FuseAllowOther if err := ipfs.SetupOfflineRouting(); err != nil { log.Errorf("failed to setup offline routing: %s", err) } fsys, err := NewFileSystem(ipfs, ipfs.PrivateKey, ipfsmp, ipnsmp) if err != nil { return nil, err } return mount.NewMount(ipfs.Process(), fsys, ipnsmp, allow_other) }
// Mount mounts ipns at a given location, and returns a mount.Mount instance. func Mount(ipfs *core.IpfsNode, ipnsmp, ipfsmp string) (mount.Mount, error) { cfg := ipfs.Repo.Config() allow_other := cfg.Mounts.FuseAllowOther if ipfs.IpnsFs == nil { fs, err := ipnsfs.NewFilesystem(ipfs.Context(), ipfs.DAG, ipfs.Namesys, ipfs.Pinning, ipfs.PrivateKey) if err != nil { return nil, err } ipfs.IpnsFs = fs } fsys, err := NewFileSystem(ipfs, ipfs.PrivateKey, ipfsmp, ipnsmp) if err != nil { return nil, err } return mount.NewMount(ipfs.Process(), fsys, ipnsmp, allow_other) }
// Add builds a merkledag from the a reader, pinning all objects to the local // datastore. Returns a key representing the root node. func Add(n *core.IpfsNode, r io.Reader) (string, error) { defer n.Blockstore.PinLock().Unlock() fileAdder, err := NewAdder(n.Context(), n, nil) if err != nil { return "", err } node, err := fileAdder.add(r) if err != nil { return "", err } k, err := node.Key() if err != nil { return "", err } return k.String(), nil }
func Listen(nd *core.IpfsNode, protocol string) (*ipfsListener, error) { ctx, cancel := context.WithCancel(nd.Context()) list := &ipfsListener{ proto: pro.ID(protocol), conCh: make(chan net.Stream), ctx: ctx, cancel: cancel, } nd.PeerHost.SetStreamHandler(list.proto, func(s net.Stream) { select { case list.conCh <- s: case <-ctx.Done(): s.Close() } }) return list, nil }