func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Get providers for all entries in wantlist (could take a while)
	wg := sync.WaitGroup{}
	for _, e := range entries {
		wg.Add(1)
		go func(k key.Key) {
			defer wg.Done()

			child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
			defer cancel()
			providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
			for prov := range providers {
				go func(p peer.ID) {
					bs.network.ConnectTo(ctx, p)
				}(prov)
			}
		}(e.Key)
	}

	wg.Wait() // make sure all our children do finish.
}
// newKeyRoot creates a new KeyRoot for the given key, and starts up a
// republisher routine for it.
func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot, error) {
	hash, err := k.GetPublic().Hash()
	if err != nil {
		return nil, err
	}

	name := "/ipns/" + key.Key(hash).String()

	root := new(KeyRoot)
	root.key = k
	root.fs = fs
	root.name = name

	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	pointsTo, err := fs.nsys.Resolve(ctx, name)
	if err != nil {
		err = namesys.InitializeKeyspace(ctx, fs.dserv, fs.nsys, fs.pins, k)
		if err != nil {
			return nil, err
		}

		pointsTo, err = fs.nsys.Resolve(ctx, name)
		if err != nil {
			return nil, err
		}
	}

	mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
	if err != nil {
		log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
		return nil, err
	}

	root.node = mnode

	root.repub = NewRepublisher(root, time.Millisecond*300, time.Second*3)
	go root.repub.Run(parent)

	pbn, err := ft.FromBytes(mnode.Data)
	if err != nil {
		log.Error("IPNS pointer was not unixfs node")
		return nil, err
	}

	switch pbn.GetType() {
	case ft.TDirectory:
		root.val = NewDirectory(ctx, pointsTo.String(), mnode, root, fs)
	case ft.TFile, ft.TMetadata, ft.TRaw:
		fi, err := NewFile(pointsTo.String(), mnode, root, fs)
		if err != nil {
			return nil, err
		}
		root.val = fi
	default:
		panic("unrecognized! (NYI)")
	}

	return root, nil
}
func (rp *Republisher) republishEntries(p goprocess.Process) error {
	ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p))
	defer cancel()

	for id := range rp.entries {
		log.Debugf("republishing ipns entry for %s", id)
		priv := rp.ps.PrivKey(id)

		// Look for it locally only
		_, ipnskey := namesys.IpnsKeysForID(id)
		p, seq, err := rp.getLastVal(ipnskey)
		if err != nil {
			if err == errNoEntry {
				continue
			}
			return err
		}

		// update record with same sequence number
		eol := time.Now().Add(rp.RecordLifetime)
		err = namesys.PutRecordToRouting(ctx, priv, p, seq, eol, rp.r, id)
		if err != nil {
			return err
		}
	}

	return nil
}
func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) {
	dagnodes := make([]*merkledag.Node, 0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
		if err != nil {
			return nil, fmt.Errorf("pin: %s", err)
		}
		dagnodes = append(dagnodes, dagnode)
	}

	var out []key.Key
	for _, dagnode := range dagnodes {
		k, err := dagnode.Key()
		if err != nil {
			return nil, err
		}

		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		err = n.Pinning.Pin(ctx, dagnode, recursive)
		if err != nil {
			return nil, fmt.Errorf("pin: %s", err)
		}
		out = append(out, k)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}

	return out, nil
}
func Unpin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]key.Key, error) {
	dagnodes := make([]*merkledag.Node, 0)
	for _, fpath := range paths {
		dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
		if err != nil {
			return nil, err
		}
		dagnodes = append(dagnodes, dagnode)
	}

	var unpinned []key.Key
	for _, dagnode := range dagnodes {
		k, _ := dagnode.Key()

		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		err := n.Pinning.Unpin(ctx, k, recursive)
		if err != nil {
			return nil, err
		}
		unpinned = append(unpinned, k)
	}

	err := n.Pinning.Flush()
	if err != nil {
		return nil, err
	}
	return unpinned, nil
}
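// Pin and Unpin above call context.WithCancel with a deferred cancel inside the
// loop body, so none of those cancel funcs run until the whole call returns.
// Below is a minimal, standalone sketch of scoping the derived context to a
// single iteration instead. processAll and processItem are invented names for
// illustration, not part of the go-ipfs API.
package main

import (
	"context"
	"fmt"
	"time"
)

// processItem stands in for a per-item operation such as Pinning.Pin.
func processItem(ctx context.Context, item string) error {
	select {
	case <-time.After(10 * time.Millisecond):
		fmt.Println("processed", item)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func processAll(ctx context.Context, items []string) error {
	for _, it := range items {
		// Wrap each iteration in a closure so the deferred cancel runs per
		// item instead of piling up until processAll returns.
		err := func() error {
			ictx, cancel := context.WithCancel(ctx)
			defer cancel()
			return processItem(ictx, it)
		}()
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = processAll(context.Background(), []string{"a", "b", "c"})
}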
func (dm *DagModifier) readPrep() error {
	err := dm.Sync()
	if err != nil {
		return err
	}

	if dm.read == nil {
		ctx, cancel := context.WithCancel(dm.ctx)
		dr, err := uio.NewDagReader(ctx, dm.curNode, dm.dagserv)
		if err != nil {
			return err
		}

		i, err := dr.Seek(int64(dm.curWrOff), os.SEEK_SET)
		if err != nil {
			return err
		}

		if i != int64(dm.curWrOff) {
			return ErrSeekFail
		}

		dm.readCancel = cancel
		dm.read = dr
	}

	return nil
}
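// A standalone sketch of the pattern readPrep uses above: keep the CancelFunc
// next to the context in the owning struct so a later Close (or re-sync) can
// tear down whatever was started under that context. lazyReader and
// newLazyReader are invented names, not the go-ipfs API.
package main

import (
	"context"
	"fmt"
)

type lazyReader struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func newLazyReader(parent context.Context) *lazyReader {
	ctx, cancel := context.WithCancel(parent)
	return &lazyReader{ctx: ctx, cancel: cancel}
}

// Close releases anything started under r.ctx.
func (r *lazyReader) Close() error {
	r.cancel()
	return nil
}

func main() {
	r := newLazyReader(context.Background())
	fmt.Println("before Close:", r.ctx.Err()) // <nil>
	r.Close()
	fmt.Println("after Close:", r.ctx.Err()) // context canceled
}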
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
// WithDeadlineFraction returns a Context with a fraction of the
// original context's timeout. This is useful in sequential pipelines
// of work, where one might try options and fall back to others
// depending on the time available, or failure to respond. For example:
//
//  // getPicture returns a picture from our encrypted database
//  // we have a pipeline of multiple steps. we need to:
//  //  - get the data from a database
//  //  - decrypt it
//  //  - apply many transforms
//  //
//  // we **know** that each step takes increasingly more time.
//  // The transforms are much more expensive than decryption, and
//  // decryption is more expensive than the database lookup.
//  // If our database takes too long (i.e. >0.2 of available time),
//  // there's no use in continuing.
//  func getPicture(ctx context.Context, key string) ([]byte, error) {
//    // fractional timeout contexts to the rescue!
//
//    // try the database with 0.2 of remaining time.
//    ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2)
//    val, err := db.Get(ctx1, key)
//    if err != nil {
//      return nil, err
//    }
//
//    // try decryption with 0.3 of remaining time.
//    ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3)
//    if val, err = decryptor.Decrypt(ctx2, val); err != nil {
//      return nil, err
//    }
//
//    // try transforms with all remaining time. hopefully it's enough!
//    return transformer.Transform(ctx, val)
//  }
//
func WithDeadlineFraction(ctx context.Context, fraction float64) (
	context.Context, context.CancelFunc) {

	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
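// A standalone check of WithDeadlineFraction's behaviour: the child's deadline
// should land at roughly the given fraction of the parent's remaining time.
// The helper is duplicated locally (as withDeadlineFraction) so this sketch
// compiles on its own; it mirrors the function above rather than importing it.
package main

import (
	"context"
	"fmt"
	"time"
)

func withDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) {
	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}
	left := time.Until(d)
	if left < 0 { // already passed
		return context.WithCancel(ctx)
	}
	return context.WithTimeout(ctx, time.Duration(float64(left)*fraction))
}

func main() {
	parent, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	child, childCancel := withDeadlineFraction(parent, 0.2)
	defer childCancel()

	pd, _ := parent.Deadline()
	cd, _ := child.Deadline()
	fmt.Printf("parent expires in ~%v, child in ~%v\n",
		time.Until(pd).Round(50*time.Millisecond),
		time.Until(cd).Round(50*time.Millisecond))
	// Expected output (approximately): parent expires in ~1s, child in ~200ms
}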
// WithProcessClosing returns a context.Context derived from ctx that
// is cancelled as p is Closing (after: <-p.Closing()). It is simply:
//
//  func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
//    ctx, cancel := context.WithCancel(ctx)
//    go func() {
//      <-p.Closing()
//      cancel()
//    }()
//    return ctx
//  }
//
func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		<-p.Closing()
		cancel()
	}()
	return ctx
}
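// A standalone usage sketch for WithProcessClosing: closing the goprocess
// cancels the derived context. The helper is duplicated locally so the sketch
// compiles on its own; the import path assumed here is the upstream
// github.com/jbenet/goprocess package.
package main

import (
	"context"
	"fmt"

	goprocess "github.com/jbenet/goprocess"
)

func withProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		<-p.Closing()
		cancel()
	}()
	return ctx
}

func main() {
	p := goprocess.WithParent(goprocess.Background())
	ctx := withProcessClosing(context.Background(), p)

	p.Close()    // closing the process...
	<-ctx.Done() // ...cancels the derived context
	fmt.Println("context cancelled:", ctx.Err())
}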
func addNode(n *core.IpfsNode, node *merkledag.Node) error {
	if err := n.DAG.AddRecursive(node); err != nil { // add the file to the graph + local storage
		return err
	}

	ctx, cancel := context.WithCancel(n.Context())
	defer cancel()

	err := n.Pinning.Pin(ctx, node, true) // ensure we keep it
	return err
}
// Run runs the query at hand. pass in a list of peers to use first.
func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	runner := newQueryRunner(q)
	return runner.Run(ctx, peers)
}
func NewDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
	fctx, cancel := context.WithCancel(ctx)
	promises := serv.GetDAG(fctx, n)
	return &DagReader{
		node:     n,
		serv:     serv,
		buf:      NewRSNCFromBytes(pb.GetData()),
		promises: promises,
		ctx:      fctx,
		cancel:   cancel,
		pbdata:   pb,
	}
}
func get(path, outFile string) error {
	start := time.Now()

	ctx, cancel := context.WithCancel(context.Background())
	node, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
	})
	if err != nil {
		return fmt.Errorf("ipfs NewNode() failed: %s", err)
	}

	err = node.Bootstrap(core.DefaultBootstrapConfig)
	if err != nil {
		return fmt.Errorf("node Bootstrap() failed: %s", err)
	}
	fmt.Fprintf(os.Stderr, "IPFS Node bootstrapping (took %v)\n", time.Since(start))

	// Cancel the ipfs node context if the process gets interrupted or killed.
	go func() {
		interrupts := make(chan os.Signal, 1)
		signal.Notify(interrupts, os.Interrupt, os.Kill)
		<-interrupts
		cancel()
	}()

	reader, length, err := cat(node.Context(), node, path)
	if err != nil {
		return fmt.Errorf("cat failed: %s", err)
	}

	file, err := os.Create(outFile)
	if err != nil {
		return fmt.Errorf("Creating output file %q failed: %s", outFile, err)
	}

	bar := pb.New(int(length)).SetUnits(pb.U_BYTES)
	bar.Output = os.Stderr
	bar.ShowSpeed = false
	bar.Start()

	writer := io.MultiWriter(file, bar)
	if _, err := io.Copy(writer, reader); err != nil {
		return fmt.Errorf("copy failed: %s", err)
	}
	bar.Finish()

	fmt.Fprintf(os.Stderr, "Wrote %q to %q (%s) (took %v)\n",
		path, outFile, humanize.Bytes(length), time.Since(start))

	return nil
}
// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(ctx context.Context, key key.Key, value []byte) error {
	log.Debugf("PutValue %s", key)

	sk, err := dht.getOwnPrivateKey()
	if err != nil {
		return err
	}

	sign, err := dht.Validator.IsSigned(key)
	if err != nil {
		return err
	}

	rec, err := record.MakePutRecord(sk, key, value, sign)
	if err != nil {
		log.Debug("Creation of record failed!")
		return err
	}

	err = dht.putLocal(key, rec)
	if err != nil {
		return err
	}

	pchan, err := dht.GetClosestPeers(ctx, key)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	for p := range pchan {
		wg.Add(1)
		go func(p peer.ID) {
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			defer wg.Done()

			notif.PublishQueryEvent(ctx, &notif.QueryEvent{
				Type: notif.Value,
				ID:   p,
			})

			err := dht.putValueToPeer(ctx, p, key, rec)
			if err != nil {
				log.Debugf("failed putting value to peer: %s", err)
			}
		}(p)
	}
	wg.Wait()
	return nil
}
func GarbageCollect(n *core.IpfsNode, ctx context.Context) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // in case error occurs during operation

	keychan, err := n.Blockstore.AllKeysChan(ctx)
	if err != nil {
		return err
	}

	for k := range keychan { // rely on AllKeysChan to close chan
		if !n.Pinning.IsPinned(k) {
			err := n.Blockstore.DeleteBlock(k)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
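// GarbageCollect above derives its working context from context.Background()
// rather than from the ctx argument, so the caller cannot interrupt the key
// scan. A standalone sketch of the alternative wiring, where cancelling the
// caller's context stops the loop; sweep is an invented stand-in, not the
// go-ipfs GC.
package main

import (
	"context"
	"fmt"
	"time"
)

func sweep(parent context.Context) error {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	for i := 0; ; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err() // caller gave up; stop sweeping
		case <-time.After(10 * time.Millisecond):
			fmt.Println("swept batch", i)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	fmt.Println("sweep ended:", sweep(ctx))
}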
// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, nice bool) exchange.Interface {

	// important to use provided parent context (since it may include important
	// loggable data). It's probably not a good idea to allow bitswap to be
	// coupled to the concerns of the IPFS daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data
	ctx, cancelFunc := context.WithCancel(parent)

	notif := notifications.New()
	px := process.WithTeardown(func() error {
		notif.Shutdown()
		return nil
	})

	bs := &Bitswap{
		self:          p,
		blockstore:    bstore,
		notifications: notif,
		engine:        decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
		network:       network,
		findKeys:      make(chan *blockRequest, sizeBatchRequestChan),
		process:       px,
		newBlocks:     make(chan *blocks.Block, HasBlockBufferSize),
		provideKeys:   make(chan key.Key, provideKeysBufferSize),
		wm:            NewWantManager(ctx, network),
	}
	go bs.wm.Run()
	network.SetDelegate(bs)

	// Start up bitswaps async worker routines
	bs.startWorkers(px, ctx)

	// bind the context and process.
	// do it over here to avoid closing before all setup is done.
	go func() {
		<-px.Closing() // process closes first
		cancelFunc()
	}()
	procctx.CloseAfterContext(px, ctx) // parent cancelled first

	return bs
}
// Get retrieves a node from the dagService, fetching the block in the BlockService
func (n *dagService) Get(ctx context.Context, k key.Key) (*Node, error) {
	if n == nil {
		return nil, fmt.Errorf("dagService is nil")
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	b, err := n.Blocks.GetBlock(ctx, k)
	if err != nil {
		if err == bserv.ErrNotFound {
			return nil, ErrNotFound
		}
		return nil, err
	}

	return Decoded(b.Data)
}
// GetNodes returns an array of 'NodeGetter' promises, with each corresponding
// to the key with the same index as the passed in keys
func (ds *dagService) GetNodes(ctx context.Context, keys []key.Key) []NodeGetter {

	// Early out if no work to do
	if len(keys) == 0 {
		return nil
	}

	promises := make([]NodeGetter, len(keys))
	sendChans := make([]chan<- *Node, len(keys))
	for i := range keys {
		promises[i], sendChans[i] = newNodePromise(ctx)
	}

	dedupedKeys := dedupeKeys(keys)
	go func() {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		blkchan := ds.Blocks.GetBlocks(ctx, dedupedKeys)

		for count := 0; count < len(keys); {
			select {
			case blk, ok := <-blkchan:
				if !ok {
					return
				}

				nd, err := Decoded(blk.Data)
				if err != nil {
					// NB: can happen with improperly formatted input data
					log.Debug("Got back bad block!")
					return
				}

				is := FindLinks(keys, blk.Key(), 0)
				for _, i := range is {
					count++
					sendChans[i] <- nd
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return promises
}
func newSecureSession(ctx context.Context, local peer.ID, key ci.PrivKey, insecure io.ReadWriteCloser) (*secureSession, error) {
	s := &secureSession{localPeer: local, localKey: key}
	s.ctx, s.cancel = context.WithCancel(ctx)

	switch {
	case s.localPeer == "":
		return nil, errors.New("no local id provided")
	case s.localKey == nil:
		return nil, errors.New("no local private key provided")
	case !s.localPeer.MatchesPrivateKey(s.localKey):
		return nil, fmt.Errorf("peer.ID does not match PrivateKey")
	case insecure == nil:
		return nil, fmt.Errorf("insecure ReadWriter is nil")
	}

	// keep the cancellable s.ctx derived above so s.cancel can tear the session down
	s.insecure = insecure
	s.insecureM = msgio.NewReadWriter(insecure)
	return s, nil
}
func Listen(nd *core.IpfsNode, protocol string) (*ipfsListener, error) {
	ctx, cancel := context.WithCancel(nd.Context())

	list := &ipfsListener{
		proto:  pro.ID(protocol),
		conCh:  make(chan net.Stream),
		ctx:    ctx,
		cancel: cancel,
	}

	nd.PeerHost.SetStreamHandler(list.proto, func(s net.Stream) {
		select {
		case list.conCh <- s:
		case <-ctx.Done():
			s.Close()
		}
	})

	return list, nil
}
// IPNSHostnameOption rewrites an incoming request if its Host: header contains
// an IPNS name.
// The rewritten request points at the resolved name on the gateway handler.
func IPNSHostnameOption() ServeOption {
	return func(n *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
		childMux := http.NewServeMux()
		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			ctx, cancel := context.WithCancel(n.Context())
			defer cancel()

			host := strings.SplitN(r.Host, ":", 2)[0]
			if len(host) > 0 && isd.IsDomain(host) {
				name := "/ipns/" + host
				if _, err := n.Namesys.Resolve(ctx, name); err == nil {
					r.Header["X-IPNS-Original-Path"] = []string{r.URL.Path}
					r.URL.Path = name + r.URL.Path
				}
			}
			childMux.ServeHTTP(w, r)
		})
		return childMux, nil
	}
}
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k key.Key) (*blocks.Block, error) {

	// Any async work initiated by this function must end when this function
	// returns. To ensure this, derive a new context. Note that it is okay to
	// listen on parent in this scope, but NOT okay to pass |parent| to
	// functions called by this one. Otherwise those functions won't return
	// when this context's cancel func is executed. This is difficult to
	// enforce. May this comment keep you safe.
	ctx, cancelFunc := context.WithCancel(parent)

	ctx = logging.ContextWithLoggable(ctx, logging.Uuid("GetBlockRequest"))
	log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	defer log.Event(ctx, "Bitswap.GetBlockRequest.End", &k)

	defer func() {
		cancelFunc()
	}()

	promise, err := bs.GetBlocks(ctx, []key.Key{k})
	if err != nil {
		return nil, err
	}

	select {
	case block, ok := <-promise:
		if !ok {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				return nil, errors.New("promise channel was closed")
			}
		}
		return block, nil
	case <-parent.Done():
		return nil, parent.Err()
	}
}
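// A caller-side sketch of the contract GetBlock documents above: the retrieval
// is bounded by the deadline on the context the caller passes in. fetch is a
// generic stand-in for a blocking lookup, not the Bitswap API.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func fetch(ctx context.Context, key string) (string, error) {
	select {
	case <-time.After(200 * time.Millisecond): // pretend network latency
		return "block:" + key, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	if _, err := fetch(ctx, "somekey"); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("gave up: deadline exceeded")
	}
}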
func (c *serveConn) serve(r fuse.Request) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() req := &serveRequest{Request: r, cancel: cancel} c.debug(request{ Op: opName(r), Request: r.Hdr(), In: r, }) var node Node var snode *serveNode c.meta.Lock() hdr := r.Hdr() if id := hdr.Node; id != 0 { if id < fuse.NodeID(len(c.node)) { snode = c.node[uint(id)] } if snode == nil { c.meta.Unlock() c.debug(response{ Op: opName(r), Request: logResponseHeader{ID: hdr.ID}, Error: fuse.ESTALE.ErrnoName(), // this is the only place that sets both Error and // Out; not sure if i want to do that; might get rid // of len(c.node) things altogether Out: logMissingNode{ MaxNode: fuse.NodeID(len(c.node)), }, }) r.RespondError(fuse.ESTALE) return } node = snode.node } if c.req[hdr.ID] != nil { // This happens with OSXFUSE. Assume it's okay and // that we'll never see an interrupt for this one. // Otherwise everything wedges. TODO: Report to OSXFUSE? // // TODO this might have been because of missing done() calls } else { c.req[hdr.ID] = req } c.meta.Unlock() // Call this before responding. // After responding is too late: we might get another request // with the same ID and be very confused. done := func(resp interface{}) { msg := response{ Op: opName(r), Request: logResponseHeader{ID: hdr.ID}, } if err, ok := resp.(error); ok { msg.Error = err.Error() if ferr, ok := err.(fuse.ErrorNumber); ok { errno := ferr.Errno() msg.Errno = errno.ErrnoName() if errno == err { // it's just a fuse.Errno with no extra detail; // skip the textual message for log readability msg.Error = "" } } else { msg.Errno = fuse.DefaultErrno.ErrnoName() } } else { msg.Out = resp } c.debug(msg) c.meta.Lock() delete(c.req, hdr.ID) c.meta.Unlock() } defer func() { if rec := recover(); rec != nil { const size = 1 << 16 buf := make([]byte, size) n := runtime.Stack(buf, false) buf = buf[:n] log.Printf("fuse: panic in handler for %v: %v\n%s", r, rec, buf) err := handlerPanickedError{ Request: r, Err: rec, } done(err) r.RespondError(err) } }() switch r := r.(type) { default: // Note: To FUSE, ENOSYS means "this server never implements this request." // It would be inappropriate to return ENOSYS for other operations in this // switch that might only be unavailable in some contexts, not all. done(fuse.ENOSYS) r.RespondError(fuse.ENOSYS) // FS operations. case *fuse.InitRequest: s := &fuse.InitResponse{ MaxWrite: 128 * 1024, Flags: fuse.InitBigWrites, } if fs, ok := c.fs.(FSIniter); ok { if err := fs.Init(ctx, r, s); err != nil { done(err) r.RespondError(err) break } } done(s) r.Respond(s) case *fuse.StatfsRequest: s := &fuse.StatfsResponse{} if fs, ok := c.fs.(FSStatfser); ok { if err := fs.Statfs(ctx, r, s); err != nil { done(err) r.RespondError(err) break } } done(s) r.Respond(s) // Node operations. 
case *fuse.GetattrRequest: s := &fuse.GetattrResponse{} if n, ok := node.(NodeGetattrer); ok { if err := n.Getattr(ctx, r, s); err != nil { done(err) r.RespondError(err) break } } else { if err := snode.attr(ctx, &s.Attr); err != nil { done(err) r.RespondError(err) break } } done(s) r.Respond(s) case *fuse.SetattrRequest: s := &fuse.SetattrResponse{} if n, ok := node.(NodeSetattrer); ok { if err := n.Setattr(ctx, r, s); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) break } if err := snode.attr(ctx, &s.Attr); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) case *fuse.SymlinkRequest: s := &fuse.SymlinkResponse{} initLookupResponse(&s.LookupResponse) n, ok := node.(NodeSymlinker) if !ok { done(fuse.EIO) // XXX or EPERM like Mkdir? r.RespondError(fuse.EIO) break } n2, err := n.Symlink(ctx, r) if err != nil { done(err) r.RespondError(err) break } if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.NewName, n2); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) case *fuse.ReadlinkRequest: n, ok := node.(NodeReadlinker) if !ok { done(fuse.EIO) /// XXX or EPERM? r.RespondError(fuse.EIO) break } target, err := n.Readlink(ctx, r) if err != nil { done(err) r.RespondError(err) break } done(target) r.Respond(target) case *fuse.LinkRequest: n, ok := node.(NodeLinker) if !ok { done(fuse.EIO) /// XXX or EPERM? r.RespondError(fuse.EIO) break } c.meta.Lock() var oldNode *serveNode if int(r.OldNode) < len(c.node) { oldNode = c.node[r.OldNode] } c.meta.Unlock() if oldNode == nil { c.debug(logLinkRequestOldNodeNotFound{ Request: r.Hdr(), In: r, }) done(fuse.EIO) r.RespondError(fuse.EIO) break } n2, err := n.Link(ctx, r, oldNode.node) if err != nil { done(err) r.RespondError(err) break } s := &fuse.LookupResponse{} initLookupResponse(s) if err := c.saveLookup(ctx, s, snode, r.NewName, n2); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) case *fuse.RemoveRequest: n, ok := node.(NodeRemover) if !ok { done(fuse.EIO) /// XXX or EPERM? 
r.RespondError(fuse.EIO) break } err := n.Remove(ctx, r) if err != nil { done(err) r.RespondError(err) break } done(nil) r.Respond() case *fuse.AccessRequest: if n, ok := node.(NodeAccesser); ok { if err := n.Access(ctx, r); err != nil { done(err) r.RespondError(err) break } } done(nil) r.Respond() case *fuse.LookupRequest: var n2 Node var err error s := &fuse.LookupResponse{} initLookupResponse(s) if n, ok := node.(NodeStringLookuper); ok { n2, err = n.Lookup(ctx, r.Name) } else if n, ok := node.(NodeRequestLookuper); ok { n2, err = n.Lookup(ctx, r, s) } else { done(fuse.ENOENT) r.RespondError(fuse.ENOENT) break } if err != nil { done(err) r.RespondError(err) break } if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) case *fuse.MkdirRequest: s := &fuse.MkdirResponse{} initLookupResponse(&s.LookupResponse) n, ok := node.(NodeMkdirer) if !ok { done(fuse.EPERM) r.RespondError(fuse.EPERM) break } n2, err := n.Mkdir(ctx, r) if err != nil { done(err) r.RespondError(err) break } if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) case *fuse.OpenRequest: s := &fuse.OpenResponse{} var h2 Handle if n, ok := node.(NodeOpener); ok { hh, err := n.Open(ctx, r, s) if err != nil { done(err) r.RespondError(err) break } h2 = hh } else { h2 = node } s.Handle = c.saveHandle(h2, hdr.Node) done(s) r.Respond(s) case *fuse.CreateRequest: n, ok := node.(NodeCreater) if !ok { // If we send back ENOSYS, FUSE will try mknod+open. done(fuse.EPERM) r.RespondError(fuse.EPERM) break } s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} initLookupResponse(&s.LookupResponse) n2, h2, err := n.Create(ctx, r, s) if err != nil { done(err) r.RespondError(err) break } if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { done(err) r.RespondError(err) break } s.Handle = c.saveHandle(h2, hdr.Node) done(s) r.Respond(s) case *fuse.GetxattrRequest: n, ok := node.(NodeGetxattrer) if !ok { done(fuse.ENOTSUP) r.RespondError(fuse.ENOTSUP) break } s := &fuse.GetxattrResponse{} err := n.Getxattr(ctx, r, s) if err != nil { done(err) r.RespondError(err) break } if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { done(fuse.ERANGE) r.RespondError(fuse.ERANGE) break } done(s) r.Respond(s) case *fuse.ListxattrRequest: n, ok := node.(NodeListxattrer) if !ok { done(fuse.ENOTSUP) r.RespondError(fuse.ENOTSUP) break } s := &fuse.ListxattrResponse{} err := n.Listxattr(ctx, r, s) if err != nil { done(err) r.RespondError(err) break } if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { done(fuse.ERANGE) r.RespondError(fuse.ERANGE) break } done(s) r.Respond(s) case *fuse.SetxattrRequest: n, ok := node.(NodeSetxattrer) if !ok { done(fuse.ENOTSUP) r.RespondError(fuse.ENOTSUP) break } err := n.Setxattr(ctx, r) if err != nil { done(err) r.RespondError(err) break } done(nil) r.Respond() case *fuse.RemovexattrRequest: n, ok := node.(NodeRemovexattrer) if !ok { done(fuse.ENOTSUP) r.RespondError(fuse.ENOTSUP) break } err := n.Removexattr(ctx, r) if err != nil { done(err) r.RespondError(err) break } done(nil) r.Respond() case *fuse.ForgetRequest: forget := c.dropNode(hdr.Node, r.N) if forget { n, ok := node.(NodeForgetter) if ok { n.Forget() } } done(nil) r.Respond() // Handle operations. 
case *fuse.ReadRequest: shandle := c.getHandle(r.Handle) if shandle == nil { done(fuse.ESTALE) r.RespondError(fuse.ESTALE) return } handle := shandle.handle s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)} if r.Dir { if h, ok := handle.(HandleReadDirAller); ok { if shandle.readData == nil { dirs, err := h.ReadDirAll(ctx) if err != nil { done(err) r.RespondError(err) break } var data []byte for _, dir := range dirs { if dir.Inode == 0 { dir.Inode = c.dynamicInode(snode.inode, dir.Name) } data = fuse.AppendDirent(data, dir) } shandle.readData = data } fuseutil.HandleRead(r, s, shandle.readData) done(s) r.Respond(s) break } } else { if h, ok := handle.(HandleReadAller); ok { if shandle.readData == nil { data, err := h.ReadAll(ctx) if err != nil { done(err) r.RespondError(err) break } if data == nil { data = []byte{} } shandle.readData = data } fuseutil.HandleRead(r, s, shandle.readData) done(s) r.Respond(s) break } h, ok := handle.(HandleReader) if !ok { fmt.Printf("NO READ FOR %T\n", handle) done(fuse.EIO) r.RespondError(fuse.EIO) break } if err := h.Read(ctx, r, s); err != nil { done(err) r.RespondError(err) break } } done(s) r.Respond(s) case *fuse.WriteRequest: shandle := c.getHandle(r.Handle) if shandle == nil { done(fuse.ESTALE) r.RespondError(fuse.ESTALE) return } s := &fuse.WriteResponse{} if h, ok := shandle.handle.(HandleWriter); ok { if err := h.Write(ctx, r, s); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) break } done(fuse.EIO) r.RespondError(fuse.EIO) case *fuse.FlushRequest: shandle := c.getHandle(r.Handle) if shandle == nil { done(fuse.ESTALE) r.RespondError(fuse.ESTALE) return } handle := shandle.handle if h, ok := handle.(HandleFlusher); ok { if err := h.Flush(ctx, r); err != nil { done(err) r.RespondError(err) break } } done(nil) r.Respond() case *fuse.ReleaseRequest: shandle := c.getHandle(r.Handle) if shandle == nil { done(fuse.ESTALE) r.RespondError(fuse.ESTALE) return } handle := shandle.handle // No matter what, release the handle. c.dropHandle(r.Handle) if h, ok := handle.(HandleReleaser); ok { if err := h.Release(ctx, r); err != nil { done(err) r.RespondError(err) break } } done(nil) r.Respond() case *fuse.DestroyRequest: if fs, ok := c.fs.(FSDestroyer); ok { fs.Destroy() } done(nil) r.Respond() case *fuse.RenameRequest: c.meta.Lock() var newDirNode *serveNode if int(r.NewDir) < len(c.node) { newDirNode = c.node[r.NewDir] } c.meta.Unlock() if newDirNode == nil { c.debug(renameNewDirNodeNotFound{ Request: r.Hdr(), In: r, }) done(fuse.EIO) r.RespondError(fuse.EIO) break } n, ok := node.(NodeRenamer) if !ok { done(fuse.EIO) // XXX or EPERM like Mkdir? 
r.RespondError(fuse.EIO) break } err := n.Rename(ctx, r, newDirNode.node) if err != nil { done(err) r.RespondError(err) break } done(nil) r.Respond() case *fuse.MknodRequest: n, ok := node.(NodeMknoder) if !ok { done(fuse.EIO) r.RespondError(fuse.EIO) break } n2, err := n.Mknod(ctx, r) if err != nil { done(err) r.RespondError(err) break } s := &fuse.LookupResponse{} initLookupResponse(s) if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { done(err) r.RespondError(err) break } done(s) r.Respond(s) case *fuse.FsyncRequest: n, ok := node.(NodeFsyncer) if !ok { done(fuse.EIO) r.RespondError(fuse.EIO) break } err := n.Fsync(ctx, r) if err != nil { done(err) r.RespondError(err) break } done(nil) r.Respond() case *fuse.InterruptRequest: c.meta.Lock() ireq := c.req[r.IntrID] if ireq != nil && ireq.cancel != nil { ireq.cancel() ireq.cancel = nil } c.meta.Unlock() done(nil) r.Respond() /* case *FsyncdirRequest: done(ENOSYS) r.RespondError(ENOSYS) case *GetlkRequest, *SetlkRequest, *SetlkwRequest: done(ENOSYS) r.RespondError(ENOSYS) case *BmapRequest: done(ENOSYS) r.RespondError(ENOSYS) case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest: done(ENOSYS) r.RespondError(ENOSYS) */ } }
func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithCancel(i.node.Context()) defer cancel() urlPath := r.URL.Path // IPNSHostnameOption might have constructed an IPNS path using the Host header. // In this case, we need the original path for constructing redirects // and links that match the requested URL. // For example, http://example.net would become /ipns/example.net, and // the redirects and links would end up as http://example.net/ipns/example.net originalUrlPath := urlPath ipnsHostname := false hdr := r.Header["X-IPNS-Original-Path"] if len(hdr) > 0 { originalUrlPath = hdr[0] ipnsHostname = true } if i.config.BlockList != nil && i.config.BlockList.ShouldBlock(urlPath) { w.WriteHeader(http.StatusForbidden) w.Write([]byte("403 - Forbidden")) return } nd, err := core.Resolve(ctx, i.node, path.Path(urlPath)) if err != nil { webError(w, "Path Resolve error", err, http.StatusBadRequest) return } etag := gopath.Base(urlPath) if r.Header.Get("If-None-Match") == etag { w.WriteHeader(http.StatusNotModified) return } i.addUserHeaders(w) // ok, _now_ write user's headers. w.Header().Set("X-IPFS-Path", urlPath) // Suborigin header, sandboxes apps from each other in the browser (even // though they are served from the same gateway domain). // // Omited if the path was treated by IPNSHostnameOption(), for example // a request for http://example.net/ would be changed to /ipns/example.net/, // which would turn into an incorrect Suborigin: example.net header. // // NOTE: This is not yet widely supported by browsers. if !ipnsHostname { pathRoot := strings.SplitN(urlPath, "/", 4)[2] w.Header().Set("Suborigin", pathRoot) } dr, err := uio.NewDagReader(ctx, nd, i.node.DAG) if err != nil && err != uio.ErrIsDir { // not a directory and still an error internalWebError(w, err) return } // set these headers _after_ the error, for we may just not have it // and dont want the client to cache a 500 response... // and only if it's /ipfs! // TODO: break this out when we split /ipfs /ipns routes. modtime := time.Now() if strings.HasPrefix(urlPath, ipfsPathPrefix) { w.Header().Set("Etag", etag) w.Header().Set("Cache-Control", "public, max-age=29030400") // set modtime to a really long time ago, since files are immutable and should stay cached modtime = time.Unix(1, 0) } if err == nil { defer dr.Close() _, name := gopath.Split(urlPath) http.ServeContent(w, r, name, modtime, dr) return } // storage for directory listing var dirListing []directoryItem // loop through files foundIndex := false for _, link := range nd.Links { if link.Name == "index.html" { log.Debugf("found index.html link for %s", urlPath) foundIndex = true if urlPath[len(urlPath)-1] != '/' { // See comment above where originalUrlPath is declared. http.Redirect(w, r, originalUrlPath+"/", 302) log.Debugf("redirect to %s", originalUrlPath+"/") return } // return index page instead. nd, err := core.Resolve(ctx, i.node, path.Path(urlPath+"/index.html")) if err != nil { internalWebError(w, err) return } dr, err := uio.NewDagReader(ctx, nd, i.node.DAG) if err != nil { internalWebError(w, err) return } defer dr.Close() // write to request if r.Method != "HEAD" { io.Copy(w, dr) } break } // See comment above where originalUrlPath is declared. 
di := directoryItem{humanize.Bytes(link.Size), link.Name, gopath.Join(originalUrlPath, link.Name)} dirListing = append(dirListing, di) } if !foundIndex { if r.Method != "HEAD" { // construct the correct back link // https://github.com/ipfs/go-ipfs/issues/1365 var backLink string = urlPath // don't go further up than /ipfs/$hash/ pathSplit := strings.Split(backLink, "/") switch { // keep backlink case len(pathSplit) == 3: // url: /ipfs/$hash // keep backlink case len(pathSplit) == 4 && pathSplit[3] == "": // url: /ipfs/$hash/ // add the correct link depending on wether the path ends with a slash default: if strings.HasSuffix(backLink, "/") { backLink += "./.." } else { backLink += "/.." } } // strip /ipfs/$hash from backlink if IPNSHostnameOption touched the path. if ipnsHostname { backLink = "/" if len(pathSplit) > 5 { // also strip the trailing segment, because it's a backlink backLinkParts := pathSplit[3 : len(pathSplit)-2] backLink += strings.Join(backLinkParts, "/") + "/" } } // See comment above where originalUrlPath is declared. tplData := listingTemplateData{ Listing: dirListing, Path: originalUrlPath, BackLink: backLink, } err := listingTemplate.Execute(w, tplData) if err != nil { internalWebError(w, err) return } } } }
func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
	urlPath := r.URL.Path
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	// TODO(cyrptix): assumes len(pathNodes) > 1 - not found is an error above?
	err = pathNodes[len(pathNodes)-1].RemoveNodeLink(components[len(components)-1])
	if err != nil {
		webError(w, "Could not delete link", err, http.StatusBadRequest)
		return
	}

	newnode := pathNodes[len(pathNodes)-1]
	for i := len(pathNodes) - 2; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components[:len(components)-1], "/"), http.StatusCreated)
}
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) { // TODO(cryptix): either ask mildred about the flow of this or rewrite it webErrorWithCode(w, "Sorry, PUT is bugged right now, closing request", errors.New("handler disabled"), http.StatusInternalServerError) return urlPath := r.URL.Path pathext := urlPath[5:] var err error if urlPath == ipfsPathPrefix+"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn/" { i.putEmptyDirHandler(w, r) return } var newnode *dag.Node if pathext[len(pathext)-1] == '/' { newnode = uio.NewEmptyDirectory() } else { newnode, err = i.newDagFromReader(r.Body) if err != nil { webError(w, "Could not create DAG from request", err, http.StatusInternalServerError) return } } ctx, cancel := context.WithCancel(i.node.Context()) defer cancel() ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath)) if err != nil { // FIXME HTTP error code webError(w, "Could not resolve name", err, http.StatusInternalServerError) return } k, err := ipfsNode.Key() if err != nil { webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError) return } h, components, err := path.SplitAbsPath(path.FromKey(k)) if err != nil { webError(w, "Could not split path", err, http.StatusInternalServerError) return } if len(components) < 1 { err = fmt.Errorf("Cannot override existing object") webError(w, "http gateway", err, http.StatusBadRequest) return } tctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() // TODO(cryptix): could this be core.Resolve() too? rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h)) if err != nil { webError(w, "Could not resolve root object", err, http.StatusBadRequest) return } // resolving path components into merkledag nodes. if a component does not // resolve, create empty directories (which will be linked and populated below.) pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1]) if _, ok := err.(path.ErrNoLink); ok { // Create empty directories, links will be made further down the code for len(pathNodes) < len(components) { pathNodes = append(pathNodes, uio.NewDirectory(i.node.DAG).GetNode()) } } else if err != nil { webError(w, "Could not resolve parent object", err, http.StatusBadRequest) return } for i := len(pathNodes) - 1; i >= 0; i-- { newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode) if err != nil { webError(w, "Could not update node links", err, http.StatusInternalServerError) return } } if err := i.node.DAG.AddRecursive(newnode); err != nil { webError(w, "Could not add recursively new node", err, http.StatusInternalServerError) return } // Redirect to new path key, err := newnode.Key() if err != nil { webError(w, "Could not get key of new node", err, http.StatusInternalServerError) return } i.addUserHeaders(w) // ok, _now_ write user's headers. w.Header().Set("IPFS-Hash", key.String()) http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components, "/"), http.StatusCreated) }